Skip to content

Commit 75b8a3d

Browse files
authored
Add cursor-based pagination support for GraphQL records endpoints (#90)
1 parent f91201a commit 75b8a3d

File tree

7 files changed

+600
-17
lines changed

7 files changed

+600
-17
lines changed

crates/game_api/Cargo.toml

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,10 +9,7 @@ name = "game_api_lib"
99
[dependencies]
1010
thiserror = { workspace = true }
1111
tokio = { workspace = true, features = ["full"] }
12-
async-graphql = { workspace = true, features = [
13-
"dataloader",
14-
"apollo_tracing",
15-
] }
12+
async-graphql = { workspace = true }
1613
sqlx = { workspace = true }
1714
chrono = { workspace = true }
1815
deadpool = { workspace = true }

crates/graphql-api/Cargo.toml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,13 +4,14 @@ version = "0.1.0"
44
edition = "2024"
55

66
[dependencies]
7-
async-graphql = { workspace = true }
7+
async-graphql = { workspace = true, features = ["dataloader", "apollo_tracing", ] }
88
entity = { path = "../entity" }
99
records-lib = { path = "../records_lib" }
1010
sea-orm = { workspace = true }
1111
reqwest = { workspace = true }
1212
serde = { workspace = true }
13-
tokio = { workspace = true }
13+
tokio = { workspace = true, features = ["macros"] }
1414
deadpool-redis = { workspace = true }
1515
chrono = { workspace = true }
1616
futures = { workspace = true }
17+
base64 = "0.22"

crates/graphql-api/src/lib.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
11
pub mod loaders;
22
pub mod objects;
33
pub mod schema;
4+
5+
pub mod records_connection;

crates/graphql-api/src/objects/map.rs

Lines changed: 228 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
use async_graphql::{ID, dataloader::DataLoader};
1+
use async_graphql::{ID, connection, dataloader::DataLoader};
22
use deadpool_redis::redis::AsyncCommands as _;
33
use entity::{
44
event_edition, event_edition_maps, global_event_records, global_records, maps, player_rating,
@@ -24,6 +24,7 @@ use crate::{
2424
event_edition::EventEdition, player::Player, player_rating::PlayerRating,
2525
ranked_record::RankedRecord, related_edition::RelatedEdition, sort_state::SortState,
2626
},
27+
records_connection::{ConnectionParameters, decode_cursor, encode_cursor},
2728
};
2829

2930
#[derive(FromQueryResult)]
@@ -137,6 +138,161 @@ async fn get_map_records<C: ConnectionTrait + StreamTrait>(
137138
Ok(ranked_records)
138139
}
139140

141+
async fn get_map_records_connection<C: ConnectionTrait + StreamTrait>(
142+
conn: &C,
143+
redis_conn: &mut RedisConnection,
144+
map_id: u32,
145+
event: OptEvent<'_>,
146+
ConnectionParameters {
147+
before,
148+
after,
149+
first,
150+
last,
151+
}: ConnectionParameters,
152+
rank_sort_by: Option<SortState>,
153+
date_sort_by: Option<SortState>,
154+
) -> async_graphql::Result<connection::Connection<ID, RankedRecord>> {
155+
let limit = if let Some(first) = first {
156+
if !(1..=100).contains(&first) {
157+
return Err(async_graphql::Error::new(
158+
"'first' must be between 1 and 100",
159+
));
160+
}
161+
first
162+
} else if let Some(last) = last {
163+
if !(1..=100).contains(&last) {
164+
return Err(async_graphql::Error::new(
165+
"'last' must be between 1 and 100",
166+
));
167+
}
168+
last
169+
} else {
170+
50 // Default limit
171+
};
172+
173+
// Decode cursors if provided
174+
let after_timestamp = if let Some(cursor) = after.as_ref() {
175+
Some(decode_cursor(cursor).map_err(async_graphql::Error::new)?)
176+
} else {
177+
None
178+
};
179+
180+
let before_timestamp = if let Some(cursor) = before.as_ref() {
181+
Some(decode_cursor(cursor).map_err(async_graphql::Error::new)?)
182+
} else {
183+
None
184+
};
185+
186+
// Determine if we're going forward or backward
187+
let is_backward = last.is_some() || before.is_some();
188+
let has_previous_page = after.is_some();
189+
190+
let _key = map_key(map_id, event);
191+
update_leaderboard(conn, redis_conn, map_id, event).await?;
192+
193+
let mut select = Query::select();
194+
195+
let select = match event.get() {
196+
Some((ev, ed)) => select.from_as(global_event_records::Entity, "r").and_where(
197+
Expr::col(("r", global_event_records::Column::EventId))
198+
.eq(ev.id)
199+
.and(Expr::col(("r", global_event_records::Column::EditionId)).eq(ed.id)),
200+
),
201+
None => select.from_as(global_records::Entity, "r"),
202+
}
203+
.column(Asterisk)
204+
.and_where(Expr::col(("r", records::Column::MapId)).eq(map_id));
205+
206+
// Apply cursor filters
207+
if let Some(timestamp) = after_timestamp {
208+
let dt = chrono::DateTime::from_timestamp_millis(timestamp)
209+
.ok_or_else(|| async_graphql::Error::new("Invalid timestamp in cursor"))?
210+
.naive_utc();
211+
212+
select.and_where(Expr::col(("r", records::Column::RecordDate)).lt(dt));
213+
}
214+
215+
if let Some(timestamp) = before_timestamp {
216+
let dt = chrono::DateTime::from_timestamp_millis(timestamp)
217+
.ok_or_else(|| async_graphql::Error::new("Invalid timestamp in cursor"))?
218+
.naive_utc();
219+
220+
select.and_where(Expr::col(("r", records::Column::RecordDate)).gt(dt));
221+
}
222+
223+
// Apply ordering based on date_sort_by and pagination direction
224+
if let Some(ref s) = date_sort_by {
225+
let order = match (s, is_backward) {
226+
(SortState::Sort, false) => sea_orm::Order::Desc,
227+
(SortState::Sort, true) => sea_orm::Order::Asc,
228+
(SortState::Reverse, false) => sea_orm::Order::Asc,
229+
(SortState::Reverse, true) => sea_orm::Order::Desc,
230+
};
231+
select.order_by_expr(Expr::col(("r", records::Column::RecordDate)).into(), order);
232+
} else if rank_sort_by.is_some() {
233+
// For rank-based sorting with pagination, we need to fetch player IDs from Redis
234+
// This is complex and may not work well with cursors
235+
// For now, we'll order by time which correlates with rank
236+
let to_reverse = matches!(rank_sort_by, Some(SortState::Reverse));
237+
let order = match (to_reverse, is_backward) {
238+
(false, false) => sea_orm::Order::Asc, // Best times first
239+
(false, true) => sea_orm::Order::Desc,
240+
(true, false) => sea_orm::Order::Desc, // Worst times first
241+
(true, true) => sea_orm::Order::Asc,
242+
};
243+
select.order_by_expr(Expr::col(("r", records::Column::Time)).into(), order);
244+
select.order_by_expr(
245+
Expr::col(("r", records::Column::RecordDate)).into(),
246+
sea_orm::Order::Asc,
247+
);
248+
} else {
249+
// Default ordering by record date
250+
let order = if is_backward {
251+
sea_orm::Order::Asc
252+
} else {
253+
sea_orm::Order::Desc
254+
};
255+
select.order_by_expr(Expr::col(("r", records::Column::RecordDate)).into(), order);
256+
}
257+
258+
// Fetch one extra to determine if there's a next/previous page
259+
select.limit((limit + 1) as u64);
260+
261+
let stmt = conn.get_database_backend().build(&*select);
262+
let mut records = conn
263+
.query_all(stmt)
264+
.await?
265+
.into_iter()
266+
.map(|result| records::Model::from_query_result(&result, ""))
267+
.collect::<Result<Vec<_>, _>>()?;
268+
269+
// If backward pagination, reverse the results
270+
if is_backward {
271+
records.reverse();
272+
}
273+
274+
let mut connection = connection::Connection::new(has_previous_page, records.len() > limit);
275+
276+
for record in records {
277+
let rank = get_rank(
278+
conn,
279+
redis_conn,
280+
map_id,
281+
record.record_player_id,
282+
record.time,
283+
event,
284+
)
285+
.await?;
286+
287+
connection.edges.push(connection::Edge::new(
288+
ID(encode_cursor(&record.record_date.and_utc())),
289+
records::RankedRecord { rank, record }.into(),
290+
));
291+
}
292+
293+
Ok(connection)
294+
}
295+
140296
impl Map {
141297
pub(super) async fn get_records(
142298
&self,
@@ -161,6 +317,50 @@ impl Map {
161317
}))
162318
.await
163319
}
320+
321+
    /// Runs the paginated records query for this map inside a transaction.
    ///
    /// Grabs the shared [`Database`] from the GraphQL context, checks out a
    /// Redis connection, then delegates to `get_map_records_connection` via
    /// `connection::query`, which validates/threads the raw `after`/`before`/
    /// `first`/`last` arguments into the closure.
    ///
    /// NOTE(review): `assert_future_send` wraps the transaction future —
    /// presumably to force a `Send` bound the async closure would otherwise
    /// obscure; confirm against `records_lib`. `redis_conn` is captured
    /// mutably by the inner `async move` closure, so the closure can only be
    /// invoked once per call, which `connection::query` satisfies.
    #[allow(clippy::too_many_arguments)]
    pub(super) async fn get_records_connection(
        &self,
        gql_ctx: &async_graphql::Context<'_>,
        event: OptEvent<'_>,
        after: Option<String>,
        before: Option<String>,
        first: Option<i32>,
        last: Option<i32>,
        rank_sort_by: Option<SortState>,
        date_sort_by: Option<SortState>,
    ) -> async_graphql::Result<connection::Connection<ID, RankedRecord>> {
        let db = gql_ctx.data_unchecked::<Database>();
        let mut redis_conn = db.redis_pool.get().await?;

        records_lib::assert_future_send(transaction::within(&db.sql_conn, async |txn| {
            connection::query(
                after,
                before,
                first,
                last,
                |after, before, first, last| async move {
                    get_map_records_connection(
                        txn,
                        &mut redis_conn,
                        self.inner.id,
                        event,
                        ConnectionParameters {
                            after,
                            before,
                            first,
                            last,
                        },
                        rank_sort_by,
                        date_sort_by,
                    )
                    .await
                },
            )
            .await
        }))
        .await
    }
164364
}
165365

166366
#[async_graphql::Object]
@@ -269,4 +469,31 @@ impl Map {
269469
self.get_records(ctx, Default::default(), rank_sort_by, date_sort_by)
270470
.await
271471
}
472+
473+
    /// GraphQL field: cursor-based (Relay-style) pagination over this map's
    /// records.
    ///
    /// Thin resolver that forwards every argument to
    /// [`Map::get_records_connection`] with a default (non-event) context.
    /// The `#[graphql(desc = ...)]` strings are schema-visible documentation
    /// and must not be altered lightly.
    #[allow(clippy::too_many_arguments)]
    async fn records_connection(
        &self,
        ctx: &async_graphql::Context<'_>,
        #[graphql(desc = "Cursor to fetch records after (for forward pagination)")] after: Option<
            String,
        >,
        #[graphql(desc = "Cursor to fetch records before (for backward pagination)")]
        before: Option<String>,
        #[graphql(desc = "Number of records to fetch (default: 50, max: 100)")] first: Option<i32>,
        #[graphql(desc = "Number of records to fetch from the end (for backward pagination)")] last: Option<i32>,
        rank_sort_by: Option<SortState>,
        date_sort_by: Option<SortState>,
    ) -> async_graphql::Result<connection::Connection<ID, RankedRecord>> {
        // `Default::default()` supplies an empty `OptEvent`, i.e. the global
        // (non-event) leaderboard.
        self.get_records_connection(
            ctx,
            Default::default(),
            after,
            before,
            first,
            last,
            rank_sort_by,
            date_sort_by,
        )
        .await
    }
272499
}

0 commit comments

Comments
 (0)