From 4fb1182177303e2c04f4532e7892d97845adaf7a Mon Sep 17 00:00:00 2001
From: pthmas <9058370+pthmas@users.noreply.github.com>
Date: Tue, 24 Feb 2026 18:32:55 +0100
Subject: [PATCH 1/3] add timeout for http and db requests

---
 backend/Cargo.toml                              |  2 +-
 backend/crates/atlas-api/src/handlers/blocks.rs | 13 ++++++++++---
 backend/crates/atlas-api/src/main.rs            |  6 ++++++
 backend/crates/atlas-common/src/db.rs           | 12 ++++++++++--
 4 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/backend/Cargo.toml b/backend/Cargo.toml
index 833181e..3ed4006 100644
--- a/backend/Cargo.toml
+++ b/backend/Cargo.toml
@@ -18,7 +18,7 @@ tokio = { version = "1.43", features = ["full"] }
 
 # Web framework
 axum = { version = "0.8", features = ["macros"] }
-tower-http = { version = "0.6", features = ["cors", "trace"] }
+tower-http = { version = "0.6", features = ["cors", "trace", "timeout"] }
 
 # Database
 sqlx = { version = "0.8", features = ["runtime-tokio", "tls-rustls", "postgres", "migrate", "json", "bigdecimal", "chrono"] }
diff --git a/backend/crates/atlas-api/src/handlers/blocks.rs b/backend/crates/atlas-api/src/handlers/blocks.rs
index 7b13de9..e819ade 100644
--- a/backend/crates/atlas-api/src/handlers/blocks.rs
+++ b/backend/crates/atlas-api/src/handlers/blocks.rs
@@ -19,14 +19,21 @@ pub async fn list_blocks(
         .await?;
     let total_count = total.0.unwrap_or(0);
 
+    // Convert page-based navigation to a keyset cursor using block numbers.
+    // Blocks are sequential so: cursor = max_block - (page - 1) * limit
+    // WHERE number <= cursor is O(log N) via primary key; OFFSET was O(N).
+    let limit = pagination.limit();
+    let cursor = (total_count - 1) - pagination.offset();
+
     let blocks: Vec<Block> = sqlx::query_as(
         "SELECT number, hash, parent_hash, timestamp, gas_used, gas_limit, transaction_count, indexed_at FROM blocks
+         WHERE number <= $2
          ORDER BY number DESC
-         LIMIT $1 OFFSET $2"
+         LIMIT $1"
     )
-    .bind(pagination.limit())
-    .bind(pagination.offset())
+    .bind(limit)
+    .bind(cursor)
     .fetch_all(&state.pool)
     .await?;
 
diff --git a/backend/crates/atlas-api/src/main.rs b/backend/crates/atlas-api/src/main.rs
index db84865..58d6cd1 100644
--- a/backend/crates/atlas-api/src/main.rs
+++ b/backend/crates/atlas-api/src/main.rs
@@ -5,7 +5,9 @@ use axum::{
 };
 use sqlx::PgPool;
 use std::sync::Arc;
+use std::time::Duration;
 use tower_http::cors::{Any, CorsLayer};
+use tower_http::timeout::TimeoutLayer;
 use tower_http::trace::TraceLayer;
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 
@@ -209,6 +211,10 @@ async fn main() -> Result<()> {
         .route("/api/status", get(handlers::status::get_status))
         // Health
         .route("/health", get(|| async { "OK" }))
+        .layer(TimeoutLayer::with_status_code(
+            axum::http::StatusCode::REQUEST_TIMEOUT,
+            Duration::from_secs(10),
+        ))
         .layer(
             CorsLayer::new()
                 .allow_origin(Any)
diff --git a/backend/crates/atlas-common/src/db.rs b/backend/crates/atlas-common/src/db.rs
index 188822a..7f874ca 100644
--- a/backend/crates/atlas-common/src/db.rs
+++ b/backend/crates/atlas-common/src/db.rs
@@ -1,10 +1,18 @@
 use sqlx::postgres::PgPoolOptions;
-use sqlx::PgPool;
+use sqlx::{Executor, PgPool};
 
-/// Create a database connection pool
+/// Create a database connection pool.
+/// Sets statement_timeout = 10s on every connection to prevent slow queries
+/// from exhausting the pool.
 pub async fn create_pool(database_url: &str, max_connections: u32) -> Result<PgPool, sqlx::Error> {
     PgPoolOptions::new()
         .max_connections(max_connections)
+        .after_connect(|conn, _meta| {
+            Box::pin(async move {
+                conn.execute("SET statement_timeout = '10s'").await?;
+                Ok(())
+            })
+        })
         .connect(database_url)
         .await
 }

From bfea70156fe72d86e4877f0318cedcfc50a8e190 Mon Sep 17 00:00:00 2001
From: pthmas <9058370+pthmas@users.noreply.github.com>
Date: Wed, 25 Feb 2026 10:53:20 +0100
Subject: [PATCH 2/3] fix pagination

---
 backend/crates/atlas-api/src/handlers/blocks.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/crates/atlas-api/src/handlers/blocks.rs b/backend/crates/atlas-api/src/handlers/blocks.rs
index e819ade..d9a10b8 100644
--- a/backend/crates/atlas-api/src/handlers/blocks.rs
+++ b/backend/crates/atlas-api/src/handlers/blocks.rs
@@ -23,7 +23,7 @@ pub async fn list_blocks(
     // Blocks are sequential so: cursor = max_block - (page - 1) * limit
     // WHERE number <= cursor is O(log N) via primary key; OFFSET was O(N).
     let limit = pagination.limit();
-    let cursor = (total_count - 1) - pagination.offset();
+    let cursor = (total_count - 1) - (pagination.page.saturating_sub(1) as i64) * limit;
 
     let blocks: Vec<Block> = sqlx::query_as(
         "SELECT number, hash, parent_hash, timestamp, gas_used, gas_limit, transaction_count, indexed_at FROM blocks

From e4b37f4373c1e93e3d1677aefe2d54f05f233825 Mon Sep 17 00:00:00 2001
From: pthmas <9058370+pthmas@users.noreply.github.com>
Date: Wed, 25 Feb 2026 10:58:42 +0100
Subject: [PATCH 3/3] use different pool for migrations to avoid timeouts

---
 backend/crates/atlas-api/src/main.rs     |  2 +-
 backend/crates/atlas-common/src/db.rs    | 15 ++++++++++++---
 backend/crates/atlas-indexer/src/main.rs |  2 +-
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/backend/crates/atlas-api/src/main.rs b/backend/crates/atlas-api/src/main.rs
index 58d6cd1..3e471f0 100644
--- a/backend/crates/atlas-api/src/main.rs
+++ b/backend/crates/atlas-api/src/main.rs
@@ -51,7 +51,7 @@ async fn main() -> Result<()> {
 
     // Run migrations
     tracing::info!("Running database migrations");
-    atlas_common::db::run_migrations(&pool).await?;
+    atlas_common::db::run_migrations(&database_url).await?;
 
     let state = Arc::new(AppState {
         pool,
diff --git a/backend/crates/atlas-common/src/db.rs b/backend/crates/atlas-common/src/db.rs
index 7f874ca..f2d7edf 100644
--- a/backend/crates/atlas-common/src/db.rs
+++ b/backend/crates/atlas-common/src/db.rs
@@ -17,7 +17,16 @@ pub async fn create_pool(database_url: &str, max_connections: u32) -> Result<PgPool, sqlx::Error> {
         .await
 }
 
-/// Run database migrations
-pub async fn run_migrations(pool: &PgPool) -> Result<(), sqlx::migrate::MigrateError> {
-    sqlx::migrate!("../../migrations").run(pool).await
+/// Run database migrations using a dedicated connection without statement_timeout,
+/// since migrations (index builds, bulk inserts) can legitimately exceed 10s.
+pub async fn run_migrations(database_url: &str) -> Result<(), sqlx::Error> {
+    let pool = PgPoolOptions::new()
+        .max_connections(1)
+        .connect(database_url)
+        .await?;
+    sqlx::migrate!("../../migrations")
+        .run(&pool)
+        .await
+        .map_err(|e| sqlx::Error::Migrate(Box::new(e)))?;
+    Ok(())
 }
diff --git a/backend/crates/atlas-indexer/src/main.rs b/backend/crates/atlas-indexer/src/main.rs
index 331f047..62451e8 100644
--- a/backend/crates/atlas-indexer/src/main.rs
+++ b/backend/crates/atlas-indexer/src/main.rs
@@ -36,7 +36,7 @@ async fn main() -> Result<()> {
 
     // Run migrations
     tracing::info!("Running database migrations");
-    atlas_common::db::run_migrations(&pool).await?;
+    atlas_common::db::run_migrations(&config.database_url).await?;
 
     // Start indexer
     let indexer = indexer::Indexer::new(pool.clone(), config.clone());