diff --git a/graphile/graphile-presigned-url-plugin/__tests__/s3-signer.integration.test.ts b/graphile/graphile-presigned-url-plugin/__tests__/s3-signer.integration.test.ts index 2474a1f3c..8890ac4d9 100644 --- a/graphile/graphile-presigned-url-plugin/__tests__/s3-signer.integration.test.ts +++ b/graphile/graphile-presigned-url-plugin/__tests__/s3-signer.integration.test.ts @@ -16,6 +16,7 @@ import { generatePresignedPutUrl, generatePresignedGetUrl, headObject, + deleteS3Object, } from '../src/s3-signer'; import type { S3Config } from '../src/types'; @@ -229,6 +230,41 @@ describe('s3-signer integration (MinIO)', () => { }); }); + describe('deleteS3Object', () => { + it('should delete an existing object from S3', async () => { + const key = 'test-delete-' + Date.now() + '.txt'; + const content = 'file to delete'; + const contentType = 'text/plain'; + + // Upload a file first + const putUrl = await generatePresignedPutUrl( + s3Config, + key, + contentType, + Buffer.byteLength(content), + ); + const putRes = await uploadToPresignedUrl(putUrl, content, contentType); + expect(putRes.status).toBe(200); + + // Verify it exists + const beforeHead = await headObject(s3Config, key); + expect(beforeHead).not.toBeNull(); + + // Delete it + await deleteS3Object(s3Config, key); + + // Verify it's gone + const afterHead = await headObject(s3Config, key); + expect(afterHead).toBeNull(); + }); + + it('should be idempotent (no error deleting non-existent key)', async () => { + await expect( + deleteS3Object(s3Config, 'non-existent-key-' + Date.now()), + ).resolves.toBeUndefined(); + }); + }); + describe('full round-trip: PUT → HEAD → GET', () => { it('should upload, verify, and download a text payload', async () => { const key = 'roundtrip-test-' + Date.now() + '.txt'; diff --git a/graphile/graphile-presigned-url-plugin/src/download-url-field.ts b/graphile/graphile-presigned-url-plugin/src/download-url-field.ts index 0a6f7abae..a58d3b07d 100644 --- a/graphile/graphile-presigned-url-plugin/src/download-url-field.ts +++ b/graphile/graphile-presigned-url-plugin/src/download-url-field.ts @@ -20,12 +20,13 @@ */ import type { GraphileConfig } from 'graphile-config'; +import 'graphile-build'; import { context as grafastContext, lambda, object } from 'grafast'; import { Logger } from '@pgpmjs/logger'; import type { PresignedUrlPluginOptions, S3Config, StorageModuleConfig } from './types'; import { generatePresignedGetUrl } from './s3-signer'; -import { getStorageModuleConfig } from './storage-module-cache'; +import { loadAllStorageModules, resolveStorageConfigFromCodec } from './storage-module-cache'; const log = new Logger('graphile-presigned-url:download-url'); @@ -110,6 +111,8 @@ export function createDownloadUrlPlugin( graphql: { GraphQLString }, } = build; + const capturedCodec = pgCodec; + return build.extend( fields, { @@ -121,12 +124,10 @@ export function createDownloadUrlPlugin( 'For private files, returns a time-limited presigned URL.', type: GraphQLString, plan($parent: any) { - // Access file attributes from the parent PgSelectSingleStep const $key = $parent.get('key'); const $isPublic = $parent.get('is_public'); const $filename = $parent.get('filename'); - // Access GraphQL context for per-database config resolution const $withPgClient = (grafastContext() as any).get('withPgClient'); const $pgSettings = (grafastContext() as any).get('pgSettings'); @@ -141,9 +142,8 @@ export function createDownloadUrlPlugin( return lambda($combined, async ({ key, isPublic, filename, withPgClient, pgSettings }: any) => { 
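              // Rows without a stored key have nothing in S3 to link or sign.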
if (!key) return null; - // Resolve per-database config (bucket, publicUrlPrefix, expiry) - let s3ForDb = resolveS3(options); // fallback to global - let downloadUrlExpirySeconds = 3600; // fallback default + let s3ForDb = resolveS3(options); + let downloadUrlExpirySeconds = 3600; try { if (withPgClient && pgSettings) { const resolved = await withPgClient(null, async (pgClient: any) => { @@ -152,7 +152,8 @@ export function createDownloadUrlPlugin( }); const databaseId = dbResult.rows[0]?.id; if (!databaseId) return null; - const config = await getStorageModuleConfig(pgClient, databaseId); + const allConfigs = await loadAllStorageModules(pgClient, databaseId); + const config = resolveStorageConfigFromCodec(capturedCodec, allConfigs); if (!config) return null; return { config, databaseId }; }); @@ -166,11 +167,9 @@ export function createDownloadUrlPlugin( } if (isPublic && s3ForDb.publicUrlPrefix) { - // Public file: return direct CDN URL (per-database prefix) return `${s3ForDb.publicUrlPrefix}/${key}`; } - // Private file: generate presigned GET URL (per-database bucket) return generatePresignedGetUrl( s3ForDb, key, diff --git a/graphile/graphile-presigned-url-plugin/src/index.ts b/graphile/graphile-presigned-url-plugin/src/index.ts index b8b209222..94e3babc0 100644 --- a/graphile/graphile-presigned-url-plugin/src/index.ts +++ b/graphile/graphile-presigned-url-plugin/src/index.ts @@ -1,9 +1,10 @@ /** * Presigned URL Plugin for PostGraphile v5 * - * Provides presigned URL upload capabilities for PostGraphile v5: - * - requestUploadUrl mutation (presigned PUT URL generation + dedup) - * - downloadUrl computed field (presigned GET URL / public URL) + * Provides per-table S3 storage middleware for PostGraphile v5: + * - Upload fields on @storageBuckets types (requestUploadUrl, requestBulkUploadUrls) + * - Delete middleware on @storageFiles tables (S3 cleanup on delete) + * - downloadUrl computed field on @storageFiles types * * @example * ```typescript @@ -29,8 +30,8 @@ export { PresignedUrlPlugin, createPresignedUrlPlugin } from './plugin'; export { createDownloadUrlPlugin } from './download-url-field'; export { PresignedUrlPreset } from './preset'; -export { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, resolveStorageModuleByFileId, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache'; -export { generatePresignedPutUrl, generatePresignedGetUrl, headObject } from './s3-signer'; +export { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, resolveStorageModuleByFileId, loadAllStorageModules, resolveStorageConfigFromCodec, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache'; +export { generatePresignedPutUrl, generatePresignedGetUrl, deleteS3Object, headObject } from './s3-signer'; export type { BucketConfig, StorageModuleConfig, diff --git a/graphile/graphile-presigned-url-plugin/src/plugin.ts b/graphile/graphile-presigned-url-plugin/src/plugin.ts index 030095498..212943871 100644 --- a/graphile/graphile-presigned-url-plugin/src/plugin.ts +++ b/graphile/graphile-presigned-url-plugin/src/plugin.ts @@ -1,26 +1,29 @@ /** - * Presigned URL Plugin for PostGraphile v5 + * Per-Table Storage Middleware Plugin for PostGraphile v5 * - * Adds presigned URL upload support to PostGraphile v5: + * Hooks into PostGraphile's auto-generated CRUD mutations to add S3 operations: * - * 1. 
`requestUploadUrl` mutation — generates a presigned PUT URL for direct - * client-to-S3 upload. Checks bucket access via RLS, deduplicates by - * content hash via UNIQUE(bucket_id, key) constraint. + * 1. Delete middleware — wraps `delete*` mutations on `@storageFiles`-tagged tables + * with S3 object cleanup (sync + async GC fallback via AFTER DELETE trigger). * - * 2. `downloadUrl` computed field on File types — generates presigned GET URLs - * for private files, returns public URL prefix + key for public files. + * 2. Upload fields — adds `requestUploadUrl` and `requestBulkUploadUrls` fields + * on `@storageBuckets`-tagged types, so clients upload via the typed bucket API. * - * Uses the extendSchema + grafast plan pattern (same as PublicKeySignature). + * 3. downloadUrl — handled by download-url-field.ts (separate plugin). + * + * No global mutations — all S3 operations are scoped to the per-table types that + * PostGraphile already generates. Scope resolution uses the codec's schema/table + * name matched against cached storage module configs. */ import { context as grafastContext, lambda, object } from 'grafast'; import type { GraphileConfig } from 'graphile-config'; -import { extendSchema, gql } from 'graphile-utils'; +import 'graphile-build'; import { Logger } from '@pgpmjs/logger'; import type { PresignedUrlPluginOptions, S3Config, StorageModuleConfig, BucketConfig } from './types'; -import { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache'; -import { generatePresignedPutUrl } from './s3-signer'; +import { loadAllStorageModules, resolveStorageConfigFromCodec, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache'; +import { generatePresignedPutUrl, deleteS3Object } from './s3-signer'; const log = new Logger('graphile-presigned-url:plugin'); @@ -28,32 +31,20 @@ const log = new Logger('graphile-presigned-url:plugin'); const MAX_CONTENT_HASH_LENGTH = 128; const MAX_CONTENT_TYPE_LENGTH = 255; -const MAX_BUCKET_KEY_LENGTH = 255; const MAX_CUSTOM_KEY_LENGTH = 1024; const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/; const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/; // --- Helpers --- -/** - * Validate a SHA-256 hex string. - */ function isValidSha256(hash: string): boolean { return SHA256_HEX_REGEX.test(hash); } -/** - * Build the S3 key from content hash. - * Format: {contentHash} (flat namespace, content-addressed) - */ function buildS3Key(contentHash: string): string { return contentHash; } -/** - * Validate a custom S3 key. - * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes. - */ function validateCustomKey(key: string): string | null { if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) { return 'INVALID_KEY_LENGTH: must be 1-1024 characters'; @@ -73,11 +64,6 @@ function validateCustomKey(key: string): string | null { return null; } -/** - * Derive an ltree path from a custom S3 key's directory portion. - * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1" - * Returns null if the key has no directory component. - */ function derivePathFromKey(key: string): string | null { const lastSlash = key.lastIndexOf('/'); if (lastSlash <= 0) return null; @@ -85,12 +71,6 @@ function derivePathFromKey(key: string): string | null { return dir.replace(/\//g, '.'); } -/** - * Resolve the database_id from the JWT context. 
- * The server middleware sets jwt.claims.database_id, which is accessible
- * via jwt_private.current_database_id() — a simple function call, no
- * metaschema query needed.
- */
 async function resolveDatabaseId(pgClient: any): Promise<string | null> {
   const result = await pgClient.query({
     text: `SELECT jwt_private.current_database_id() AS id`,
@@ -98,31 +78,15 @@ async function resolveDatabaseId(pgClient: any): Promise<string | null> {
   return result.rows[0]?.id ?? null;
 }
 
-// --- Plugin factory ---
-
-/**
- * Resolve the S3 config from the options. If the option is a lazy getter
- * function, call it (and cache the result). This avoids reading env vars
- * or constructing an S3Client at module-import time.
- */
 function resolveS3(options: PresignedUrlPluginOptions): S3Config {
   if (typeof options.s3 === 'function') {
     const resolved = options.s3();
-    // Cache so subsequent calls don't re-evaluate
     options.s3 = resolved;
     return resolved;
   }
   return options.s3;
 }
 
-/**
- * Build a per-database S3Config by overlaying storage_module overrides
- * onto the global S3Config.
- *
- * - Bucket name: from resolveBucketName(databaseId) if provided, else global
- * - publicUrlPrefix: from storageConfig.publicUrlPrefix if set, else global
- * - S3 client (credentials, endpoint): always global (shared IAM key)
- */
 function resolveS3ForDatabase(
   options: PresignedUrlPluginOptions,
   storageConfig: StorageModuleConfig,
@@ -145,16 +109,6 @@ function resolveS3ForDatabase(
   };
 }
 
-/**
- * Ensure the S3 bucket for a database exists, provisioning it lazily if needed.
- *
- * Checks an in-memory Set of known-provisioned bucket names. On the first
- * request for an unseen bucket, calls the `ensureBucketProvisioned` callback
- * (which creates the bucket with correct CORS, policies, etc.), then marks
- * it as provisioned so subsequent requests skip the check entirely.
- *
- * If no `ensureBucketProvisioned` callback is configured, this is a no-op.
- */
 async function ensureS3BucketExists(
   options: PresignedUrlPluginOptions,
   s3BucketName: string,
@@ -171,288 +125,414 @@ async function ensureS3BucketExists(
   log.info(`Lazy-provisioned S3 bucket "${s3BucketName}" successfully`);
 }
 
+// --- Plugin factory ---
+
 export function createPresignedUrlPlugin(
   options: PresignedUrlPluginOptions,
 ): GraphileConfig.Plugin {
-  return extendSchema(() => ({
-    typeDefs: gql`
-      input RequestUploadUrlInput {
-        """Bucket key (e.g., "public", "private")"""
-        bucketKey: String!
-        """
-        Owner entity ID for entity-scoped uploads.
-        Omit for app-level (database-wide) storage.
-        When provided, resolves the storage module for the entity type
-        that owns this entity instance (e.g., a data room ID, team ID).
-        """
-        ownerId: UUID
-        """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
-        contentHash: String!
-        """MIME type of the file (e.g., "image/png")"""
-        contentType: String!
-        """File size in bytes"""
-        size: Int!
-        """Original filename (optional, for display and Content-Disposition)"""
-        filename: String
-        """
-        Custom S3 key (e.g., "reports/2024/Q1.pdf").
-        Only allowed when the bucket has allow_custom_keys=true.
-        When omitted, key defaults to contentHash (content-addressed dedup).
-        When provided, the file is stored at this key.
-        Re-uploading to an existing key auto-creates a new version.
-        """
-        key: String
-      }
-
-      type RequestUploadUrlPayload {
-        """Presigned PUT URL (null if file was deduplicated)"""
-        uploadUrl: String
-        """The file ID (existing if deduplicated, new if fresh upload)"""
-        fileId: UUID!
-        """The S3 object key"""
-        key: String!
- """Whether this file was deduplicated (already exists with same hash)""" - deduplicated: Boolean! - """Presigned URL expiry time (null if deduplicated)""" - expiresAt: Datetime - """ID of the previous version (set when re-uploading to an existing custom key)""" - previousVersionId: UUID - } - - input BulkUploadFileInput { - """SHA-256 content hash computed by the client (hex-encoded, 64 chars)""" - contentHash: String! - """MIME type of the file (e.g., "image/png")""" - contentType: String! - """File size in bytes""" - size: Int! - """Original filename (optional, for display and Content-Disposition)""" - filename: String - """Custom S3 key (only when bucket has allow_custom_keys=true)""" - key: String - } - - input RequestBulkUploadUrlsInput { - """Bucket key (e.g., "public", "private")""" - bucketKey: String! - """Owner entity ID for entity-scoped uploads""" - ownerId: UUID - """Array of files to upload""" - files: [BulkUploadFileInput!]! - } - - type BulkUploadFilePayload { - """Presigned PUT URL (null if file was deduplicated)""" - uploadUrl: String - """The file ID""" - fileId: UUID! - """The S3 object key""" - key: String! - """Whether this file was deduplicated""" - deduplicated: Boolean! - """Presigned URL expiry time (null if deduplicated)""" - expiresAt: Datetime - """ID of the previous version (set when re-uploading to an existing custom key)""" - previousVersionId: UUID - """Index of this file in the input array (for client correlation)""" - index: Int! - } - - type RequestBulkUploadUrlsPayload { - """Array of results, one per input file""" - files: [BulkUploadFilePayload!]! - } - - extend type Mutation { - """ - Request a presigned URL for uploading a file directly to S3. - Client computes SHA-256 of the file content and provides it here. - If a file with the same hash already exists (dedup), returns the - existing file ID and deduplicated=true with no uploadUrl. - """ - requestUploadUrl( - input: RequestUploadUrlInput! - ): RequestUploadUrlPayload - - """ - Request presigned URLs for uploading multiple files in a single batch. - Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size). - Each file is processed independently — some may dedup while others get fresh URLs. - """ - requestBulkUploadUrls( - input: RequestBulkUploadUrlsInput! - ): RequestBulkUploadUrlsPayload - } - `, - plans: { - Mutation: { - requestUploadUrl(_$mutation: any, fieldArgs: any) { - const $input = fieldArgs.getRaw('input'); - const $withPgClient = (grafastContext() as any).get('withPgClient'); - const $pgSettings = (grafastContext() as any).get('pgSettings'); - const $combined = object({ - input: $input, - withPgClient: $withPgClient, - pgSettings: $pgSettings, + return { + name: 'PresignedUrlPlugin', + version: '1.0.0', + description: 'Per-table S3 storage middleware: upload fields on @storageBuckets, delete middleware on @storageFiles', + + after: ['PgAttributesPlugin', 'PgMutationCreatePlugin', 'PgMutationUpdateDeletePlugin'], + + schema: { + hooks: { + /** + * Add requestUploadUrl and requestBulkUploadUrls fields on @storageBuckets types. 
+ */ + GraphQLObjectType_fields(fields, build, context) { + const { + scope: { pgCodec, isPgClassType }, + } = context as any; + + if (!isPgClassType || !pgCodec || !pgCodec.attributes) { + return fields; + } + + const tags = (pgCodec.extensions as any)?.tags; + if (!tags?.storageBuckets) { + return fields; + } + + log.debug(`Adding upload fields to bucket type: ${pgCodec.name} (has @storageBuckets tag)`); + + const { + graphql: { + GraphQLString, + GraphQLNonNull, + GraphQLInt, + GraphQLBoolean, + GraphQLObjectType, + GraphQLList, + GraphQLInputObjectType, + }, + } = build; + + // --- Shared output types --- + + const UploadUrlPayloadType = new GraphQLObjectType({ + name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestUploadUrlPayload`, + fields: { + uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' }, + fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID' }, + key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' }, + deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated' }, + expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' }, + previousVersionId: { type: GraphQLString, description: 'ID of the previous version' }, + }, }); - return lambda($combined, async ({ input, withPgClient, pgSettings }: any) => { - const result = await processUpload(options, input, withPgClient, pgSettings); - return result; - }); - }, - requestBulkUploadUrls(_$mutation: any, fieldArgs: any) { - const $input = fieldArgs.getRaw('input'); - const $withPgClient = (grafastContext() as any).get('withPgClient'); - const $pgSettings = (grafastContext() as any).get('pgSettings'); - const $combined = object({ - input: $input, - withPgClient: $withPgClient, - pgSettings: $pgSettings, + const BulkUploadFilePayloadType = new GraphQLObjectType({ + name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFilePayload`, + fields: { + uploadUrl: { type: GraphQLString }, + fileId: { type: new GraphQLNonNull(GraphQLString) }, + key: { type: new GraphQLNonNull(GraphQLString) }, + deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) }, + expiresAt: { type: GraphQLString }, + previousVersionId: { type: GraphQLString }, + index: { type: new GraphQLNonNull(GraphQLInt), description: 'Index in the input array' }, + }, }); - return lambda($combined, async ({ input, withPgClient, pgSettings }: any) => { - const { bucketKey, ownerId, files } = input; - - if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) { - throw new Error('INVALID_BUCKET_KEY'); - } - if (!Array.isArray(files) || files.length === 0) { - throw new Error('INVALID_FILES: must provide at least one file'); - } - - return withPgClient(pgSettings, async (pgClient: any) => { - return pgClient.withTransaction(async (txClient: any) => { - const databaseId = await resolveDatabaseId(txClient); - if (!databaseId) { - throw new Error('DATABASE_NOT_FOUND'); - } + const BulkUploadUrlsPayloadType = new GraphQLObjectType({ + name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestBulkUploadUrlsPayload`, + fields: { + files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFilePayloadType))) }, + }, + }); - const storageConfig = ownerId - ? 
await getStorageModuleConfigForOwner(txClient, databaseId, ownerId) - : await getStorageModuleConfig(txClient, databaseId); - if (!storageConfig) { - throw new Error( - ownerId - ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId' - : 'STORAGE_MODULE_NOT_PROVISIONED', - ); - } + const BulkUploadFileInputType = new GraphQLInputObjectType({ + name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFileInput`, + fields: { + contentHash: { type: new GraphQLNonNull(GraphQLString) }, + contentType: { type: new GraphQLNonNull(GraphQLString) }, + size: { type: new GraphQLNonNull(GraphQLInt) }, + filename: { type: GraphQLString }, + key: { type: GraphQLString }, + }, + }); - // --- Validate bulk limits --- - if (files.length > storageConfig.maxBulkFiles) { - throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`); - } - const totalSize = files.reduce((sum: number, f: any) => sum + (f.size || 0), 0); - if (totalSize > storageConfig.maxBulkTotalSize) { - throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`); - } + // Capture codec for closure + const capturedCodec = pgCodec; + + return build.extend( + fields, + { + requestUploadUrl: context.fieldWithHooks( + { fieldName: 'requestUploadUrl' } as any, + { + description: 'Request a presigned URL for uploading a file to this bucket.', + type: UploadUrlPayloadType, + args: { + contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' }, + contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' }, + size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' }, + filename: { type: GraphQLString, description: 'Original filename (optional)' }, + key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' }, + }, + plan($parent: any, fieldArgs: any) { + const $bucketId = $parent.get('id'); + const $bucketKey = $parent.get('key'); + const $bucketType = $parent.get('type'); + const $bucketIsPublic = $parent.get('is_public'); + const $bucketAllowCustomKeys = $parent.get('allow_custom_keys'); + const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types'); + const $bucketMaxFileSize = $parent.get('max_file_size'); + const $bucketOwnerId = capturedCodec.attributes.owner_id ? 
$parent.get('owner_id') : lambda(null, (): null => null); + + const $contentHash = fieldArgs.getRaw('contentHash'); + const $contentType = fieldArgs.getRaw('contentType'); + const $size = fieldArgs.getRaw('size'); + const $filename = fieldArgs.getRaw('filename'); + const $customKey = fieldArgs.getRaw('key'); + + const $withPgClient = (grafastContext() as any).get('withPgClient'); + const $pgSettings = (grafastContext() as any).get('pgSettings'); + + const $combined = object({ + bucketId: $bucketId, + bucketKey: $bucketKey, + bucketType: $bucketType, + bucketIsPublic: $bucketIsPublic, + bucketAllowCustomKeys: $bucketAllowCustomKeys, + bucketAllowedMimeTypes: $bucketAllowedMimeTypes, + bucketMaxFileSize: $bucketMaxFileSize, + bucketOwnerId: $bucketOwnerId, + contentHash: $contentHash, + contentType: $contentType, + size: $size, + filename: $filename, + customKey: $customKey, + withPgClient: $withPgClient, + pgSettings: $pgSettings, + }); + + return lambda($combined, async (vals: any) => { + return vals.withPgClient(vals.pgSettings, async (pgClient: any) => { + return pgClient.withTransaction(async (txClient: any) => { + const databaseId = await resolveDatabaseId(txClient); + if (!databaseId) throw new Error('DATABASE_NOT_FOUND'); + + const allConfigs = await loadAllStorageModules(txClient, databaseId); + const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs); + if (!storageConfig) throw new Error('STORAGE_MODULE_NOT_FOUND'); + + const bucket: BucketConfig = { + id: vals.bucketId, + key: vals.bucketKey, + type: vals.bucketType, + is_public: vals.bucketIsPublic, + owner_id: vals.bucketOwnerId, + allowed_mime_types: vals.bucketAllowedMimeTypes, + max_file_size: vals.bucketMaxFileSize, + allow_custom_keys: vals.bucketAllowCustomKeys ?? false, + }; + + const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId); + await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins); + + return processSingleFile( + options, txClient, storageConfig, databaseId, bucket, s3ForDb, { + contentHash: vals.contentHash, + contentType: vals.contentType, + size: vals.size, + filename: vals.filename, + key: vals.customKey, + }, + ); + }); + }); + }); + }, + }, + ), + requestBulkUploadUrls: context.fieldWithHooks( + { fieldName: 'requestBulkUploadUrls' } as any, + { + description: 'Request presigned URLs for uploading multiple files to this bucket.', + type: BulkUploadUrlsPayloadType, + args: { + files: { + type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFileInputType))), + description: 'Array of files to upload', + }, + }, + plan($parent: any, fieldArgs: any) { + const $bucketId = $parent.get('id'); + const $bucketKey = $parent.get('key'); + const $bucketType = $parent.get('type'); + const $bucketIsPublic = $parent.get('is_public'); + const $bucketAllowCustomKeys = $parent.get('allow_custom_keys'); + const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types'); + const $bucketMaxFileSize = $parent.get('max_file_size'); + const $bucketOwnerId = capturedCodec.attributes.owner_id ? 
$parent.get('owner_id') : lambda(null, (): null => null); + + const $files = fieldArgs.getRaw('files'); + const $withPgClient = (grafastContext() as any).get('withPgClient'); + const $pgSettings = (grafastContext() as any).get('pgSettings'); + + const $combined = object({ + bucketId: $bucketId, + bucketKey: $bucketKey, + bucketType: $bucketType, + bucketIsPublic: $bucketIsPublic, + bucketAllowCustomKeys: $bucketAllowCustomKeys, + bucketAllowedMimeTypes: $bucketAllowedMimeTypes, + bucketMaxFileSize: $bucketMaxFileSize, + bucketOwnerId: $bucketOwnerId, + files: $files, + withPgClient: $withPgClient, + pgSettings: $pgSettings, + }); + + return lambda($combined, async (vals: any) => { + const { files } = vals; + if (!Array.isArray(files) || files.length === 0) { + throw new Error('INVALID_FILES: must provide at least one file'); + } + + return vals.withPgClient(vals.pgSettings, async (pgClient: any) => { + return pgClient.withTransaction(async (txClient: any) => { + const databaseId = await resolveDatabaseId(txClient); + if (!databaseId) throw new Error('DATABASE_NOT_FOUND'); + + const allConfigs = await loadAllStorageModules(txClient, databaseId); + const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs); + if (!storageConfig) throw new Error('STORAGE_MODULE_NOT_FOUND'); + + if (files.length > storageConfig.maxBulkFiles) { + throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`); + } + const totalSize = files.reduce((sum: number, f: any) => sum + (f.size || 0), 0); + if (totalSize > storageConfig.maxBulkTotalSize) { + throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`); + } + + const bucket: BucketConfig = { + id: vals.bucketId, + key: vals.bucketKey, + type: vals.bucketType, + is_public: vals.bucketIsPublic, + owner_id: vals.bucketOwnerId, + allowed_mime_types: vals.bucketAllowedMimeTypes, + max_file_size: vals.bucketMaxFileSize, + allow_custom_keys: vals.bucketAllowCustomKeys ?? false, + }; + + const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId); + await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins); + + const results = []; + for (let i = 0; i < files.length; i++) { + const result = await processSingleFile( + options, txClient, storageConfig, databaseId, bucket, s3ForDb, files[i], + ); + results.push({ ...result, index: i }); + } + + return { files: results }; + }); + }); + }); + }, + }, + ), + }, + `PresignedUrlPlugin adding upload fields to ${pgCodec.name}`, + ); + }, - const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId); - if (!bucket) { - throw new Error('BUCKET_NOT_FOUND'); + /** + * Wrap delete* mutations on @storageFiles-tagged tables with S3 cleanup. + * + * Pattern: identical to graphile-bucket-provisioner-plugin's create/update hooks. + * 1. Read the file row BEFORE delete (need key + bucket_id for S3 cleanup) + * 2. Call PostGraphile's generated delete (RLS enforced) + * 3. If delete succeeded, check refcount and attempt sync S3 delete + * 4. 
AFTER DELETE trigger (constructive-db) enqueues async GC job as fallback + */ + GraphQLObjectType_fields_field(field: any, build: any, context: any) { + const { + scope: { isRootMutation, fieldName, pgCodec }, + } = context; + + if (!isRootMutation || !pgCodec || !pgCodec.attributes) { + return field; + } + + const tags = pgCodec.extensions?.tags; + if (!tags?.storageFiles) { + return field; + } + + if (!fieldName.startsWith('delete')) { + return field; + } + + log.debug(`Wrapping delete mutation "${fieldName}" with S3 cleanup (codec: ${pgCodec.name})`); + + const defaultResolver = (obj: any) => obj[fieldName]; + const { resolve: oldResolve = defaultResolver, ...rest } = field; + const capturedCodec = pgCodec; + + return { + ...rest, + async resolve(source: any, args: any, graphqlContext: any, info: any) { + // Extract the file ID from the mutation input + const inputKey = Object.keys(args.input || {}).find( + (k) => k !== 'clientMutationId', + ); + const fileInput = inputKey ? args.input[inputKey] : null; + + let fileRow: { key: string; bucket_id: string } | null = null; + + if (fileInput) { + // Read the file row BEFORE delete to get the S3 key + bucket_id + const withPgClient = graphqlContext.withPgClient; + const pgSettings = graphqlContext.pgSettings; + + if (withPgClient) { + try { + await withPgClient(pgSettings, async (pgClient: any) => { + const databaseId = await resolveDatabaseId(pgClient); + if (!databaseId) return; + + const allConfigs = await loadAllStorageModules(pgClient, databaseId); + const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs); + if (!storageConfig) return; + + // Read the file row (RLS enforced) + const result = await pgClient.query({ + text: `SELECT key, bucket_id FROM ${storageConfig.filesQualifiedName} WHERE id = $1 LIMIT 1`, + values: [fileInput], + }); + if (result.rows.length > 0) { + fileRow = result.rows[0] as { key: string; bucket_id: string }; + } + }); + } catch (err: any) { + log.warn(`Pre-delete file lookup failed: ${err.message}`); + } } - - // --- Ensure S3 bucket exists once for the batch --- - const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId); - await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins); - - // --- Process each file --- - const results = []; - for (let i = 0; i < files.length; i++) { - const fileInput = files[i]; - const singleInput = { - ...fileInput, - bucketKey, - ownerId, - }; - const result = await processSingleFile( - options, txClient, storageConfig, databaseId, bucket, s3ForDb, singleInput, - ); - results.push({ ...result, index: i }); + } + + // Call PostGraphile's generated delete (RLS enforced) + const result = await oldResolve(source, args, graphqlContext, info); + + // Attempt sync S3 cleanup if we have the file row + if (fileRow) { + const withPgClient = graphqlContext.withPgClient; + const pgSettings = graphqlContext.pgSettings; + + if (withPgClient) { + try { + await withPgClient(pgSettings, async (pgClient: any) => { + const databaseId = await resolveDatabaseId(pgClient); + if (!databaseId) return; + + const allConfigs = await loadAllStorageModules(pgClient, databaseId); + const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs); + if (!storageConfig) return; + + // Check refcount: any other file with the same key in this bucket? 
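+                    // Content-addressed keys can be shared by several rows, so the
+                    // S3 object is only removed when this row was the last reference.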
+ const refResult = await pgClient.query({ + text: `SELECT COUNT(*)::int AS ref_count FROM ${storageConfig.filesQualifiedName} WHERE key = $1 AND bucket_id = $2`, + values: [fileRow!.key, fileRow!.bucket_id], + }); + const refCount = refResult.rows[0]?.ref_count ?? 0; + + if (refCount > 0) { + log.info(`File deleted from DB; S3 key ${fileRow!.key} still referenced by ${refCount} file(s)`); + return; + } + + // No other references — attempt sync S3 delete + const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId); + await deleteS3Object(s3ForDb, fileRow!.key); + log.info(`Sync S3 delete succeeded for key=${fileRow!.key}`); + }); + } catch (err: any) { + // Sync S3 delete failed — the AFTER DELETE trigger has enqueued an async GC job + log.warn(`Sync S3 delete failed for key=${fileRow.key}; async GC job will retry: ${err.message}`); + } } + } - return { files: results }; - }); - }); - }); + return result; + }, + }; }, }, }, - })); + }; } // --- Shared upload logic --- -/** - * Process a single upload request (used by both requestUploadUrl and requestBulkUploadUrls). - */ -async function processUpload( - options: PresignedUrlPluginOptions, - input: any, - withPgClient: any, - pgSettings: any, -) { - const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input; - - if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) { - throw new Error('INVALID_BUCKET_KEY'); - } - if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) { - throw new Error('INVALID_CONTENT_HASH'); - } - if (!isValidSha256(contentHash)) { - throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256'); - } - if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) { - throw new Error('INVALID_CONTENT_TYPE'); - } - - return withPgClient(pgSettings, async (pgClient: any) => { - return pgClient.withTransaction(async (txClient: any) => { - const databaseId = await resolveDatabaseId(txClient); - if (!databaseId) { - throw new Error('DATABASE_NOT_FOUND'); - } - - const storageConfig = ownerId - ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId) - : await getStorageModuleConfig(txClient, databaseId); - if (!storageConfig) { - throw new Error( - ownerId - ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId' - : 'STORAGE_MODULE_NOT_PROVISIONED', - ); - } - - if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) { - throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`); - } - if (filename !== undefined && filename !== null) { - if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) { - throw new Error('INVALID_FILENAME'); - } - } - - const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId); - if (!bucket) { - throw new Error('BUCKET_NOT_FOUND'); - } - - const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId); - await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins); - - return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input); - }); - }); -} - -/** - * Process a single file upload within an already-resolved context. - * Handles dedup, custom keys, versioning, and auto-path derivation. 
- */ async function processSingleFile( options: PresignedUrlPluginOptions, txClient: any, @@ -464,8 +544,10 @@ async function processSingleFile( ) { const { contentHash, contentType, size, filename, key: customKey } = input; - // --- Validate inputs --- - if (!contentHash || !isValidSha256(contentHash)) { + if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) { + throw new Error('INVALID_CONTENT_HASH'); + } + if (!isValidSha256(contentHash)) { throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256'); } if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) { @@ -480,7 +562,7 @@ async function processSingleFile( } } - // --- Validate content type against bucket's allowed_mime_types --- + // Validate content type against bucket's allowed_mime_types if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) { const allowed = bucket.allowed_mime_types as string[]; const isAllowed = allowed.some((pattern: string) => { @@ -496,12 +578,12 @@ async function processSingleFile( } } - // --- Validate size against bucket's max_file_size --- + // Validate size against bucket's max_file_size if (bucket.max_file_size && size > bucket.max_file_size) { throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`); } - // --- Determine S3 key --- + // Determine S3 key let s3Key: string; let isCustomKey = false; if (customKey) { @@ -518,12 +600,10 @@ async function processSingleFile( s3Key = buildS3Key(contentHash); } - // --- Dedup / versioning check --- + // Dedup / versioning check let previousVersionId: string | null = null; if (isCustomKey) { - // Custom key mode: check if a file with this key already exists in this bucket. - // If so, auto-version by linking via previous_version_id. const existingResult = await txClient.query({ text: `SELECT id, content_hash FROM ${storageConfig.filesQualifiedName} @@ -536,7 +616,6 @@ async function processSingleFile( if (existingResult.rows.length > 0) { const existing = existingResult.rows[0]; - // Same content hash = true dedup (no new upload needed) if (existing.content_hash === contentHash) { log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`); return { @@ -548,12 +627,10 @@ async function processSingleFile( previousVersionId: null as string | null, }; } - // Different content = new version previousVersionId = existing.id; log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`); } } else { - // Hash-based mode: dedup by content_hash in this bucket const dedupResult = await txClient.query({ text: `SELECT id FROM ${storageConfig.filesQualifiedName} @@ -578,29 +655,25 @@ async function processSingleFile( } } - // --- Auto-derive ltree path from custom key directory (only when has_path_shares) --- + // Auto-derive ltree path from custom key directory (only when has_path_shares) const derivedPath = isCustomKey && storageConfig.hasPathShares ? 
derivePathFromKey(s3Key) : null;
 
-  // --- Create file record ---
+  // Create file record
   const hasOwnerColumn = storageConfig.membershipType !== null;
   const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
   const values: any[] = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
-  let paramIdx = values.length;
   if (hasOwnerColumn) {
     columns.push('owner_id');
     values.push(bucket.owner_id);
-    paramIdx = values.length;
   }
   if (previousVersionId) {
     columns.push('previous_version_id');
     values.push(previousVersionId);
-    paramIdx = values.length;
   }
   if (derivedPath) {
     columns.push('path');
     values.push(derivedPath);
-    paramIdx = values.length;
   }
 
   const placeholders = values.map((_: any, i: number) => `$${i + 1}`).join(', ');
@@ -614,7 +687,7 @@ async function processSingleFile(
 
   const fileId = fileResult.rows[0].id;
 
-  // --- Generate presigned PUT URL ---
+  // Generate presigned PUT URL
   const uploadUrl = await generatePresignedPutUrl(
     s3ForDb,
     s3Key,
diff --git a/graphile/graphile-presigned-url-plugin/src/s3-signer.ts b/graphile/graphile-presigned-url-plugin/src/s3-signer.ts
index ded687974..c4fbba5d6 100644
--- a/graphile/graphile-presigned-url-plugin/src/s3-signer.ts
+++ b/graphile/graphile-presigned-url-plugin/src/s3-signer.ts
@@ -3,6 +3,7 @@ import {
   PutObjectCommand,
   GetObjectCommand,
   HeadObjectCommand,
+  DeleteObjectCommand,
 } from '@aws-sdk/client-s3';
 import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
 import { Logger } from '@pgpmjs/logger';
@@ -78,9 +79,28 @@ export async function generatePresignedGetUrl(
 }
 
 /**
- * Check if an object exists in S3 and optionally verify its content-type.
+ * Delete an object from S3.
+ *
+ * Idempotent — deleting a non-existent key is a no-op (S3 returns 204).
  *
- * Checks whether an object exists in S3 and retrieves its content-type.
+ * @param s3Config - S3 client and bucket configuration
+ * @param key - S3 object key to delete
+ */
+export async function deleteS3Object(
+  s3Config: S3Config,
+  key: string,
+): Promise<void> {
+  await s3Config.client.send(
+    new DeleteObjectCommand({
+      Bucket: s3Config.bucket,
+      Key: key,
+    }),
+  );
+  log.debug(`Deleted S3 object: bucket=${s3Config.bucket}, key=${key}`);
+}
+
+/**
+ * Check if an object exists in S3 and optionally verify its content-type.
  *
  * @param s3Config - S3 client and bucket configuration
  * @param key - S3 object key
diff --git a/graphile/graphile-presigned-url-plugin/src/storage-module-cache.ts b/graphile/graphile-presigned-url-plugin/src/storage-module-cache.ts
index 4ec64e5c4..cf1c7e482 100644
--- a/graphile/graphile-presigned-url-plugin/src/storage-module-cache.ts
+++ b/graphile/graphile-presigned-url-plugin/src/storage-module-cache.ts
@@ -321,6 +321,66 @@ export async function resolveStorageModuleByFileId(
   return null;
 }
 
+/**
+ * Load all storage modules for a database, using the LRU cache.
+ *
+ * Returns an array of all StorageModuleConfig entries (app-level + entity-scoped).
+ * The result is cached per-database so subsequent calls avoid the DB query.
+ */
+export async function loadAllStorageModules(
+  pgClient: { query: (opts: { text: string; values?: unknown[] }) => Promise<{ rows: unknown[] }> },
+  databaseId: string,
+): Promise<StorageModuleConfig[]> {
+  const cacheKey = `storage:${databaseId}:all-list`;
+  const cached = storageModuleCache.get(cacheKey);
+  if (cached) {
+    return (cached as any)._allConfigs as StorageModuleConfig[];
+  }
+
+  log.debug(`Loading all storage modules for database ${databaseId}`);
+  const result = await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] });
+  const configs = (result.rows as StorageModuleRow[]).map(buildConfig);
+
+  // Cache each individual config by its membership type
+  for (const config of configs) {
+    const key = config.membershipType === null
+      ? `storage:${databaseId}:app`
+      : `storage:${databaseId}:mt:${config.membershipType}`;
+    storageModuleCache.set(key, config);
+  }
+
+  // Store the full list under a sentinel key
+  const sentinel = { ...(configs[0] || {}), _allConfigs: configs } as any;
+  storageModuleCache.set(cacheKey, sentinel);
+
+  return configs;
+}
+
+/**
+ * Resolve the storage module config from a PostGraphile pgCodec.
+ *
+ * Matches the codec's schema + table name against cached storage modules.
+ * Works for both files codecs (@storageFiles) and buckets codecs (@storageBuckets).
+ *
+ * @param pgCodec - The PostGraphile codec (has extensions.pg.schemaName, name)
+ * @param allConfigs - All storage module configs for this database
+ * @returns The matching StorageModuleConfig or null
+ */
+export function resolveStorageConfigFromCodec(
+  pgCodec: { name: string; extensions?: { pg?: { schemaName?: string } }; sqlType?: string },
+  allConfigs: StorageModuleConfig[],
+): StorageModuleConfig | null {
+  const schemaName = pgCodec.extensions?.pg?.schemaName;
+  const tableName = pgCodec.name;
+
+  if (!schemaName || !tableName) return null;
+
+  return allConfigs.find((c) =>
+    (c.filesTableName === tableName && c.schemaName === schemaName) ||
+    (c.bucketsTableName === tableName && c.schemaName === schemaName),
+  ) || null;
+}
+
 // --- Bucket metadata cache ---
 
 /**
diff --git a/graphql/server-test/__tests__/__snapshots__/schema-snapshot.test.ts.snap b/graphql/server-test/__tests__/__snapshots__/schema-snapshot.test.ts.snap
index dec27269b..ea8384a7d 100644
--- a/graphql/server-test/__tests__/__snapshots__/schema-snapshot.test.ts.snap
+++ b/graphql/server-test/__tests__/__snapshots__/schema-snapshot.test.ts.snap
@@ -1,7 +1,160 @@
 // Jest Snapshot v1, https://jestjs.io/docs/snapshot-testing
 
 exports[`Schema Snapshot should generate consistent GraphQL SDL from the test schema 1`] = `
-""""A connection to a list of \`PostTag\` values."""
+""""The root query type which gives access points into the data universe."""
+type Query {
+  """Reads and enables pagination through a set of \`PostTag\`."""
+  postTags(
+    """Only read the first \`n\` values of the set."""
+    first: Int
+
+    """Only read the last \`n\` values of the set."""
+    last: Int
+
+    """
+    Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor
+    based pagination. May not be used with \`last\`.
+    """
+    offset: Int
+
+    """Read all values in the set before (above) this cursor."""
+    before: Cursor
+
+    """Read all values in the set after (below) this cursor."""
+    after: Cursor
+
+    """
+    A filter to be used in determining which values should be returned by the collection.
+    """
+    where: PostTagFilter
+
+    """The method to use when ordering \`PostTag\`."""
+    orderBy: [PostTagOrderBy!]
= [PRIMARY_KEY_ASC] + ): PostTagConnection + + """Reads and enables pagination through a set of \`Tag\`.""" + tags( + """Only read the first \`n\` values of the set.""" + first: Int + + """Only read the last \`n\` values of the set.""" + last: Int + + """ + Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor + based pagination. May not be used with \`last\`. + """ + offset: Int + + """Read all values in the set before (above) this cursor.""" + before: Cursor + + """Read all values in the set after (below) this cursor.""" + after: Cursor + + """ + A filter to be used in determining which values should be returned by the collection. + """ + where: TagFilter + + """The method to use when ordering \`Tag\`.""" + orderBy: [TagOrderBy!] = [PRIMARY_KEY_ASC] + ): TagConnection + + """Reads and enables pagination through a set of \`User\`.""" + users( + """Only read the first \`n\` values of the set.""" + first: Int + + """Only read the last \`n\` values of the set.""" + last: Int + + """ + Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor + based pagination. May not be used with \`last\`. + """ + offset: Int + + """Read all values in the set before (above) this cursor.""" + before: Cursor + + """Read all values in the set after (below) this cursor.""" + after: Cursor + + """ + A filter to be used in determining which values should be returned by the collection. + """ + where: UserFilter + + """The method to use when ordering \`User\`.""" + orderBy: [UserOrderBy!] = [PRIMARY_KEY_ASC] + ): UserConnection + + """Reads and enables pagination through a set of \`Comment\`.""" + comments( + """Only read the first \`n\` values of the set.""" + first: Int + + """Only read the last \`n\` values of the set.""" + last: Int + + """ + Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor + based pagination. May not be used with \`last\`. + """ + offset: Int + + """Read all values in the set before (above) this cursor.""" + before: Cursor + + """Read all values in the set after (below) this cursor.""" + after: Cursor + + """ + A filter to be used in determining which values should be returned by the collection. + """ + where: CommentFilter + + """The method to use when ordering \`Comment\`.""" + orderBy: [CommentOrderBy!] = [PRIMARY_KEY_ASC] + ): CommentConnection + + """Reads and enables pagination through a set of \`Post\`.""" + posts( + """Only read the first \`n\` values of the set.""" + first: Int + + """Only read the last \`n\` values of the set.""" + last: Int + + """ + Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor + based pagination. May not be used with \`last\`. + """ + offset: Int + + """Read all values in the set before (above) this cursor.""" + before: Cursor + + """Read all values in the set after (below) this cursor.""" + after: Cursor + + """ + A filter to be used in determining which values should be returned by the collection. + """ + where: PostFilter + + """The method to use when ordering \`Post\`.""" + orderBy: [PostOrderBy!] = [PRIMARY_KEY_ASC] + ): PostConnection + + """ + Metadata about the database schema, including tables, fields, indexes, and constraints. Useful for code generation tools. + """ + _meta: MetaSchema +} + +"""A connection to a list of \`PostTag\` values.""" type PostTagConnection { """A list of \`PostTag\` objects.""" nodes: [PostTag]! 
@@ -1414,58 +1567,196 @@ type MetaQuery { delete: String } -"""The output of our create \`PostTag\` mutation.""" -type CreatePostTagPayload { - """ - The exact same \`clientMutationId\` that was provided in the mutation input, - unchanged and unused. May be used by a client to track mutations. - """ - clientMutationId: String +""" +The root mutation type which contains root level fields which mutate data. +""" +type Mutation { + """Creates a single \`PostTag\`.""" + createPostTag( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: CreatePostTagInput! + ): CreatePostTagPayload - """The \`PostTag\` that was created by this mutation.""" - postTag: PostTag + """Creates a single \`Tag\`.""" + createTag( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: CreateTagInput! + ): CreateTagPayload - """ - Our root query field type. Allows us to run any query from our mutation payload. - """ - query: Query + """Creates a single \`User\`.""" + createUser( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: CreateUserInput! + ): CreateUserPayload - """An edge for our \`PostTag\`. May be used by Relay 1.""" - postTagEdge( - """The method to use when ordering \`PostTag\`.""" - orderBy: [PostTagOrderBy!]! = [PRIMARY_KEY_ASC] - ): PostTagEdge -} + """Creates a single \`Comment\`.""" + createComment( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: CreateCommentInput! + ): CreateCommentPayload -"""All input for the create \`PostTag\` mutation.""" -input CreatePostTagInput { - """ - An arbitrary string value with no semantic meaning. Will be included in the - payload verbatim. May be used to track mutations by the client. - """ - clientMutationId: String + """Creates a single \`Post\`.""" + createPost( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: CreatePostInput! + ): CreatePostPayload - """The \`PostTag\` to be created by this mutation.""" - postTag: PostTagInput! -} + """Updates a single \`PostTag\` using a unique key and a patch.""" + updatePostTag( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: UpdatePostTagInput! + ): UpdatePostTagPayload -"""An input for mutations affecting \`PostTag\`""" -input PostTagInput { - id: UUID - postId: UUID! - tagId: UUID! - createdAt: Datetime -} + """Updates a single \`Tag\` using a unique key and a patch.""" + updateTag( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: UpdateTagInput! + ): UpdateTagPayload -"""The output of our create \`Tag\` mutation.""" -type CreateTagPayload { - """ - The exact same \`clientMutationId\` that was provided in the mutation input, - unchanged and unused. May be used by a client to track mutations. - """ - clientMutationId: String + """Updates a single \`User\` using a unique key and a patch.""" + updateUser( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: UpdateUserInput! 
+ ): UpdateUserPayload - """The \`Tag\` that was created by this mutation.""" + """Updates a single \`Comment\` using a unique key and a patch.""" + updateComment( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: UpdateCommentInput! + ): UpdateCommentPayload + + """Updates a single \`Post\` using a unique key and a patch.""" + updatePost( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: UpdatePostInput! + ): UpdatePostPayload + + """Deletes a single \`PostTag\` using a unique key.""" + deletePostTag( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: DeletePostTagInput! + ): DeletePostTagPayload + + """Deletes a single \`Tag\` using a unique key.""" + deleteTag( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: DeleteTagInput! + ): DeleteTagPayload + + """Deletes a single \`User\` using a unique key.""" + deleteUser( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: DeleteUserInput! + ): DeleteUserPayload + + """Deletes a single \`Comment\` using a unique key.""" + deleteComment( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: DeleteCommentInput! + ): DeleteCommentPayload + + """Deletes a single \`Post\` using a unique key.""" + deletePost( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: DeletePostInput! + ): DeletePostPayload + + """ + Provision an S3 bucket for a logical bucket in the database. + Reads the bucket config via RLS, then creates and configures + the S3 bucket with the appropriate privacy policies, CORS rules, + and lifecycle settings. + """ + provisionBucket( + """ + The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. + """ + input: ProvisionBucketInput! + ): ProvisionBucketPayload +} + +"""The output of our create \`PostTag\` mutation.""" +type CreatePostTagPayload { + """ + The exact same \`clientMutationId\` that was provided in the mutation input, + unchanged and unused. May be used by a client to track mutations. + """ + clientMutationId: String + + """The \`PostTag\` that was created by this mutation.""" + postTag: PostTag + + """ + Our root query field type. Allows us to run any query from our mutation payload. + """ + query: Query + + """An edge for our \`PostTag\`. May be used by Relay 1.""" + postTagEdge( + """The method to use when ordering \`PostTag\`.""" + orderBy: [PostTagOrderBy!]! = [PRIMARY_KEY_ASC] + ): PostTagEdge +} + +"""All input for the create \`PostTag\` mutation.""" +input CreatePostTagInput { + """ + An arbitrary string value with no semantic meaning. Will be included in the + payload verbatim. May be used to track mutations by the client. + """ + clientMutationId: String + + """The \`PostTag\` to be created by this mutation.""" + postTag: PostTagInput! +} + +"""An input for mutations affecting \`PostTag\`""" +input PostTagInput { + id: UUID + postId: UUID! + tagId: UUID! 
+ createdAt: Datetime +} + +"""The output of our create \`Tag\` mutation.""" +type CreateTagPayload { + """ + The exact same \`clientMutationId\` that was provided in the mutation input, + unchanged and unused. May be used by a client to track mutations. + """ + clientMutationId: String + + """The \`Tag\` that was created by this mutation.""" tag: Tag """ @@ -2066,436 +2357,6 @@ input DeletePostInput { id: UUID! } -input RequestUploadUrlInput { - """Bucket key (e.g., "public", "private")""" - bucketKey: String! - - """ - Owner entity ID for entity-scoped uploads. - Omit for app-level (database-wide) storage. - When provided, resolves the storage module for the entity type - that owns this entity instance (e.g., a data room ID, team ID). - """ - ownerId: UUID - - """SHA-256 content hash computed by the client (hex-encoded, 64 chars)""" - contentHash: String! - - """MIME type of the file (e.g., "image/png")""" - contentType: String! - - """File size in bytes""" - size: Int! - - """Original filename (optional, for display and Content-Disposition)""" - filename: String - - """ - Custom S3 key (e.g., "reports/2024/Q1.pdf"). - Only allowed when the bucket has allow_custom_keys=true. - When omitted, key defaults to contentHash (content-addressed dedup). - When provided, the file is stored at this key. - Re-uploading to an existing key auto-creates a new version. - """ - key: String -} - -type RequestUploadUrlPayload { - """Presigned PUT URL (null if file was deduplicated)""" - uploadUrl: String - - """The file ID (existing if deduplicated, new if fresh upload)""" - fileId: UUID! - - """The S3 object key""" - key: String! - - """Whether this file was deduplicated (already exists with same hash)""" - deduplicated: Boolean! - - """Presigned URL expiry time (null if deduplicated)""" - expiresAt: Datetime - - """ - ID of the previous version (set when re-uploading to an existing custom key) - """ - previousVersionId: UUID -} - -input BulkUploadFileInput { - """SHA-256 content hash computed by the client (hex-encoded, 64 chars)""" - contentHash: String! - - """MIME type of the file (e.g., "image/png")""" - contentType: String! - - """File size in bytes""" - size: Int! - - """Original filename (optional, for display and Content-Disposition)""" - filename: String - - """Custom S3 key (only when bucket has allow_custom_keys=true)""" - key: String -} - -input RequestBulkUploadUrlsInput { - """Bucket key (e.g., "public", "private")""" - bucketKey: String! - - """Owner entity ID for entity-scoped uploads""" - ownerId: UUID - - """Array of files to upload""" - files: [BulkUploadFileInput!]! -} - -type BulkUploadFilePayload { - """Presigned PUT URL (null if file was deduplicated)""" - uploadUrl: String - - """The file ID""" - fileId: UUID! - - """The S3 object key""" - key: String! - - """Whether this file was deduplicated""" - deduplicated: Boolean! - - """Presigned URL expiry time (null if deduplicated)""" - expiresAt: Datetime - - """ - ID of the previous version (set when re-uploading to an existing custom key) - """ - previousVersionId: UUID - - """Index of this file in the input array (for client correlation)""" - index: Int! -} - -type RequestBulkUploadUrlsPayload { - """Array of results, one per input file""" - files: [BulkUploadFilePayload!]! 
-} - -"""The root query type which gives access points into the data universe.""" -type Query { - """Reads and enables pagination through a set of \`PostTag\`.""" - postTags( - """Only read the first \`n\` values of the set.""" - first: Int - - """Only read the last \`n\` values of the set.""" - last: Int - - """ - Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor - based pagination. May not be used with \`last\`. - """ - offset: Int - - """Read all values in the set before (above) this cursor.""" - before: Cursor - - """Read all values in the set after (below) this cursor.""" - after: Cursor - - """ - A filter to be used in determining which values should be returned by the collection. - """ - where: PostTagFilter - - """The method to use when ordering \`PostTag\`.""" - orderBy: [PostTagOrderBy!] = [PRIMARY_KEY_ASC] - ): PostTagConnection - - """Reads and enables pagination through a set of \`Tag\`.""" - tags( - """Only read the first \`n\` values of the set.""" - first: Int - - """Only read the last \`n\` values of the set.""" - last: Int - - """ - Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor - based pagination. May not be used with \`last\`. - """ - offset: Int - - """Read all values in the set before (above) this cursor.""" - before: Cursor - - """Read all values in the set after (below) this cursor.""" - after: Cursor - - """ - A filter to be used in determining which values should be returned by the collection. - """ - where: TagFilter - - """The method to use when ordering \`Tag\`.""" - orderBy: [TagOrderBy!] = [PRIMARY_KEY_ASC] - ): TagConnection - - """Reads and enables pagination through a set of \`User\`.""" - users( - """Only read the first \`n\` values of the set.""" - first: Int - - """Only read the last \`n\` values of the set.""" - last: Int - - """ - Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor - based pagination. May not be used with \`last\`. - """ - offset: Int - - """Read all values in the set before (above) this cursor.""" - before: Cursor - - """Read all values in the set after (below) this cursor.""" - after: Cursor - - """ - A filter to be used in determining which values should be returned by the collection. - """ - where: UserFilter - - """The method to use when ordering \`User\`.""" - orderBy: [UserOrderBy!] = [PRIMARY_KEY_ASC] - ): UserConnection - - """Reads and enables pagination through a set of \`Comment\`.""" - comments( - """Only read the first \`n\` values of the set.""" - first: Int - - """Only read the last \`n\` values of the set.""" - last: Int - - """ - Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor - based pagination. May not be used with \`last\`. - """ - offset: Int - - """Read all values in the set before (above) this cursor.""" - before: Cursor - - """Read all values in the set after (below) this cursor.""" - after: Cursor - - """ - A filter to be used in determining which values should be returned by the collection. - """ - where: CommentFilter - - """The method to use when ordering \`Comment\`.""" - orderBy: [CommentOrderBy!] = [PRIMARY_KEY_ASC] - ): CommentConnection - - """Reads and enables pagination through a set of \`Post\`.""" - posts( - """Only read the first \`n\` values of the set.""" - first: Int - - """Only read the last \`n\` values of the set.""" - last: Int - - """ - Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor - based pagination. May not be used with \`last\`. 
- """ - offset: Int - - """Read all values in the set before (above) this cursor.""" - before: Cursor - - """Read all values in the set after (below) this cursor.""" - after: Cursor - - """ - A filter to be used in determining which values should be returned by the collection. - """ - where: PostFilter - - """The method to use when ordering \`Post\`.""" - orderBy: [PostOrderBy!] = [PRIMARY_KEY_ASC] - ): PostConnection - - """ - Metadata about the database schema, including tables, fields, indexes, and constraints. Useful for code generation tools. - """ - _meta: MetaSchema -} - -""" -The root mutation type which contains root level fields which mutate data. -""" -type Mutation { - """Creates a single \`PostTag\`.""" - createPostTag( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: CreatePostTagInput! - ): CreatePostTagPayload - - """Creates a single \`Tag\`.""" - createTag( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: CreateTagInput! - ): CreateTagPayload - - """Creates a single \`User\`.""" - createUser( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: CreateUserInput! - ): CreateUserPayload - - """Creates a single \`Comment\`.""" - createComment( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: CreateCommentInput! - ): CreateCommentPayload - - """Creates a single \`Post\`.""" - createPost( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: CreatePostInput! - ): CreatePostPayload - - """Updates a single \`PostTag\` using a unique key and a patch.""" - updatePostTag( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: UpdatePostTagInput! - ): UpdatePostTagPayload - - """Updates a single \`Tag\` using a unique key and a patch.""" - updateTag( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: UpdateTagInput! - ): UpdateTagPayload - - """Updates a single \`User\` using a unique key and a patch.""" - updateUser( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: UpdateUserInput! - ): UpdateUserPayload - - """Updates a single \`Comment\` using a unique key and a patch.""" - updateComment( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: UpdateCommentInput! - ): UpdateCommentPayload - - """Updates a single \`Post\` using a unique key and a patch.""" - updatePost( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: UpdatePostInput! - ): UpdatePostPayload - - """Deletes a single \`PostTag\` using a unique key.""" - deletePostTag( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: DeletePostTagInput! 
- ): DeletePostTagPayload - - """Deletes a single \`Tag\` using a unique key.""" - deleteTag( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: DeleteTagInput! - ): DeleteTagPayload - - """Deletes a single \`User\` using a unique key.""" - deleteUser( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: DeleteUserInput! - ): DeleteUserPayload - - """Deletes a single \`Comment\` using a unique key.""" - deleteComment( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: DeleteCommentInput! - ): DeleteCommentPayload - - """Deletes a single \`Post\` using a unique key.""" - deletePost( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: DeletePostInput! - ): DeletePostPayload - - """ - Request a presigned URL for uploading a file directly to S3. - Client computes SHA-256 of the file content and provides it here. - If a file with the same hash already exists (dedup), returns the - existing file ID and deduplicated=true with no uploadUrl. - """ - requestUploadUrl( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: RequestUploadUrlInput! - ): RequestUploadUrlPayload - - """ - Request presigned URLs for uploading multiple files in a single batch. - Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size). - Each file is processed independently — some may dedup while others get fresh URLs. - """ - requestBulkUploadUrls( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: RequestBulkUploadUrlsInput! - ): RequestBulkUploadUrlsPayload - - """ - Provision an S3 bucket for a logical bucket in the database. - Reads the bucket config via RLS, then creates and configures - the S3 bucket with the appropriate privacy policies, CORS rules, - and lifecycle settings. - """ - provisionBucket( - """ - The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields. - """ - input: ProvisionBucketInput! - ): ProvisionBucketPayload -} - input ProvisionBucketInput { """The logical bucket key (e.g., "public", "private")""" bucketKey: String! diff --git a/graphql/server-test/__tests__/upload.integration.test.ts b/graphql/server-test/__tests__/upload.integration.test.ts index d9a07e529..71175077d 100644 --- a/graphql/server-test/__tests__/upload.integration.test.ts +++ b/graphql/server-test/__tests__/upload.integration.test.ts @@ -1,8 +1,8 @@ /** * Upload Integration Tests — end-to-end presigned URL flow * - * Exercises the full upload pipeline for both public and private files: - * requestUploadUrl → PUT to presigned URL → downloadUrl + * Exercises the per-table upload pipeline: + * query bucket → requestUploadUrl field → PUT to presigned URL → downloadUrl * * Uses real MinIO (available in CI as minio_cdn service) and lazy bucket * provisioning. No RLS — that will be tested in constructive-db. @@ -42,29 +42,33 @@ const seedFiles = [ // --- GraphQL operations --- const REQUEST_UPLOAD_URL = ` - mutation RequestUploadUrl($input: RequestUploadUrlInput!) 
{
-    requestUploadUrl(input: $input) {
-      uploadUrl
-      fileId
-      key
-      deduplicated
-      expiresAt
+  query RequestUploadUrl($key: String!, $contentHash: String!, $contentType: String!, $size: Int!, $filename: String) {
+    buckets(where: { key: { equalTo: $key } }) {
+      nodes {
+        id
+        requestUploadUrl(
+          contentHash: $contentHash
+          contentType: $contentType
+          size: $size
+          filename: $filename
+        ) {
+          uploadUrl
+          fileId
+          key
+          deduplicated
+          expiresAt
+        }
+      }
     }
   }
 `;
 
 // --- Helpers ---
 
-/**
- * Generate a deterministic SHA-256 hex hash for test content.
- */
 function sha256(content: string): string {
   return crypto.createHash('sha256').update(content).digest('hex');
 }
 
-/**
- * PUT file content to a presigned URL using fetch.
- */
 async function putToPresignedUrl(
   url: string,
   content: string,
@@ -82,7 +86,7 @@ async function putToPresignedUrl(
 
 // --- Tests ---
 
-describe('Upload integration (presigned URL flow)', () => {
+describe('Upload integration (per-table presigned URL flow)', () => {
   let request: supertest.Agent;
   let teardown: () => Promise<void>;
 
@@ -118,31 +122,33 @@ describe('Upload integration (presigned URL flow)', () => {
     if (teardown) await teardown();
   });
 
-  describe('Public file upload', () => {
+  describe('Public file upload via bucket field', () => {
     const fileContent = 'Hello, public world!';
     const contentType = 'text/plain';
     const contentHash = sha256(fileContent);
     let uploadUrl: string;
     let fileId: string;
 
-    it('should return a presigned PUT URL via requestUploadUrl', async () => {
+    it('should return a presigned PUT URL via bucket.requestUploadUrl', async () => {
       const res = await postGraphQL({
         query: REQUEST_UPLOAD_URL,
         variables: {
-          input: {
-            bucketKey: 'public',
-            contentHash,
-            contentType,
-            size: Buffer.byteLength(fileContent),
-            filename: 'hello-public.txt',
-          },
+          key: 'public',
+          contentHash,
+          contentType,
+          size: Buffer.byteLength(fileContent),
+          filename: 'hello-public.txt',
         },
       });
 
       expect(res.status).toBe(200);
       expect(res.body.errors).toBeUndefined();
 
-      const payload = res.body.data.requestUploadUrl;
+      const bucket = res.body.data.buckets.nodes[0];
+      expect(bucket).toBeTruthy();
+      expect(bucket.id).toBeTruthy();
+
+      const payload = bucket.requestUploadUrl;
       expect(payload.uploadUrl).toBeTruthy();
       expect(payload.fileId).toBeTruthy();
       expect(payload.key).toBe(contentHash);
@@ -159,31 +165,32 @@ describe('Upload integration (presigned URL flow)', () => {
     });
   });
 
-  describe('Private file upload', () => {
+  describe('Private file upload via bucket field', () => {
     const fileContent = 'Hello, private world!';
     const contentType = 'text/plain';
     const contentHash = sha256(fileContent);
     let uploadUrl: string;
     let fileId: string;
 
-    it('should return a presigned PUT URL via requestUploadUrl', async () => {
+    it('should return a presigned PUT URL via bucket.requestUploadUrl', async () => {
       const res = await postGraphQL({
         query: REQUEST_UPLOAD_URL,
         variables: {
-          input: {
-            bucketKey: 'private',
-            contentHash,
-            contentType,
-            size: Buffer.byteLength(fileContent),
-            filename: 'hello-private.txt',
-          },
+          key: 'private',
+          contentHash,
+          contentType,
+          size: Buffer.byteLength(fileContent),
+          filename: 'hello-private.txt',
        },
      });
 
      expect(res.status).toBe(200);
      expect(res.body.errors).toBeUndefined();
 
-      const payload = res.body.data.requestUploadUrl;
+      const bucket = res.body.data.buckets.nodes[0];
+      expect(bucket).toBeTruthy();
+
+      const payload = bucket.requestUploadUrl;
      expect(payload.uploadUrl).toBeTruthy();
      expect(payload.fileId).toBeTruthy();
      expect(payload.key).toBe(contentHash);
@@ 
-202,27 +209,24 @@ describe('Upload integration (presigned URL flow)', () => { describe('Deduplication', () => { it('should return deduplicated=true for a file with an existing content hash', async () => { - // Re-request the same public file content hash const fileContent = 'Hello, public world!'; const contentHash = sha256(fileContent); const res = await postGraphQL({ query: REQUEST_UPLOAD_URL, variables: { - input: { - bucketKey: 'public', - contentHash, - contentType: 'text/plain', - size: Buffer.byteLength(fileContent), - filename: 'hello-public-copy.txt', - }, + key: 'public', + contentHash, + contentType: 'text/plain', + size: Buffer.byteLength(fileContent), + filename: 'hello-public-copy.txt', }, }); expect(res.status).toBe(200); expect(res.body.errors).toBeUndefined(); - const payload = res.body.data.requestUploadUrl; + const payload = res.body.data.buckets.nodes[0].requestUploadUrl; expect(payload.deduplicated).toBe(true); expect(payload.uploadUrl).toBeNull(); expect(payload.expiresAt).toBeNull(); diff --git a/graphql/test/__tests__/__snapshots__/graphile-test.test.ts.snap b/graphql/test/__tests__/__snapshots__/graphile-test.test.ts.snap index 68edec297..e0c88e162 100644 --- a/graphql/test/__tests__/__snapshots__/graphile-test.test.ts.snap +++ b/graphql/test/__tests__/__snapshots__/graphile-test.test.ts.snap @@ -117,6 +117,121 @@ exports[`introspection query snapshot: introspection 1`] = ` }, "subscriptionType": null, "types": [ + { + "description": "The root query type which gives access points into the data universe.", + "enumValues": null, + "fields": [ + { + "args": [ + { + "defaultValue": null, + "description": "Only read the first \`n\` values of the set.", + "name": "first", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null, + }, + }, + { + "defaultValue": null, + "description": "Only read the last \`n\` values of the set.", + "name": "last", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null, + }, + }, + { + "defaultValue": null, + "description": "Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor +based pagination. 
May not be used with \`last\`.", + "name": "offset", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null, + }, + }, + { + "defaultValue": null, + "description": "Read all values in the set before (above) this cursor.", + "name": "before", + "type": { + "kind": "SCALAR", + "name": "Cursor", + "ofType": null, + }, + }, + { + "defaultValue": null, + "description": "Read all values in the set after (below) this cursor.", + "name": "after", + "type": { + "kind": "SCALAR", + "name": "Cursor", + "ofType": null, + }, + }, + { + "defaultValue": null, + "description": "A filter to be used in determining which values should be returned by the collection.", + "name": "where", + "type": { + "kind": "INPUT_OBJECT", + "name": "UserFilter", + "ofType": null, + }, + }, + { + "defaultValue": "[PRIMARY_KEY_ASC]", + "description": "The method to use when ordering \`User\`.", + "name": "orderBy", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "UserOrderBy", + "ofType": null, + }, + }, + }, + }, + ], + "deprecationReason": null, + "description": "Reads and enables pagination through a set of \`User\`.", + "isDeprecated": false, + "name": "users", + "type": { + "kind": "OBJECT", + "name": "UserConnection", + "ofType": null, + }, + }, + { + "args": [], + "deprecationReason": null, + "description": "Metadata about the database schema, including tables, fields, indexes, and constraints. Useful for code generation tools.", + "isDeprecated": false, + "name": "_meta", + "type": { + "kind": "OBJECT", + "name": "MetaSchema", + "ofType": null, + }, + }, + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "Query", + "possibleTypes": null, + }, { "description": "A connection to a list of \`User\` values.", "enumValues": null, @@ -2741,6 +2856,128 @@ exports[`introspection query snapshot: introspection 1`] = ` "name": "MetaQuery", "possibleTypes": null, }, + { + "description": "The root mutation type which contains root level fields which mutate data.", + "enumValues": null, + "fields": [ + { + "args": [ + { + "defaultValue": null, + "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", + "name": "input", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "INPUT_OBJECT", + "name": "CreateUserInput", + "ofType": null, + }, + }, + }, + ], + "deprecationReason": null, + "description": "Creates a single \`User\`.", + "isDeprecated": false, + "name": "createUser", + "type": { + "kind": "OBJECT", + "name": "CreateUserPayload", + "ofType": null, + }, + }, + { + "args": [ + { + "defaultValue": null, + "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", + "name": "input", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "INPUT_OBJECT", + "name": "UpdateUserInput", + "ofType": null, + }, + }, + }, + ], + "deprecationReason": null, + "description": "Updates a single \`User\` using a unique key and a patch.", + "isDeprecated": false, + "name": "updateUser", + "type": { + "kind": "OBJECT", + "name": "UpdateUserPayload", + "ofType": null, + }, + }, + { + "args": [ + { + "defaultValue": null, + "description": "The exclusive input argument for this mutation. 
An object type, make sure to see documentation for this object’s fields.", + "name": "input", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "INPUT_OBJECT", + "name": "DeleteUserInput", + "ofType": null, + }, + }, + }, + ], + "deprecationReason": null, + "description": "Deletes a single \`User\` using a unique key.", + "isDeprecated": false, + "name": "deleteUser", + "type": { + "kind": "OBJECT", + "name": "DeleteUserPayload", + "ofType": null, + }, + }, + { + "args": [ + { + "defaultValue": null, + "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", + "name": "input", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "INPUT_OBJECT", + "name": "ProvisionBucketInput", + "ofType": null, + }, + }, + }, + ], + "deprecationReason": null, + "description": "Provision an S3 bucket for a logical bucket in the database. +Reads the bucket config via RLS, then creates and configures +the S3 bucket with the appropriate privacy policies, CORS rules, +and lifecycle settings.", + "isDeprecated": false, + "name": "provisionBucket", + "type": { + "kind": "OBJECT", + "name": "ProvisionBucketPayload", + "ofType": null, + }, + }, + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "Mutation", + "possibleTypes": null, + }, { "description": "The output of our create \`User\` mutation.", "enumValues": null, @@ -3185,7 +3422,7 @@ payload verbatim. May be used to track mutations by the client.", "inputFields": [ { "defaultValue": null, - "description": "Bucket key (e.g., "public", "private")", + "description": "The logical bucket key (e.g., "public", "private")", "name": "bucketKey", "type": { "kind": "NON_NULL", @@ -3199,10 +3436,8 @@ payload verbatim. May be used to track mutations by the client.", }, { "defaultValue": null, - "description": "Owner entity ID for entity-scoped uploads. -Omit for app-level (database-wide) storage. -When provided, resolves the storage module for the entity type -that owns this entity instance (e.g., a data room ID, team ID).", + "description": "Owner entity ID for entity-scoped bucket provisioning. +Omit for app-level (database-wide) storage.", "name": "ownerId", "type": { "kind": "SCALAR", @@ -3210,76 +3445,10 @@ that owns this entity instance (e.g., a data room ID, team ID).", "ofType": null, }, }, - { - "defaultValue": null, - "description": "SHA-256 content hash computed by the client (hex-encoded, 64 chars)", - "name": "contentHash", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "MIME type of the file (e.g., "image/png")", - "name": "contentType", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "File size in bytes", - "name": "size", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "Original filename (optional, for display and Content-Disposition)", - "name": "filename", - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "Custom S3 key (e.g., "reports/2024/Q1.pdf"). -Only allowed when the bucket has allow_custom_keys=true. 
-When omitted, key defaults to contentHash (content-addressed dedup). -When provided, the file is stored at this key. -Re-uploading to an existing key auto-creates a new version.", - "name": "key", - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, ], "interfaces": null, "kind": "INPUT_OBJECT", - "name": "RequestUploadUrlInput", + "name": "ProvisionBucketInput", "possibleTypes": null, }, { @@ -3292,723 +3461,6 @@ Re-uploading to an existing key auto-creates a new version.", "name": "UUID", "possibleTypes": null, }, - { - "description": null, - "enumValues": null, - "fields": [ - { - "args": [], - "deprecationReason": null, - "description": "Presigned PUT URL (null if file was deduplicated)", - "isDeprecated": false, - "name": "uploadUrl", - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "The file ID (existing if deduplicated, new if fresh upload)", - "isDeprecated": false, - "name": "fileId", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "UUID", - "ofType": null, - }, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "The S3 object key", - "isDeprecated": false, - "name": "key", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "Whether this file was deduplicated (already exists with same hash)", - "isDeprecated": false, - "name": "deduplicated", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Boolean", - "ofType": null, - }, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "Presigned URL expiry time (null if deduplicated)", - "isDeprecated": false, - "name": "expiresAt", - "type": { - "kind": "SCALAR", - "name": "Datetime", - "ofType": null, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "ID of the previous version (set when re-uploading to an existing custom key)", - "isDeprecated": false, - "name": "previousVersionId", - "type": { - "kind": "SCALAR", - "name": "UUID", - "ofType": null, - }, - }, - ], - "inputFields": null, - "interfaces": [], - "kind": "OBJECT", - "name": "RequestUploadUrlPayload", - "possibleTypes": null, - }, - { - "description": "A point in time as described by the [ISO -8601](https://en.wikipedia.org/wiki/ISO_8601) and, if it has a timezone, [RFC -3339](https://datatracker.ietf.org/doc/html/rfc3339) standards. 
Input values -that do not conform to both ISO 8601 and RFC 3339 may be coerced, which may lead -to unexpected results.", - "enumValues": null, - "fields": null, - "inputFields": null, - "interfaces": null, - "kind": "SCALAR", - "name": "Datetime", - "possibleTypes": null, - }, - { - "description": null, - "enumValues": null, - "fields": null, - "inputFields": [ - { - "defaultValue": null, - "description": "SHA-256 content hash computed by the client (hex-encoded, 64 chars)", - "name": "contentHash", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "MIME type of the file (e.g., "image/png")", - "name": "contentType", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "File size in bytes", - "name": "size", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "Original filename (optional, for display and Content-Disposition)", - "name": "filename", - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "Custom S3 key (only when bucket has allow_custom_keys=true)", - "name": "key", - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - ], - "interfaces": null, - "kind": "INPUT_OBJECT", - "name": "BulkUploadFileInput", - "possibleTypes": null, - }, - { - "description": null, - "enumValues": null, - "fields": null, - "inputFields": [ - { - "defaultValue": null, - "description": "Bucket key (e.g., "public", "private")", - "name": "bucketKey", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "Owner entity ID for entity-scoped uploads", - "name": "ownerId", - "type": { - "kind": "SCALAR", - "name": "UUID", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "Array of files to upload", - "name": "files", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "LIST", - "name": null, - "ofType": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "INPUT_OBJECT", - "name": "BulkUploadFileInput", - "ofType": null, - }, - }, - }, - }, - }, - ], - "interfaces": null, - "kind": "INPUT_OBJECT", - "name": "RequestBulkUploadUrlsInput", - "possibleTypes": null, - }, - { - "description": null, - "enumValues": null, - "fields": [ - { - "args": [], - "deprecationReason": null, - "description": "Presigned PUT URL (null if file was deduplicated)", - "isDeprecated": false, - "name": "uploadUrl", - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "The file ID", - "isDeprecated": false, - "name": "fileId", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "UUID", - "ofType": null, - }, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "The S3 object key", - "isDeprecated": false, - "name": "key", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": 
"Whether this file was deduplicated", - "isDeprecated": false, - "name": "deduplicated", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Boolean", - "ofType": null, - }, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "Presigned URL expiry time (null if deduplicated)", - "isDeprecated": false, - "name": "expiresAt", - "type": { - "kind": "SCALAR", - "name": "Datetime", - "ofType": null, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "ID of the previous version (set when re-uploading to an existing custom key)", - "isDeprecated": false, - "name": "previousVersionId", - "type": { - "kind": "SCALAR", - "name": "UUID", - "ofType": null, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "Index of this file in the input array (for client correlation)", - "isDeprecated": false, - "name": "index", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null, - }, - }, - }, - ], - "inputFields": null, - "interfaces": [], - "kind": "OBJECT", - "name": "BulkUploadFilePayload", - "possibleTypes": null, - }, - { - "description": null, - "enumValues": null, - "fields": [ - { - "args": [], - "deprecationReason": null, - "description": "Array of results, one per input file", - "isDeprecated": false, - "name": "files", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "LIST", - "name": null, - "ofType": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "OBJECT", - "name": "BulkUploadFilePayload", - "ofType": null, - }, - }, - }, - }, - }, - ], - "inputFields": null, - "interfaces": [], - "kind": "OBJECT", - "name": "RequestBulkUploadUrlsPayload", - "possibleTypes": null, - }, - { - "description": "The root query type which gives access points into the data universe.", - "enumValues": null, - "fields": [ - { - "args": [ - { - "defaultValue": null, - "description": "Only read the first \`n\` values of the set.", - "name": "first", - "type": { - "kind": "SCALAR", - "name": "Int", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "Only read the last \`n\` values of the set.", - "name": "last", - "type": { - "kind": "SCALAR", - "name": "Int", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "Skip the first \`n\` values from our \`after\` cursor, an alternative to cursor -based pagination. 
May not be used with \`last\`.", - "name": "offset", - "type": { - "kind": "SCALAR", - "name": "Int", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "Read all values in the set before (above) this cursor.", - "name": "before", - "type": { - "kind": "SCALAR", - "name": "Cursor", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "Read all values in the set after (below) this cursor.", - "name": "after", - "type": { - "kind": "SCALAR", - "name": "Cursor", - "ofType": null, - }, - }, - { - "defaultValue": null, - "description": "A filter to be used in determining which values should be returned by the collection.", - "name": "where", - "type": { - "kind": "INPUT_OBJECT", - "name": "UserFilter", - "ofType": null, - }, - }, - { - "defaultValue": "[PRIMARY_KEY_ASC]", - "description": "The method to use when ordering \`User\`.", - "name": "orderBy", - "type": { - "kind": "LIST", - "name": null, - "ofType": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "ENUM", - "name": "UserOrderBy", - "ofType": null, - }, - }, - }, - }, - ], - "deprecationReason": null, - "description": "Reads and enables pagination through a set of \`User\`.", - "isDeprecated": false, - "name": "users", - "type": { - "kind": "OBJECT", - "name": "UserConnection", - "ofType": null, - }, - }, - { - "args": [], - "deprecationReason": null, - "description": "Metadata about the database schema, including tables, fields, indexes, and constraints. Useful for code generation tools.", - "isDeprecated": false, - "name": "_meta", - "type": { - "kind": "OBJECT", - "name": "MetaSchema", - "ofType": null, - }, - }, - ], - "inputFields": null, - "interfaces": [], - "kind": "OBJECT", - "name": "Query", - "possibleTypes": null, - }, - { - "description": "The root mutation type which contains root level fields which mutate data.", - "enumValues": null, - "fields": [ - { - "args": [ - { - "defaultValue": null, - "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", - "name": "input", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "INPUT_OBJECT", - "name": "CreateUserInput", - "ofType": null, - }, - }, - }, - ], - "deprecationReason": null, - "description": "Creates a single \`User\`.", - "isDeprecated": false, - "name": "createUser", - "type": { - "kind": "OBJECT", - "name": "CreateUserPayload", - "ofType": null, - }, - }, - { - "args": [ - { - "defaultValue": null, - "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", - "name": "input", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "INPUT_OBJECT", - "name": "UpdateUserInput", - "ofType": null, - }, - }, - }, - ], - "deprecationReason": null, - "description": "Updates a single \`User\` using a unique key and a patch.", - "isDeprecated": false, - "name": "updateUser", - "type": { - "kind": "OBJECT", - "name": "UpdateUserPayload", - "ofType": null, - }, - }, - { - "args": [ - { - "defaultValue": null, - "description": "The exclusive input argument for this mutation. 
An object type, make sure to see documentation for this object’s fields.", - "name": "input", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "INPUT_OBJECT", - "name": "DeleteUserInput", - "ofType": null, - }, - }, - }, - ], - "deprecationReason": null, - "description": "Deletes a single \`User\` using a unique key.", - "isDeprecated": false, - "name": "deleteUser", - "type": { - "kind": "OBJECT", - "name": "DeleteUserPayload", - "ofType": null, - }, - }, - { - "args": [ - { - "defaultValue": null, - "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", - "name": "input", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "INPUT_OBJECT", - "name": "RequestUploadUrlInput", - "ofType": null, - }, - }, - }, - ], - "deprecationReason": null, - "description": "Request a presigned URL for uploading a file directly to S3. -Client computes SHA-256 of the file content and provides it here. -If a file with the same hash already exists (dedup), returns the -existing file ID and deduplicated=true with no uploadUrl.", - "isDeprecated": false, - "name": "requestUploadUrl", - "type": { - "kind": "OBJECT", - "name": "RequestUploadUrlPayload", - "ofType": null, - }, - }, - { - "args": [ - { - "defaultValue": null, - "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", - "name": "input", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "INPUT_OBJECT", - "name": "RequestBulkUploadUrlsInput", - "ofType": null, - }, - }, - }, - ], - "deprecationReason": null, - "description": "Request presigned URLs for uploading multiple files in a single batch. -Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size). -Each file is processed independently — some may dedup while others get fresh URLs.", - "isDeprecated": false, - "name": "requestBulkUploadUrls", - "type": { - "kind": "OBJECT", - "name": "RequestBulkUploadUrlsPayload", - "ofType": null, - }, - }, - { - "args": [ - { - "defaultValue": null, - "description": "The exclusive input argument for this mutation. An object type, make sure to see documentation for this object’s fields.", - "name": "input", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "INPUT_OBJECT", - "name": "ProvisionBucketInput", - "ofType": null, - }, - }, - }, - ], - "deprecationReason": null, - "description": "Provision an S3 bucket for a logical bucket in the database. -Reads the bucket config via RLS, then creates and configures -the S3 bucket with the appropriate privacy policies, CORS rules, -and lifecycle settings.", - "isDeprecated": false, - "name": "provisionBucket", - "type": { - "kind": "OBJECT", - "name": "ProvisionBucketPayload", - "ofType": null, - }, - }, - ], - "inputFields": null, - "interfaces": [], - "kind": "OBJECT", - "name": "Mutation", - "possibleTypes": null, - }, - { - "description": null, - "enumValues": null, - "fields": null, - "inputFields": [ - { - "defaultValue": null, - "description": "The logical bucket key (e.g., "public", "private")", - "name": "bucketKey", - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "String", - "ofType": null, - }, - }, - }, - { - "defaultValue": null, - "description": "Owner entity ID for entity-scoped bucket provisioning. 
-Omit for app-level (database-wide) storage.",
-        "name": "ownerId",
-        "type": {
-          "kind": "SCALAR",
-          "name": "UUID",
-          "ofType": null,
-        },
-      },
-    ],
-    "interfaces": null,
-    "kind": "INPUT_OBJECT",
-    "name": "ProvisionBucketInput",
-    "possibleTypes": null,
-  },
   {
     "description": null,
     "enumValues": null,
diff --git a/packages/upload-client/__tests__/upload.test.ts b/packages/upload-client/__tests__/upload.test.ts
index 445917e80..9c1e8dcfe 100644
--- a/packages/upload-client/__tests__/upload.test.ts
+++ b/packages/upload-client/__tests__/upload.test.ts
@@ -1,6 +1,6 @@
 import { uploadFile } from '../src/upload';
 import { UploadError } from '../src/types';
-import { REQUEST_UPLOAD_URL_MUTATION } from '../src/queries';
+import { DEFAULT_BUCKET_QUERY_FIELD } from '../src/queries';
 import type { GraphQLExecutor, FileInput } from '../src/types';
 
 /**
@@ -34,7 +34,6 @@ const HELLO_WORLD_HASH = 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7
 const originalFetch = global.fetch;
 
 beforeEach(() => {
-  // Reset fetch mock before each test
   global.fetch = originalFetch;
 });
 
@@ -42,32 +41,40 @@ afterAll(() => {
   global.fetch = originalFetch;
 });
 
+/**
+ * Build a mock executor that returns data nested under the bucket query field.
+ * The per-table pattern returns: { bucketByKey: { requestUploadUrl: { ... } } }
+ */
+function createMockExecutor(
+  payload: Record<string, unknown>,
+  bucketQueryField = DEFAULT_BUCKET_QUERY_FIELD,
+): { execute: GraphQLExecutor; calls: Array<{ query: string; variables: Record<string, unknown> }> } {
+  const calls: Array<{ query: string; variables: Record<string, unknown> }> = [];
+  const execute: GraphQLExecutor = async (query, variables) => {
+    calls.push({ query, variables });
+    return {
+      [bucketQueryField]: {
+        id: 'bucket-uuid',
+        requestUploadUrl: payload,
+      },
+    };
+  };
+  return { execute, calls };
+}
+
 describe('uploadFile', () => {
   describe('fresh upload (not deduplicated)', () => {
-    it('should hash, request URL, and PUT to S3', async () => {
+    it('should hash, request URL via bucket field, and PUT to S3', async () => {
       const file = createMockFile('hello world');
-      const executeCalls: Array<{ query: string; variables: Record<string, unknown> }> = [];
-
-      const execute: GraphQLExecutor = async (query, variables) => {
-        executeCalls.push({ query, variables });
-
-        if (query === REQUEST_UPLOAD_URL_MUTATION) {
-          return {
-            requestUploadUrl: {
-              uploadUrl: 'https://s3.example.com/presigned-put-url',
-              fileId: 'file-uuid-123',
-              key: HELLO_WORLD_HASH,
-              deduplicated: false,
-              expiresAt: new Date(Date.now() + 900_000).toISOString(),
-              status: 'requested',
-            },
-          };
-        }
-
-        throw new Error(`Unexpected query: ${query}`);
-      };
+      const { execute, calls } = createMockExecutor({
+        uploadUrl: 'https://s3.example.com/presigned-put-url',
+        fileId: 'file-uuid-123',
+        key: HELLO_WORLD_HASH,
+        deduplicated: false,
+        expiresAt: new Date(Date.now() + 900_000).toISOString(),
+        previousVersionId: null,
+      });
 
-      // Mock fetch for S3 PUT
       global.fetch = jest.fn().mockResolvedValue({
         ok: true,
         status: 200,
@@ -80,20 +87,19 @@ describe('uploadFile', () => {
         execute,
       });
 
-      // Verify result
       expect(result.fileId).toBe('file-uuid-123');
       expect(result.key).toBe(HELLO_WORLD_HASH);
       expect(result.deduplicated).toBe(false);
-      expect(result.status).toBe('requested');
 
-      // Verify requestUploadUrl was called with correct input
-      expect(executeCalls[0].query).toBe(REQUEST_UPLOAD_URL_MUTATION);
-      const requestInput = (executeCalls[0].variables.input as Record<string, unknown>);
-      expect(requestInput.bucketKey).toBe('avatars');
-      expect(requestInput.contentHash).toBe(HELLO_WORLD_HASH);
- expect(requestInput.contentType).toBe('text/plain'); - expect(requestInput.size).toBe(11); - expect(requestInput.filename).toBe('test.txt'); + // Verify per-table query was called with flat variables (not input object) + expect(calls).toHaveLength(1); + expect(calls[0].query).toContain('bucketByKey'); + expect(calls[0].query).toContain('requestUploadUrl'); + expect(calls[0].variables.key).toBe('avatars'); + expect(calls[0].variables.contentHash).toBe(HELLO_WORLD_HASH); + expect(calls[0].variables.contentType).toBe('text/plain'); + expect(calls[0].variables.size).toBe(11); + expect(calls[0].variables.filename).toBe('test.txt'); // Verify S3 PUT was called expect(global.fetch).toHaveBeenCalledWith( @@ -103,35 +109,20 @@ describe('uploadFile', () => { headers: { 'Content-Type': 'text/plain' }, }), ); - - // Only requestUploadUrl should have been called (no confirm step) - expect(executeCalls).toHaveLength(1); }); }); describe('deduplicated upload', () => { - it('should skip PUT and confirm when deduplicated', async () => { + it('should skip PUT when deduplicated', async () => { const file = createMockFile('hello world'); - const executeCalls: Array<{ query: string }> = []; - - const execute: GraphQLExecutor = async (query) => { - executeCalls.push({ query }); - - if (query === REQUEST_UPLOAD_URL_MUTATION) { - return { - requestUploadUrl: { - uploadUrl: null, - fileId: 'existing-file-uuid', - key: HELLO_WORLD_HASH, - deduplicated: true, - expiresAt: null, - status: 'ready', - }, - }; - } - - throw new Error(`Unexpected query after dedup: ${query}`); - }; + const { execute, calls } = createMockExecutor({ + uploadUrl: null, + fileId: 'existing-file-uuid', + key: HELLO_WORLD_HASH, + deduplicated: true, + expiresAt: null, + previousVersionId: null, + }); global.fetch = jest.fn(); @@ -143,15 +134,44 @@ describe('uploadFile', () => { expect(result.fileId).toBe('existing-file-uuid'); expect(result.deduplicated).toBe(true); - expect(result.status).toBe('ready'); - // Only requestUploadUrl should have been called (no PUT) - expect(executeCalls).toHaveLength(1); - expect(executeCalls[0].query).toBe(REQUEST_UPLOAD_URL_MUTATION); + expect(calls).toHaveLength(1); expect(global.fetch).not.toHaveBeenCalled(); }); }); + describe('custom bucketQueryField', () => { + it('should use the provided bucket query field name', async () => { + const file = createMockFile('hello world'); + const { execute, calls } = createMockExecutor( + { + uploadUrl: 'https://s3.example.com/put', + fileId: 'file-1', + key: HELLO_WORLD_HASH, + deduplicated: false, + expiresAt: new Date().toISOString(), + previousVersionId: null, + }, + 'appBucketByKey', + ); + + global.fetch = jest.fn().mockResolvedValue({ + ok: true, + status: 200, + text: async () => '', + }); + + await uploadFile({ + file, + bucketKey: 'private', + execute, + bucketQueryField: 'appBucketByKey', + }); + + expect(calls[0].query).toContain('appBucketByKey'); + }); + }); + describe('error handling', () => { it('should throw INVALID_FILE for null file', async () => { const execute: GraphQLExecutor = jest.fn(); @@ -168,7 +188,7 @@ describe('uploadFile', () => { ).rejects.toMatchObject({ code: 'INVALID_FILE' }); }); - it('should throw REQUEST_UPLOAD_URL_FAILED when mutation fails', async () => { + it('should throw REQUEST_UPLOAD_URL_FAILED when query fails', async () => { const file = createMockFile('test'); const execute: GraphQLExecutor = async () => { throw new Error('Network error'); @@ -179,24 +199,28 @@ describe('uploadFile', () => { ).rejects.toMatchObject({ code: 
'REQUEST_UPLOAD_URL_FAILED' }); }); - it('should throw PUT_UPLOAD_FAILED when S3 returns error', async () => { + it('should throw REQUEST_UPLOAD_URL_FAILED when bucket not found', async () => { const file = createMockFile('test'); - const execute: GraphQLExecutor = async (query) => { - if (query === REQUEST_UPLOAD_URL_MUTATION) { - return { - requestUploadUrl: { - uploadUrl: 'https://s3.example.com/put', - fileId: 'file-1', - key: 'hash', - deduplicated: false, - expiresAt: new Date().toISOString(), - status: 'requested', - }, - }; - } - throw new Error('Unexpected'); + const execute: GraphQLExecutor = async () => { + return { bucketByKey: null } as any; }; + await expect( + uploadFile({ file, bucketKey: 'nonexistent', execute }), + ).rejects.toMatchObject({ code: 'REQUEST_UPLOAD_URL_FAILED' }); + }); + + it('should throw PUT_UPLOAD_FAILED when S3 returns error', async () => { + const file = createMockFile('test'); + const { execute } = createMockExecutor({ + uploadUrl: 'https://s3.example.com/put', + fileId: 'file-1', + key: 'hash', + deduplicated: false, + expiresAt: new Date().toISOString(), + previousVersionId: null, + }); + global.fetch = jest.fn().mockResolvedValue({ ok: false, status: 403, @@ -225,24 +249,14 @@ describe('uploadFile', () => { describe('content type handling', () => { it('should use application/octet-stream when file.type is empty', async () => { const file = createMockFile('binary data', 'file.bin', ''); - const executeCalls: Array<{ query: string; variables: Record }> = []; - - const execute: GraphQLExecutor = async (query, variables) => { - executeCalls.push({ query, variables }); - if (query === REQUEST_UPLOAD_URL_MUTATION) { - return { - requestUploadUrl: { - uploadUrl: 'https://s3.example.com/put', - fileId: 'file-1', - key: 'hash', - deduplicated: false, - expiresAt: new Date().toISOString(), - status: 'requested', - }, - }; - } - throw new Error('Unexpected'); - }; + const { execute, calls } = createMockExecutor({ + uploadUrl: 'https://s3.example.com/put', + fileId: 'file-1', + key: 'hash', + deduplicated: false, + expiresAt: new Date().toISOString(), + previousVersionId: null, + }); global.fetch = jest.fn().mockResolvedValue({ ok: true, @@ -252,8 +266,7 @@ describe('uploadFile', () => { await uploadFile({ file, bucketKey: 'test', execute }); - const requestInput = (executeCalls[0].variables.input as Record); - expect(requestInput.contentType).toBe('application/octet-stream'); + expect(calls[0].variables.contentType).toBe('application/octet-stream'); }); }); }); diff --git a/packages/upload-client/src/index.ts b/packages/upload-client/src/index.ts index 6689f2a2d..6bd562d64 100644 --- a/packages/upload-client/src/index.ts +++ b/packages/upload-client/src/index.ts @@ -32,8 +32,8 @@ export { hashFile, hashFileChunked } from './hash'; // Orchestrator export { uploadFile } from './upload'; -// GraphQL query strings (for custom integrations) -export { REQUEST_UPLOAD_URL_MUTATION } from './queries'; +// GraphQL query builders (for custom integrations) +export { buildRequestUploadUrlQuery, REQUEST_UPLOAD_URL_QUERY, REQUEST_UPLOAD_URL_MUTATION, DEFAULT_BUCKET_QUERY_FIELD } from './queries'; // Types export type { diff --git a/packages/upload-client/src/queries.ts b/packages/upload-client/src/queries.ts index a95618f3b..54262d38d 100644 --- a/packages/upload-client/src/queries.ts +++ b/packages/upload-client/src/queries.ts @@ -1,19 +1,51 @@ /** - * GraphQL mutation strings for the presigned URL upload pipeline. 
+ * GraphQL query builders for the per-table presigned URL upload pipeline. * * These are plain strings — no graphql-tag dependency needed. - * They match the schema defined in graphile-presigned-url-plugin. + * They match the per-table schema defined in graphile-presigned-url-plugin: + * upload fields are on bucket types (via @storageBuckets smart tag), + * not global mutations. */ -export const REQUEST_UPLOAD_URL_MUTATION = ` - mutation RequestUploadUrl($input: RequestUploadUrlInput!) { - requestUploadUrl(input: $input) { - uploadUrl - fileId - key - deduplicated - expiresAt - status +/** + * Build the GraphQL query for requesting an upload URL from a specific bucket type. + * + * The query fetches a bucket by key from the per-table PostGraphile type, + * then calls the requestUploadUrl field on that bucket instance. + * + * @param bucketQueryField - The PostGraphile query field name for the bucket type + * (e.g., "bucketByKey", "appBucketByKey", "dataRoomBucketByKeyAndOwnerId") + */ +export function buildRequestUploadUrlQuery(bucketQueryField: string): string { + return ` + query RequestUploadUrl($key: String!, $contentHash: String!, $contentType: String!, $size: Int!, $filename: String) { + ${bucketQueryField}(key: $key) { + id + requestUploadUrl( + contentHash: $contentHash + contentType: $contentType + size: $size + filename: $filename + ) { + uploadUrl + fileId + key + deduplicated + expiresAt + } } } `; +} + +/** Default query field for app-level buckets */ +export const DEFAULT_BUCKET_QUERY_FIELD = 'bucketByKey'; + +/** Pre-built query for the default bucket type */ +export const REQUEST_UPLOAD_URL_QUERY = buildRequestUploadUrlQuery(DEFAULT_BUCKET_QUERY_FIELD); + +/** + * @deprecated Use REQUEST_UPLOAD_URL_QUERY instead. + * Kept for backward compatibility during migration. + */ +export const REQUEST_UPLOAD_URL_MUTATION = REQUEST_UPLOAD_URL_QUERY; diff --git a/packages/upload-client/src/types.ts b/packages/upload-client/src/types.ts index fab10051a..ac0b3094b 100644 --- a/packages/upload-client/src/types.ts +++ b/packages/upload-client/src/types.ts @@ -32,8 +32,8 @@ export interface RequestUploadUrlPayload { deduplicated: boolean; /** Presigned URL expiry time (ISO string, null if deduplicated) */ expiresAt: string | null; - /** File status — 'requested' for fresh uploads, 'uploaded'/'processed' for deduplicated files */ - status: string; + /** ID of the previous version (when uploading a new version of a custom-keyed file) */ + previousVersionId: string | null; } // --- Client options --- @@ -69,6 +69,12 @@ export interface UploadFileOptions { bucketKey: string; /** GraphQL executor function */ execute: GraphQLExecutor; + /** + * PostGraphile query field for the bucket type (e.g., "bucketByKey", "appBucketByKey"). + * Defaults to "bucketByKey". Override for entity-scoped storage modules where the + * bucket table has a different PostGraphile-inflected name. 
+   */
+  bucketQueryField?: string;
   /** Progress callback (0-100) — only fires during the S3 PUT */
   onProgress?: (percent: number) => void;
   /** AbortSignal for cancellation */
@@ -82,8 +88,6 @@ export interface UploadResult {
   key: string;
   /** Whether this file was deduplicated (no bytes uploaded) */
   deduplicated: boolean;
-  /** File status after upload ("requested" for fresh uploads, existing status for dedup) */
-  status: string;
 }
 
 // --- File input abstraction ---
diff --git a/packages/upload-client/src/upload.ts b/packages/upload-client/src/upload.ts
index 69599c969..6a1c2fd59 100644
--- a/packages/upload-client/src/upload.ts
+++ b/packages/upload-client/src/upload.ts
@@ -8,7 +8,7 @@
  */
 
 import { hashFile } from './hash';
-import { REQUEST_UPLOAD_URL_MUTATION } from './queries';
+import { buildRequestUploadUrlQuery, DEFAULT_BUCKET_QUERY_FIELD } from './queries';
 import { UploadError } from './types';
 import type {
   UploadFileOptions,
@@ -40,7 +40,7 @@ import type {
 * ```
 */
 export async function uploadFile(options: UploadFileOptions): Promise<UploadResult> {
-  const { file, bucketKey, execute, onProgress, signal } = options;
+  const { file, bucketKey, execute, onProgress, signal, bucketQueryField } = options;
 
   // --- Validate input ---
   if (!file) {
@@ -60,8 +60,9 @@ export async function uploadFile(options: UploadFileOptions): Promise<UploadResult> {
+  const query = buildRequestUploadUrlQuery(bucketQueryField);
+
   try {
-    const data = await execute(REQUEST_UPLOAD_URL_MUTATION, { input });
-    const payload = data.requestUploadUrl as RequestUploadUrlPayload | undefined;
+    const data = await execute(query, {
+      key: input.bucketKey,
+      contentHash: input.contentHash,
+      contentType: input.contentType,
+      size: input.size,
+      filename: input.filename,
+    });
+
+    // Extract from the nested bucket response: { bucketByKey: { requestUploadUrl: { ... } } }
+    const bucketData = data[bucketQueryField] as Record<string, unknown> | undefined;
+    if (!bucketData) {
+      throw new UploadError('REQUEST_UPLOAD_URL_FAILED', `Bucket not found for query field "${bucketQueryField}"`);
+    }
+    const payload = bucketData.requestUploadUrl as RequestUploadUrlPayload | undefined;
     if (!payload) {
       throw new UploadError('REQUEST_UPLOAD_URL_FAILED', 'No data returned from requestUploadUrl');
     }
@@ -130,7 +144,7 @@ async function requestUploadUrl(
     if (err instanceof UploadError) throw err;
     throw new UploadError(
       'REQUEST_UPLOAD_URL_FAILED',
-      `requestUploadUrl mutation failed: ${err instanceof Error ? err.message : String(err)}`,
+      `requestUploadUrl query failed: ${err instanceof Error ? err.message : String(err)}`,
       err,
     );
  }
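A minimal usage sketch of the new per-table client flow, for reviewers of this diff. The endpoint URL and import paths are placeholders, and passing a browser File assumes File satisfies the package's FileInput abstraction; uploadFile, GraphQLExecutor, and bucketQueryField are the exports changed above. The executor must resolve to the raw GraphQL data object, since uploadFile reads data[bucketQueryField].requestUploadUrl:

import { uploadFile } from '@pgpmjs/upload-client'; // import path illustrative
import type { GraphQLExecutor } from '@pgpmjs/upload-client';

// Executor that resolves to the raw `data` object, matching what the tests above return.
const execute: GraphQLExecutor = async (query, variables) => {
  const res = await fetch('https://api.example.com/graphql', { // placeholder endpoint
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, variables }),
  });
  const { data, errors } = await res.json();
  if (errors?.length) throw new Error(errors[0].message);
  return data;
};

export async function uploadPrivate(file: File) {
  const result = await uploadFile({
    file, // assumes browser File satisfies FileInput
    bucketKey: 'private',
    execute,
    bucketQueryField: 'bucketByKey', // the default; override for entity-scoped bucket types
  });
  return result; // { fileId, key, deduplicated } (deduplicated=true means no bytes were PUT)
}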
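For custom integrations that bypass uploadFile, the exported builder produces the same per-table operation for any bucket query field name. A sketch assuming the "appBucketByKey" field from the test fixture above and an executor like the one in the previous snippet; the local UrlPayload type mirrors RequestUploadUrlPayload from this diff:

import { buildRequestUploadUrlQuery } from '@pgpmjs/upload-client'; // import path illustrative
import type { GraphQLExecutor } from '@pgpmjs/upload-client';

type UrlPayload = {
  uploadUrl: string | null;
  fileId: string;
  key: string;
  deduplicated: boolean;
  expiresAt: string | null;
  previousVersionId: string | null;
};

export async function requestUrlDirect(
  execute: GraphQLExecutor,
  contentHash: string, // 64-char hex SHA-256 of the file bytes
  size: number,
) {
  const query = buildRequestUploadUrlQuery('appBucketByKey');
  const data = await execute(query, {
    key: 'private',
    contentHash,
    contentType: 'application/pdf',
    size,
    filename: 'report.pdf',
  });
  const bucket = data.appBucketByKey as { requestUploadUrl?: UrlPayload } | null;
  const payload = bucket?.requestUploadUrl;
  if (payload && !payload.deduplicated && payload.uploadUrl) {
    // Fresh upload: PUT the file bytes to payload.uploadUrl before payload.expiresAt.
  }
  return payload;
}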