Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
142 changes: 140 additions & 2 deletions graphile/graphile-presigned-url-plugin/src/plugin.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ import { extendSchema, gql } from 'graphile-utils';
import { Logger } from '@pgpmjs/logger';

import type { PresignedUrlPluginOptions, S3Config, StorageModuleConfig, BucketConfig } from './types';
import { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
import { generatePresignedPutUrl } from './s3-signer';
import { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, resolveStorageModuleByFileId, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
import { generatePresignedPutUrl, deleteS3Object } from './s3-signer';

const log = new Logger('graphile-presigned-url:plugin');

Expand Down Expand Up @@ -264,6 +264,20 @@ export function createPresignedUrlPlugin(
files: [BulkUploadFilePayload!]!
}

input DeleteFileInput {
"""File ID to delete"""
fileId: UUID!
}

type DeleteFilePayload {
"""Whether the file record was deleted from the database"""
success: Boolean!
"""Whether the S3 object was deleted (false if other files reference the same key)"""
deletedFromS3: Boolean!
"""The S3 key that was (or would have been) deleted"""
key: String
}

extend type Mutation {
"""
Request a presigned URL for uploading a file directly to S3.
Expand All @@ -283,6 +297,17 @@ export function createPresignedUrlPlugin(
requestBulkUploadUrls(
input: RequestBulkUploadUrlsInput!
): RequestBulkUploadUrlsPayload

"""
Delete a file record and its S3 object.
The DB record is always deleted (subject to RLS). The S3 object is
deleted only if no other file records reference the same key in the
same bucket (content-addressed dedup safety). If the inline S3
delete fails, cleanup falls back to the async delete_s3_object job.
"""
deleteFile(
input: DeleteFileInput!
): DeleteFilePayload
}
`,
plans: {
Expand All @@ -302,6 +327,21 @@ export function createPresignedUrlPlugin(
return result;
});
},
deleteFile(_$mutation: any, fieldArgs: any) {
  // Bundle the raw mutation input together with the two request-scoped
  // context entries the runtime handler needs.
  const $combined = object({
    input: fieldArgs.getRaw('input'),
    withPgClient: (grafastContext() as any).get('withPgClient'),
    pgSettings: (grafastContext() as any).get('pgSettings'),
  });

  // Defer all real work to runtime; processDelete resolves to the payload.
  return lambda($combined, async ({ input, withPgClient, pgSettings }: any) =>
    processDelete(options, input, withPgClient, pgSettings),
  );
},
requestBulkUploadUrls(_$mutation: any, fieldArgs: any) {
const $input = fieldArgs.getRaw('input');
const $withPgClient = (grafastContext() as any).get('withPgClient');
Expand Down Expand Up @@ -635,5 +675,103 @@ async function processSingleFile(
};
}

// --- Delete logic ---

/**
 * Process a file deletion: remove the DB record, then attempt sync S3 cleanup.
 *
 * The AFTER DELETE trigger on the files table always enqueues an async
 * delete_s3_object job as a safety net. This function attempts the S3 delete
 * inline for immediate cleanup — if it fails, the async job handles it.
 *
 * The inline S3 delete is deliberately performed AFTER the transaction has
 * committed: deleting the object while the transaction is still open would
 * destroy data if the commit later failed (DB row restored, S3 object gone),
 * and would also hold a DB connection open across a network round-trip.
 *
 * 1. Resolve the file row (key, bucket_id) and storage config
 * 2. DELETE the file row (RLS enforced — only owner/admin can delete)
 *    → AFTER DELETE trigger enqueues async GC job (SECURITY DEFINER)
 * 3. Check refcount: any other file with same key in the same bucket?
 * 4. After commit, if orphaned: try S3 DeleteObject (sync, best-effort)
 * 5. Return result
 *
 * @param options - plugin options (S3 credentials resolution)
 * @param input - raw mutation input; must carry a string fileId
 * @param withPgClient - request-scoped PG client factory
 * @param pgSettings - per-request PG settings (drives RLS)
 * @throws INVALID_FILE_ID / DATABASE_NOT_FOUND / FILE_NOT_FOUND / DELETE_DENIED
 */
async function processDelete(
  options: PresignedUrlPluginOptions,
  input: any,
  withPgClient: any,
  pgSettings: any,
) {
  const { fileId } = input;

  if (!fileId || typeof fileId !== 'string') {
    throw new Error('INVALID_FILE_ID');
  }

  return withPgClient(pgSettings, async (pgClient: any) => {
    // All DB work happens inside one transaction; the S3 cleanup happens
    // only after it resolves (commits), so a rollback can never leave the
    // object deleted while the row survives.
    const tx = await pgClient.withTransaction(async (txClient: any) => {
      const databaseId = await resolveDatabaseId(txClient);
      if (!databaseId) {
        throw new Error('DATABASE_NOT_FOUND');
      }

      // 1. Resolve storage config + file across all storage modules
      const resolved = await resolveStorageModuleByFileId(txClient, databaseId, fileId);
      if (!resolved) {
        throw new Error('FILE_NOT_FOUND: file does not exist or access denied');
      }

      const { storageConfig, file } = resolved;
      const { key, bucket_id } = file;

      // 2. DELETE the file row (RLS enforced)
      const deleteResult = await txClient.query({
        text: `DELETE FROM ${storageConfig.filesQualifiedName}
               WHERE id = $1
               RETURNING id`,
        values: [fileId],
      });

      if (deleteResult.rows.length === 0) {
        throw new Error('DELETE_DENIED: insufficient permissions to delete this file');
      }

      // 3. Check refcount: any other file with same key in this bucket?
      // The row deleted above is already invisible to this query within
      // the same transaction, so the count covers only OTHER references.
      const refcountResult = await txClient.query({
        text: `SELECT COUNT(*)::int AS ref_count
               FROM ${storageConfig.filesQualifiedName}
               WHERE key = $1
                 AND bucket_id = $2`,
        values: [key, bucket_id],
      });

      const refCount = refcountResult.rows[0]?.ref_count ?? 0;

      // Hand everything the post-commit S3 step needs out of the closure.
      return { storageConfig, databaseId, key, refCount };
    });

    if (tx.refCount > 0) {
      log.info(`File ${fileId} deleted from DB; S3 key ${tx.key} still referenced by ${tx.refCount} file(s)`);
      return {
        success: true,
        deletedFromS3: false,
        key: tx.key,
      };
    }

    // 4. Transaction committed and the key is orphaned: attempt sync S3
    // delete (best-effort; the async GC job is the fallback).
    try {
      const s3ForDb = resolveS3ForDatabase(options, tx.storageConfig, tx.databaseId);
      await deleteS3Object(s3ForDb, tx.key);
      log.info(`File ${fileId} deleted from DB and S3 (key=${tx.key})`);
      return {
        success: true,
        deletedFromS3: true,
        key: tx.key,
      };
    } catch (s3Error: any) {
      log.warn(`Sync S3 delete failed for key=${tx.key}; async GC job will retry: ${s3Error.message}`);
      return {
        success: true,
        deletedFromS3: false,
        key: tx.key,
      };
    }
  });
}

export const PresignedUrlPlugin = createPresignedUrlPlugin;
export default PresignedUrlPlugin;
25 changes: 25 additions & 0 deletions graphile/graphile-presigned-url-plugin/src/s3-signer.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import {
S3Client,
DeleteObjectCommand,
PutObjectCommand,
GetObjectCommand,
HeadObjectCommand,
Expand Down Expand Up @@ -117,3 +118,27 @@ export async function headObject(
throw e;
}
}

/**
 * Remove a single object from S3.
 *
 * S3 DeleteObject is idempotent: deleting a key that does not exist still
 * succeeds, so this resolves true in both cases. Unexpected failures
 * (permissions, networking) propagate to the caller.
 *
 * @param s3Config - S3 client and target bucket
 * @param key - object key to remove
 * @returns true once the delete call has succeeded
 */
export async function deleteS3Object(
  s3Config: S3Config,
  key: string,
): Promise<boolean> {
  const command = new DeleteObjectCommand({
    Bucket: s3Config.bucket,
    Key: key,
  });
  await s3Config.client.send(command);
  log.debug(`Deleted S3 object: key=${key}, bucket=${s3Config.bucket}`);
  return true;
}
11 changes: 11 additions & 0 deletions uploads/s3-utils/src/utils.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import {
DeleteObjectCommand,
GetObjectCommand,
HeadObjectCommand,
S3Client} from '@aws-sdk/client-s3';
Expand Down Expand Up @@ -30,6 +31,16 @@ export const fileExists = async ({ client, bucket, key }: FileOperationArgs): Pr
}
};

export const deleteObject = async ({ client, bucket, key }: FileOperationArgs): Promise<boolean> => {
  try {
    const command = new DeleteObjectCommand({ Bucket: bucket, Key: key });
    await client.send(command);
    return true;
  } catch (e: any) {
    // A missing key is not an error for a delete — report "nothing removed".
    const notFound = e.name === 'NoSuchKey' || e.$metadata?.httpStatusCode === 404;
    if (notFound) {
      return false;
    }
    throw e;
  }
};

export const download = async ({
client,
writeStream,
Expand Down
Loading