From 4e6700419462de3dcef05719131eab82e74b709d Mon Sep 17 00:00:00 2001 From: Anis Snoussi Date: Mon, 28 Jul 2025 08:43:40 +0200 Subject: [PATCH 1/2] fix(readme): make docs readable not actually a fix, but want to trigger a release --- README.md | 97 +++++++++++++++---------------------------------------- 1 file changed, 26 insertions(+), 71 deletions(-) diff --git a/README.md b/README.md index fc79cb0..9d3cb7a 100644 --- a/README.md +++ b/README.md @@ -9,15 +9,13 @@ > Using this package requires an API key from [PromptCage.com](https://promptcage.com/) -## Install +## 📦 Install ```bash npm install promptcage ``` -## Usage - -### Basic Usage +## 🚀 Basic Usage ```ts import { PromptCage } from 'promptcage'; @@ -25,15 +23,6 @@ import { PromptCage } from 'promptcage'; // Initialize with API key from PROMPTCAGE_API_KEY environment variable const promptCage = new PromptCage(); -// Or initialize with API key directly -const promptCage = new PromptCage('your-api-key-here'); - -// Or initialize with options object -const promptCage = new PromptCage({ - apiKey: 'your-api-key-here', - maxWaitTime: 1000 // 1 second max wait time -}); - // Detect prompt injection const result = await promptCage.detectInjection('Your user input here'); @@ -41,77 +30,43 @@ console.log(result); //=> { safe: true, detectionId: 'det_123456', error: undefined } ``` -### Advanced Usage with Metadata - -```ts -import { PromptCage } from 'promptcage'; - -const promptCage = new PromptCage({ - apiKey: 'your-api-key', - maxWaitTime: 3000 // 3 seconds max wait time (custom) -}); - -const result = await promptCage.detectInjection( - 'Your user input here', - 'user-123', // optional anonymous user ID - { - source: 'web-app', - version: '1.0', - sessionId: 'sess_456' - } // optional metadata -); - -if (result.safe) { - console.log('Prompt is safe to use'); -} else { - console.log('Potential prompt injection detected!'); - console.log('Detection ID:', result.detectionId); - if (result.error) { - 
console.log('Error:', result.error); - } -} -``` - -## Environment Variables +## 🔧 API -Set your API key as an environment variable: +### Constructor -```bash -export PROMPTCAGE_API_KEY=your-api-key-here -``` - -## API - -### PromptCage +The constructor accepts an optional configuration object or API key string. -The main class for interacting with the PromptCage API. +| Parameter | Type | Required | Default | Description | +|-----------|------|----------|---------|-------------| +| `options` | `string \| PromptCageOptions` | No | - | API key string or configuration object | +| `options.apiKey` | `string` | No | `process.env.PROMPTCAGE_API_KEY` | Your PromptCage API key | +| `options.maxWaitTime` | `number` | No | `1000` | Maximum wait time in milliseconds before treating request as safe | -#### constructor(options?) - -- `options` (optional): Configuration object or API key string - - `apiKey` (optional): Your PromptCage API key. If not provided, will use `PROMPTCAGE_API_KEY` environment variable - - `maxWaitTime` (optional): Maximum wait time in milliseconds before treating request as safe (default: 1000ms) - -#### detectInjection(prompt, userAnonId?, metadata?) +### detectInjection() Detects potential prompt injection in the given text. 
-- `prompt` (required): The text to analyze for prompt injection -- `userAnonId` (optional): Anonymous user identifier for tracking -- `metadata` (optional): Additional metadata object +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `prompt` | `string` | Yes | The text to analyze for prompt injection | +| `userAnonId` | `string` | No | Anonymous user identifier for tracking | +| `metadata` | `object` | No | Additional metadata object | + +**Returns:** `Promise<DetectionResult>` -Returns a `Promise` with: -- `safe`: Boolean indicating if the prompt is safe -- `detectionId`: Unique identifier for this detection -- `error`: Error message if something went wrong (optional) +| Property | Type | Description | +|----------|------|-------------| +| `safe` | `boolean` | Boolean indicating if the prompt is safe | +| `detectionId` | `string` | Unique identifier for this detection | +| `error` | `string \| undefined` | Error message if something went wrong (optional) | -### Fail-Safe Behavior +## 🛡️ Fail-Safe Behavior The package is designed to be **fail-safe** and will never block your application. The SDK **fails open** in all error scenarios (Network errors, Rate limit exceeded, Quota exceeded ...). **Important**: In all error cases, `safe` will be `true` and `error` will contain the error message. This ensures your application continues to work even when the PromptCage API is down, slow, or experiencing issues. 
-### Error Handling +## ⚠️ Error Handling The SDK always returns `safe: true` in error cases, but you can still check for errors: @@ -134,7 +89,7 @@ if (result.safe) { } ``` -### Performance Considerations +## ⚡ Performance Considerations The `maxWaitTime` option helps prevent performance impact on your application: From 0b0c865a126d6bd48d175efd93d3ecb1bee2f32f Mon Sep 17 00:00:00 2001 From: Anis Snoussi Date: Mon, 28 Jul 2025 08:46:39 +0200 Subject: [PATCH 2/2] docs(readme): keep example with advanced usage --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9d3cb7a..0eba8b6 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,15 @@ The package is designed to be **fail-safe** and will never block your applicatio The SDK always returns `safe: true` in error cases, but you can still check for errors: ```ts -const result = await promptCage.detectInjection('Your user input here'); +const result = await promptCage.detectInjection( + 'Your user input here', + 'user-123', // optional anonymous user ID + { + source: 'web-app', + version: '1.0', + sessionId: 'sess_456' + } // optional metadata +); if (result.safe) { if (result.error) {