From 335e2f5a779143f06d13a361551669086be0e40a Mon Sep 17 00:00:00 2001 From: Luke Oliff Date: Sat, 11 Apr 2026 14:02:17 +0100 Subject: [PATCH 1/3] chore: add Context7 auto-refresh workflow --- .github/workflows/context7.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .github/workflows/context7.yml diff --git a/.github/workflows/context7.yml b/.github/workflows/context7.yml new file mode 100644 index 0000000..e91b43e --- /dev/null +++ b/.github/workflows/context7.yml @@ -0,0 +1,17 @@ +name: Update Context7 Documentation + +on: + release: + types: [published] + workflow_dispatch: + +jobs: + update-docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Update Context7 Documentation + id: context7 + uses: rennf93/upsert-context7@1.1 + with: + operation: refresh From 9c797971c884db92b0583550d6accfcb79856d2d Mon Sep 17 00:00:00 2001 From: Luke Oliff Date: Sat, 11 Apr 2026 19:06:18 +0100 Subject: [PATCH 2/3] ci: pin all action SHAs; fix notify-docs passing check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Pin all actions/checkout@v4, anthropics/claude-code-action@beta, and other unpinned refs to commit SHAs across all workflows - notify-docs: remove broken passing check (README has no ✅ markers); a merged [Example] PR is sufficient signal to raise the suggestion --- .github/workflows/claude-mentions.yml | 4 +-- .github/workflows/context7.yml | 4 +-- .github/workflows/engineer.yml | 16 +++++------ .github/workflows/lead-fix.yml | 10 +++---- .github/workflows/lead-review.yml | 10 +++---- .github/workflows/notify-docs.yml | 19 +++++--------- .github/workflows/pm-dashboard.yml | 10 +++---- .github/workflows/pm-suggestions.yml | 4 +-- .github/workflows/test-examples.yml | 38 +++++++++++++-------------- .github/workflows/test-existing.yml | 22 ++++++++-------- .github/workflows/vp.yml | 4 +-- 11 files changed, 68 insertions(+), 73 deletions(-) diff --git 
a/.github/workflows/claude-mentions.yml b/.github/workflows/claude-mentions.yml index 9fa3a82..5078b4c 100644 --- a/.github/workflows/claude-mentions.yml +++ b/.github/workflows/claude-mentions.yml @@ -18,7 +18,7 @@ jobs: id-token: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 @@ -79,7 +79,7 @@ jobs: - name: Respond if: steps.auth.outputs.allowed == 'true' - uses: anthropics/claude-code-action@beta + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 env: KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} diff --git a/.github/workflows/context7.yml b/.github/workflows/context7.yml index e91b43e..c732599 100644 --- a/.github/workflows/context7.yml +++ b/.github/workflows/context7.yml @@ -9,9 +9,9 @@ jobs: update-docs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Update Context7 Documentation id: context7 - uses: rennf93/upsert-context7@1.1 + uses: rennf93/upsert-context7@c29ca04a37ecbab62635c0e94c3d8908049b9ed5 # 1.1 with: operation: refresh diff --git a/.github/workflows/engineer.yml b/.github/workflows/engineer.yml index 7bea7f9..48debb1 100644 --- a/.github/workflows/engineer.yml +++ b/.github/workflows/engineer.yml @@ -32,7 +32,7 @@ jobs: statuses: write id-token: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 @@ -41,27 +41,27 @@ jobs: git config user.name "examples-bot" git config user.email "noreply@deepgram.com" - - uses: pnpm/action-setup@v4 + - uses: pnpm/action-setup@b906affcce14559ad1aafd4ab0e942779e9f58b1 # v4 with: version: latest - - uses: actions/setup-node@v4 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: '20' - name: Install Bun - uses: oven-sh/setup-bun@v2 + 
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2 - name: Install Deno - uses: denoland/setup-deno@v2 + uses: denoland/setup-deno@667a34cdef165d8d2b2e98dde39547c9daac7282 # v2.0.4 with: deno-version: v2.x - - uses: actions/setup-python@v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: '3.11' - - uses: actions/setup-go@v5 + - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 with: go-version: '1.22' @@ -125,7 +125,7 @@ jobs: - name: Build, test, and open PR if: steps.auth.outputs.allowed != 'false' && steps.backpressure.outputs.blocked != 'true' - uses: anthropics/claude-code-action@beta + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/lead-fix.yml b/.github/workflows/lead-fix.yml index d1f3af8..a601e13 100644 --- a/.github/workflows/lead-fix.yml +++ b/.github/workflows/lead-fix.yml @@ -30,7 +30,7 @@ jobs: github.event.label.name == 'status:fix-needed' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 ref: ${{ github.event.pull_request.head.ref || '' }} @@ -40,15 +40,15 @@ jobs: git config user.name "examples-bot" git config user.email "noreply@deepgram.com" - - uses: actions/setup-node@v4 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: '20' - - uses: actions/setup-python@v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: '3.11' - - uses: actions/setup-go@v5 + - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 with: go-version: '1.22' @@ -87,7 +87,7 @@ jobs: - name: Run instruction if: steps.attempts.outputs.max_reached != 'true' - uses: anthropics/claude-code-action@beta + uses: 
anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/lead-review.yml b/.github/workflows/lead-review.yml index 0c8717e..2393ee1 100644 --- a/.github/workflows/lead-review.yml +++ b/.github/workflows/lead-review.yml @@ -33,19 +33,19 @@ jobs: contains(github.event.pull_request.labels.*.name, 'type:fix') runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 - - uses: actions/setup-node@v4 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: '20' - - uses: actions/setup-python@v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: '3.11' - - uses: actions/setup-go@v5 + - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 with: go-version: '1.22' @@ -97,7 +97,7 @@ jobs: - name: Run instruction if: steps.auth.outputs.allowed != 'false' - uses: anthropics/claude-code-action@beta + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/notify-docs.yml b/.github/workflows/notify-docs.yml index 4dcaa1f..c68143b 100644 --- a/.github/workflows/notify-docs.yml +++ b/.github/workflows/notify-docs.yml @@ -1,7 +1,7 @@ name: Notify docs — new example ready # When a new example PR merges to main, create a [Suggestion] issue in deepgram-docs -# so the content-pm workflow can queue a tutorial guide for it. +# so the content-pm workflow can research and queue a tutorial guide for it. 
# # Required secrets: # DOCS_PAT — PAT with issues:write scope on deepgram/deepgram-docs @@ -18,14 +18,16 @@ jobs: github.event.pull_request.merged == true && startsWith(github.event.pull_request.title, '[Example]') runs-on: ubuntu-latest + permissions: + contents: read steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 ref: main - - name: Find merged example and create suggestion + - name: Create guide suggestion in deepgram-docs env: GH_TOKEN: ${{ secrets.DOCS_PAT }} PR_NUMBER: ${{ github.event.pull_request.number }} @@ -34,7 +36,7 @@ jobs: run: | set -euo pipefail - # Find the example directory added by this PR + # Find the example directory added or modified by this PR EXAMPLE_DIR=$(git diff --name-only HEAD~1 HEAD \ | grep '^examples/' \ | head -1 \ @@ -48,17 +50,10 @@ jobs: SLUG=$(basename "$EXAMPLE_DIR") echo "Example slug: $SLUG" - # Read the example README for title and description + # Read the example README for title README_TITLE=$(head -3 "$EXAMPLE_DIR/README.md" 2>/dev/null \ | grep '^#' | head -1 | sed 's/^# *//' || echo "$SLUG") - # Check it's marked passing in the README table - PASSING=$(grep -F "$SLUG" README.md | grep -c "✅ passing" || true) - if [ "$PASSING" -eq 0 ]; then - echo "Example not yet marked passing — skipping" - exit 0 - fi - # Create suggestion issue in deepgram-docs gh issue create \ --repo deepgram/deepgram-docs \ diff --git a/.github/workflows/pm-dashboard.yml b/.github/workflows/pm-dashboard.yml index af93bd0..d2b7d1f 100644 --- a/.github/workflows/pm-dashboard.yml +++ b/.github/workflows/pm-dashboard.yml @@ -18,7 +18,7 @@ jobs: statuses: write actions: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 @@ -27,15 +27,15 @@ jobs: git config user.name "examples-bot" git config user.email "noreply@deepgram.com" - - uses: actions/setup-node@v4 + - uses: 
actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: '20' - - uses: actions/setup-python@v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: '3.11' - - uses: actions/setup-go@v5 + - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 with: go-version: '1.22' @@ -56,7 +56,7 @@ jobs: - name: Run dashboard agent if: steps.existing.outputs.skip == 'false' - uses: anthropics/claude-code-action@beta + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 env: KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} diff --git a/.github/workflows/pm-suggestions.yml b/.github/workflows/pm-suggestions.yml index 43d518c..7b3a27f 100644 --- a/.github/workflows/pm-suggestions.yml +++ b/.github/workflows/pm-suggestions.yml @@ -23,7 +23,7 @@ jobs: issues: write id-token: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 @@ -119,7 +119,7 @@ jobs: # ── Pass: run PM agent ──────────────────────────────────────────── - name: Route issue if: steps.permission.outputs.gate == 'pass' - uses: anthropics/claude-code-action@beta + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test-examples.yml b/.github/workflows/test-examples.yml index 238995c..7ff368a 100644 --- a/.github/workflows/test-examples.yml +++ b/.github/workflows/test-examples.yml @@ -62,7 +62,7 @@ jobs: has_cli: ${{ steps.scan.outputs.has_cli }} has_md: ${{ steps.scan.outputs.has_md }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 @@ -137,20 +137,20 @@ jobs: if: 
needs.detect.outputs.has_node == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 - - uses: pnpm/action-setup@v4 + - uses: pnpm/action-setup@b906affcce14559ad1aafd4ab0e942779e9f58b1 # v4 with: version: latest - - uses: actions/setup-node@v4 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: '20' - name: Install Bun - uses: oven-sh/setup-bun@v2 + uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2 - name: Install Deno - uses: denoland/setup-deno@v2 + uses: denoland/setup-deno@667a34cdef165d8d2b2e98dde39547c9daac7282 # v2.0.4 with: deno-version: v2.x - name: Run Node.js tests @@ -217,7 +217,7 @@ jobs: - name: Comment missing credentials if: steps.test.outputs.missing != '' && github.event_name == 'pull_request' - uses: actions/github-script@v7 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 with: script: | const missing = '${{ steps.test.outputs.missing }}'.trim().split(/\s+/).filter(Boolean); @@ -233,11 +233,11 @@ jobs: if: needs.detect.outputs.has_python == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: '3.11' - name: Run Python tests @@ -309,7 +309,7 @@ jobs: - name: Comment missing credentials if: steps.test.outputs.missing != '' && github.event_name == 'pull_request' - uses: actions/github-script@v7 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 with: script: | const missing = '${{ steps.test.outputs.missing }}'.trim().split(/\s+/).filter(Boolean); @@ -325,11 
+325,11 @@ jobs: if: needs.detect.outputs.has_go == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 - - uses: actions/setup-go@v5 + - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 with: go-version: '1.22' - name: Run Go tests @@ -370,11 +370,11 @@ jobs: if: needs.detect.outputs.has_java == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 - - uses: actions/setup-java@v4 + - uses: actions/setup-java@c1e323688fd81a25caa38c78aa6df2d33d3e20d9 # v4 with: distribution: 'temurin' java-version: '21' @@ -418,7 +418,7 @@ jobs: if: needs.detect.outputs.has_rust == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 @@ -468,11 +468,11 @@ jobs: if: needs.detect.outputs.has_dotnet == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 - - uses: actions/setup-dotnet@v4 + - uses: actions/setup-dotnet@67a3573c9a986a3f9c594539f4ab511d57bb3ce9 # v4 with: dotnet-version: '8.0' - name: Run .NET tests @@ -519,7 +519,7 @@ jobs: if: needs.detect.outputs.has_cli == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 @@ -573,7 +573,7 @@ jobs: if: needs.detect.outputs.has_md == 'true' runs-on: 
ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} fetch-depth: 0 diff --git a/.github/workflows/test-existing.yml b/.github/workflows/test-existing.yml index 8abaad2..8a18aaf 100644 --- a/.github/workflows/test-existing.yml +++ b/.github/workflows/test-existing.yml @@ -30,8 +30,8 @@ jobs: has_failures: ${{ steps.test.outputs.has_failures }} failed_examples: ${{ steps.test.outputs.failed_examples }} steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: node-version: '20' @@ -91,8 +91,8 @@ jobs: has_failures: ${{ steps.test.outputs.has_failures }} failed_examples: ${{ steps.test.outputs.failed_examples }} steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: '3.11' @@ -164,8 +164,8 @@ jobs: has_failures: ${{ steps.test.outputs.has_failures }} failed_examples: ${{ steps.test.outputs.failed_examples }} steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 with: go-version: '1.22' @@ -223,8 +223,8 @@ jobs: has_failures: ${{ steps.test.outputs.has_failures }} failed_examples: ${{ steps.test.outputs.failed_examples }} steps: - - uses: actions/checkout@v4 - - uses: actions/setup-java@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + - uses: actions/setup-java@c1e323688fd81a25caa38c78aa6df2d33d3e20d9 # v4 with: distribution: 'temurin' java-version: '21' @@ -289,7 +289,7 @@ jobs: 
outdated_examples: ${{ steps.scan.outputs.outdated }} has_outdated: ${{ steps.scan.outputs.has_outdated }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Fetch latest SDK versions id: versions @@ -373,7 +373,7 @@ jobs: if: always() runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 @@ -467,7 +467,7 @@ jobs: - name: Run fix agent for this failure if: steps.collect.outputs.has_target == 'true' - uses: anthropics/claude-code-action@beta + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 env: KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} KAPA_PROJECT_ID: 1908afc6-c134-4c6f-a684-ed7d8ce91759 diff --git a/.github/workflows/vp.yml b/.github/workflows/vp.yml index 3c96431..3c2fd29 100644 --- a/.github/workflows/vp.yml +++ b/.github/workflows/vp.yml @@ -26,7 +26,7 @@ jobs: run: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: fetch-depth: 0 @@ -40,7 +40,7 @@ jobs: run: echo "date=$(date -u +%Y-%m-%d)" >> $GITHUB_OUTPUT - name: Run VP instruction - uses: anthropics/claude-code-action@beta + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 env: KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} From 0a547ad660dd0e969052a627295b92540c0c7bbe Mon Sep 17 00:00:00 2001 From: Luke Oliff Date: Mon, 13 Apr 2026 17:14:37 +0100 Subject: [PATCH 3/3] feat(ci): replace multi-role workflows with unified engineering pipeline Build pipeline: - engineering.yml: build job (type:suggestion label -> Docker sandbox with planning phase, neurosymbolic agent loop, constraint checker, PR on pass) and engineering job (@claude in PRs -> full unrestricted repo access) - issues.yml: issue handler with 5-min cron sweep; all paths org-gated; 
external issues invisible until a team member applies type:suggestion - notify-docs.yml: rewritten to detect changed examples by file diff rather than PR title; one docs issue per changed example dir with full README and BLOG.md context embedded Scripts (.github/scripts/): - plan_agent.py: Haiku planning phase (runtime, slug, action, secrets) - run_agent.py: Opus build loop with tool-use in Docker sandbox - agent_state.py: neurosymbolic layer (WorkingMemory, forward-chaining RuleEngine, pre-AGENT_DONE constraint checker) - filter_secrets.py, next_example_number.py, system_prompt.md Labels: - setup-labels.yml: add type:suggestion and automated Removed: - engineer, lead-fix, lead-review, pm-suggestions, pm-dashboard, test-examples, test-existing, vp, claude-mentions workflows - instructions/ directory (superseded by .github/scripts/system_prompt.md) --- .github/CLAUDE.md | 104 ++++ .github/scripts/agent_state.py | 389 +++++++++++++ .github/scripts/filter_secrets.py | 48 ++ .github/scripts/next_example_number.py | 78 +++ .github/scripts/plan_agent.py | 138 +++++ .github/scripts/run_agent.py | 465 ++++++++++++++++ .github/scripts/system_prompt.md | 161 ++++++ .github/workflows/claude-mentions.yml | 130 ----- .github/workflows/engineer.yml | 179 ------ .github/workflows/engineering.yml | 489 +++++++++++++++++ .github/workflows/issues.yml | 212 +++++++ .github/workflows/lead-fix.yml | 139 ----- .github/workflows/lead-review.yml | 148 ----- .github/workflows/notify-docs.yml | 170 ++++-- .github/workflows/pm-dashboard.yml | 94 ---- .github/workflows/pm-suggestions.yml | 140 ----- .github/workflows/setup-labels.yml | 2 + .github/workflows/test-examples.yml | 651 ---------------------- .github/workflows/test-existing.yml | 501 ----------------- .github/workflows/vp.yml | 62 --- README.md | 11 +- instructions/engineer.md | 730 ------------------------- instructions/lead-fix.md | 218 -------- instructions/lead-review.md | 260 --------- instructions/pm-dashboard.md | 93 ---- 
instructions/pm-suggestions.md | 223 -------- instructions/pm.md | 130 ----- instructions/researcher.md | 153 ------ instructions/vp.md | 237 -------- 29 files changed, 2207 insertions(+), 4148 deletions(-) create mode 100644 .github/CLAUDE.md create mode 100644 .github/scripts/agent_state.py create mode 100644 .github/scripts/filter_secrets.py create mode 100644 .github/scripts/next_example_number.py create mode 100644 .github/scripts/plan_agent.py create mode 100644 .github/scripts/run_agent.py create mode 100644 .github/scripts/system_prompt.md delete mode 100644 .github/workflows/claude-mentions.yml delete mode 100644 .github/workflows/engineer.yml create mode 100644 .github/workflows/engineering.yml create mode 100644 .github/workflows/issues.yml delete mode 100644 .github/workflows/lead-fix.yml delete mode 100644 .github/workflows/lead-review.yml delete mode 100644 .github/workflows/pm-dashboard.yml delete mode 100644 .github/workflows/pm-suggestions.yml delete mode 100644 .github/workflows/test-examples.yml delete mode 100644 .github/workflows/test-existing.yml delete mode 100644 .github/workflows/vp.yml delete mode 100644 instructions/engineer.md delete mode 100644 instructions/lead-fix.md delete mode 100644 instructions/lead-review.md delete mode 100644 instructions/pm-dashboard.md delete mode 100644 instructions/pm-suggestions.md delete mode 100644 instructions/pm.md delete mode 100644 instructions/researcher.md delete mode 100644 instructions/vp.md diff --git a/.github/CLAUDE.md b/.github/CLAUDE.md new file mode 100644 index 0000000..2db17a7 --- /dev/null +++ b/.github/CLAUDE.md @@ -0,0 +1,104 @@ +# CLAUDE.md — .github/ + +Context for Claude Code working on the engineering pipeline automation in this directory. + +## What this does + +`workflows/engineering.yml` triggers when a `type:suggestion` label is added to an issue (build job), or when a Deepgram org member `@claude`s in any issue or PR (engineering job). The build job: + +1. 
Extracts secret *names* (not values) from the Actions secrets context +2. Runs a cheap planning call (`scripts/plan_agent.py`) to determine runtime, Docker image, slug, and which secrets the example actually needs +3. Builds a minimal env file with only the required secrets (`scripts/filter_secrets.py`) +4. Spins up a Docker container with that env file +5. Runs a full agentic build loop (`scripts/run_agent.py`) — Claude writes code, runs tests, fixes failures, repeats until tests pass or MAX_TURNS is hit +6. Commits the output to a branch and opens a PR with the full build log + +## File map + +``` +.github/ + workflows/ + engineering.yml # Unified pipeline: build job + engineering (@claude) job. + scripts/ + plan_agent.py # Planning phase. Haiku call. Outputs runtime/slug/required_secrets JSON. + filter_secrets.py # Filters full secrets blob to only required keys. Writes env file. + next_example_number.py # Reads examples/ dir, returns next available NNN slot. + run_agent.py # Main agentic loop. Tool-use with Docker sandbox. Runs until AGENT_DONE. + system_prompt.md # The agent's contract — conventions, priorities, definition of done. +``` + +## Key design decisions (don't relitigate these) + +**Single workflow, no role-based agents.** Previous design split PM/lead/engineer into separate workflows. Replaced with one workflow, one agent, no handoffs. + +**Secret names to LLM, not values.** `toJSON(secrets)` gives all secret names. The planner sees only the names and picks what it needs. `filter_secrets.py` then injects only those values into Docker. The agent never sees secrets it doesn't need, and the names aren't logged anywhere visible. + +**`ANTHROPIC_API_KEY` is the one hardwired secret.** It has to be — you can't ask the LLM to select its own API key. Everything else flows through the plan → filter pipeline. + +**Docker for the sandbox, not the raw runner.** The runner stays clean. Each language gets its own image. 
The agent runs `docker exec` for all commands. Network is `bridge` (outbound OK, no inbound). + +**MAX_TURNS defaults to 75.** High enough for a complex multi-service build. Low enough to fail loudly rather than burn indefinitely. Override via `MAX_TURNS` env var in the workflow if needed. When hit, exits 1 and writes `AGENT_TURN_LIMIT_EXCEEDED` to the build log. + +**Mock the upstream, never Deepgram.** Deepgram is always real (real API key, real calls). Things that genuinely can't run in CI (phone number provisioning, inbound webhooks, OAuth browser flows) get a local mock server standing in for the upstream. Documented in the README. + +**Implementation priority is strict.** Partner library with Deepgram built in > official Deepgram SDK > nothing else. No raw HTTP to the Deepgram API. No third-party wrappers. The system prompt enforces this. + +## What the agent has available inside the container + +- `deepgram` CLI — installed during bootstrap, authenticated with `DEEPGRAM_API_KEY` +- `context7` — invoked via `npx`, used for SDK/API doc lookup before writing code +- Playwright — installed during bootstrap, Chromium included +- Full shell access via `run_command` tool +- File read/write via `write_file` / `read_file` / `list_files` tools + +## Definition of done (what the agent checks before AGENT_DONE) + +- Unit tests pass (exit 0) +- Integration tests pass (exit 0, real Deepgram calls) +- Browser/Playwright tests pass if the example has UI +- Example demonstrates the integration end to end — nothing skipped, nothing mocked that could be real +- README is accurate: what it does, prerequisites, all env vars, how to run, what to expect, what's mocked and why +- `.env.example` lists every required variable +- No hardcoded secrets + +## Repo conventions the agent follows + +- `examples/{NNN}-{slug}/` — zero-padded 3-digit number, kebab-case slug +- New platform = next multiple of 10. 
Second example on same platform = subslot (021 after 020) +- `src/` for code, `tests/` for tests +- Tests: exit 0 pass, exit 1 fail, exit 2 missing credentials (skip, not fail) + +## Secrets that must exist in the repo before this workflow can run + +| Secret | Purpose | +|--------|---------| +| `ANTHROPIC_API_KEY` | Powers the planning call and the agent loop | +| `DEEPGRAM_API_KEY` | Used by the agent inside the sandbox for all Deepgram calls | +| Any partner secrets | Passed to the sandbox only if the planner selects them | + +`GITHUB_TOKEN` is auto-provided by Actions and is explicitly excluded from the secrets passed to Docker. + +## Things to be careful about + +- `toJSON(secrets)` output is masked in logs but the JSON blob is in memory. Never log `ALL_SECRETS` directly. +- The env file at `/tmp/sandbox.env` contains real secret values. It lives only for the duration of the run. +- `plan_agent.py` uses Haiku (fast/cheap). `run_agent.py` uses Opus (capable). Don't swap these without thinking about the cost/quality tradeoff. +- Bootstrap (deepgram CLI + Playwright) runs on every container start. If builds are slow, bake a custom base image. +- The build job uses `concurrency: group: build-{issue_number}` and the engineering job uses `concurrency: group: engineering-{number}` — both with `cancel-in-progress: false`, so they queue rather than cancel. + +## Neurosymbolic architecture + +`run_agent.py` uses a hybrid neural + symbolic design. The three symbolic components live in `scripts/agent_state.py`: + +**WorkingMemory** — a deterministic fact store updated after every tool dispatch. Records which files have been written, which phases are complete (readme, blog, env_example, screenshot, source, tests), whether tests are passing, and a command history for loop detection. The LLM never writes to working memory — only tool results do. + +**RuleEngine** — forward-chaining production rules evaluated every turn. Rules fire when conditions match (e.g. 
`ModuleNotFoundError` in stderr → R1 fires, injecting the missing module name). High-priority rules are injected as a text block after tool results in the next user turn. One-shot rules (`R2`, `R4`, `R7`, etc.) fire at most once per session. + +**check_constraints** — deterministic pre-`AGENT_DONE` gate. When the LLM outputs `AGENT_DONE`, the constraint checker verifies: required files exist (`README.md`, `BLOG.md`, `.env.example`), `src/` and `tests/` are non-empty, and no source files contain hardcoded Deepgram API key patterns. If any constraint fails, `AGENT_DONE` is rejected and violations are injected as a new user turn. The LLM cannot self-certify completion. + +Current rules: R1 (missing module), R2 (API auth failure), R3 (port conflict), R4 (anti-loop), R5 (tests passing/readme missing), R6 (readme done/blog missing), R7 (turn budget 80%), R8 (permission denied), R9 (network error), R10 (syntax error). + +## TODOs / known rough edges + +- `next_example_number.py` subslot detection is heuristic — the agent should verify and can override +- Bootstrap failures are non-fatal warnings — if deepgram CLI or Playwright fail to install, the agent will discover this when it tries to use them diff --git a/.github/scripts/agent_state.py b/.github/scripts/agent_state.py new file mode 100644 index 0000000..db063d1 --- /dev/null +++ b/.github/scripts/agent_state.py @@ -0,0 +1,389 @@ +""" +agent_state.py — Symbolic layer for the engineering pipeline's build agent loop. + +Implements three components of the neurosymbolic architecture: + +1. WorkingMemory — fact store, updated deterministically after each tool call. + The LLM never writes here; only tool dispatch does. + +2. RuleEngine — forward-chaining production rules over working memory. + Fires when conditions are met, injects guidance into the next + LLM turn. Rules are pattern-matched against tool output and + working memory state. + +3. check_constraints — deterministic pre-AGENT_DONE gate. 
Verifies the + definition-of-done symbolically so the LLM cannot + self-certify completion without meeting formal criteria. +""" + +from __future__ import annotations + +import json +import re +from collections import Counter +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Optional + + +# --------------------------------------------------------------------------- +# Working Memory +# --------------------------------------------------------------------------- + +class WorkingMemory: + """ + Symbolic fact store. Facts are (predicate, args) → value tuples. + + Predicates used: + file_written(path) — agent wrote this file + phase(name) — high-level phase is complete + names: readme, blog, env_example, screenshot, + source, tests, api_verified + tests_passing() — last test run exited 0 + tests_failing() — last test run exited non-0 + last_test_output(text) — stderr/stdout from last failing test run + + Command history is tracked separately for anti-loop detection. 
+ """ + + def __init__(self) -> None: + self._facts: dict[tuple, Any] = {} + self._command_history: list[tuple[str, int]] = [] # (command, exit_code) + self.turn: int = 0 + + # ------------------------------------------------------------------ + # Fact manipulation + # ------------------------------------------------------------------ + + def assert_(self, predicate: str, *args: Any, value: Any = True) -> None: + self._facts[(predicate, args)] = value + + def retract(self, predicate: str, *args: Any) -> None: + self._facts.pop((predicate, args), None) + + def query(self, predicate: str, *args: Any) -> Any: + return self._facts.get((predicate, args)) + + # ------------------------------------------------------------------ + # Update from tool dispatch results + # ------------------------------------------------------------------ + + def update_from_tool_result( + self, + tool_name: str, + tool_input: dict, + result: dict, + ) -> None: + if tool_name == "write_file": + self._update_from_write(tool_input.get("path", "")) + elif tool_name == "run_command": + self._update_from_run( + tool_input.get("command", ""), + result.get("exit_code", -1), + result.get("stdout", ""), + result.get("stderr", ""), + ) + + def _update_from_write(self, path: str) -> None: + self.assert_("file_written", path) + name = path.split("/")[-1] + if name == "README.md": + self.assert_("phase", "readme") + elif name == "BLOG.md": + self.assert_("phase", "blog") + elif name == ".env.example": + self.assert_("phase", "env_example") + elif name == "screenshot.png": + self.assert_("phase", "screenshot") + if path.startswith("src/"): + self.assert_("phase", "source") + if path.startswith("tests/"): + self.assert_("phase", "tests") + + def _update_from_run( + self, cmd: str, exit_code: int, stdout: str, stderr: str + ) -> None: + self._command_history.append((cmd.strip(), exit_code)) + + combined = (stdout + "\n" + stderr).lower() + + # Test runner detection + test_runners = [ + "pytest", "npm test", 
"npm run test", "jest", "vitest", + "cargo test", "go test", "dotnet test", "mvn test", + "gradle test", + ] + if any(runner in cmd for runner in test_runners): + if exit_code == 0: + self.assert_("tests_passing") + self.retract("tests_failing") + self.retract("last_test_output") + else: + self.assert_("tests_failing") + self.retract("tests_passing") + self.assert_("last_test_output", (stderr or stdout)[:1000]) + + # API connectivity + if ("deepgram" in cmd.lower() or cmd.startswith("dg ")) and exit_code == 0: + self.assert_("phase", "api_verified") + + # Screenshot captured via Playwright + if "screenshot" in cmd.lower() and "playwright" in cmd.lower() and exit_code == 0: + self.assert_("phase", "screenshot") + + # ------------------------------------------------------------------ + # Anti-loop detection + # ------------------------------------------------------------------ + + def repeated_command(self, window: int = 7, threshold: int = 3) -> Optional[str]: + """ + Return the most-repeated command if it appears `threshold`+ times + in the last `window` commands, else None. 
+ """ + if len(self._command_history) < threshold: + return None + recent = self._command_history[-window:] + counts = Counter(cmd for cmd, _ in recent) + cmd, count = counts.most_common(1)[0] + return cmd if count >= threshold else None + + # ------------------------------------------------------------------ + # Summary for logging + # ------------------------------------------------------------------ + + def summary(self) -> str: + phases = [args[0] for (pred, args) in self._facts if pred == "phase"] + tests = "passing" if self.query("tests_passing") else ( + "failing" if self.query("tests_failing") else "not yet run") + return ( + f"turn={self.turn} " + f"phases={sorted(phases)} " + f"tests={tests} " + f"history_len={len(self._command_history)}" + ) + + +# --------------------------------------------------------------------------- +# Rule Engine — forward-chaining production rules +# --------------------------------------------------------------------------- + +@dataclass +class RuleFiring: + rule_id: str + message: str + priority: int = 0 # higher = injected first + + +class RuleEngine: + """ + Forward-chaining rule engine. Evaluates rules against the current + working memory state and the latest batch of tool results. + + One-shot rules (marked with `once=True`) fire at most once per session. + Repeating rules (once=False) may fire every turn they're triggered. + """ + + def __init__(self, wm: WorkingMemory, max_turns: int) -> None: + self.wm = wm + self.max_turns = max_turns + self._fired_once: set[str] = set() + + def evaluate(self, turn_results: list[dict]) -> list[RuleFiring]: + """ + Evaluate all rules. `turn_results` is the list of raw result dicts + from tool dispatch this turn (one per tool call). + + Returns firings sorted by priority (highest first), capped at 3 + so we don't overwhelm the context window. 
+ """ + wm = self.wm + firings: list[RuleFiring] = [] + + # Aggregate outputs across all tool calls this turn + combined_stderr = "\n".join(r.get("stderr", "") for r in turn_results) + combined_stdout = "\n".join(r.get("stdout", "") for r in turn_results) + combined_output = (combined_stderr + "\n" + combined_stdout).lower() + any_nonzero = any(r.get("exit_code", 0) != 0 for r in turn_results) + + # ------------------------------------------------------------------ + # R1 — Missing Python module + # ------------------------------------------------------------------ + m = re.search( + r"(?:ModuleNotFoundError|ImportError)[^\n]*No module named '([^']+)'", + combined_stderr, + ) + if m: + firings.append(RuleFiring("R1", + f"🔧 [RULE:missing-module] Missing module `{m.group(1)}`. " + f"Install it (e.g. `pip install {m.group(1).split('.')[0]}`) " + f"before retrying.", priority=10)) + + # ------------------------------------------------------------------ + # R2 — API authentication failure (one-shot per session) + # ------------------------------------------------------------------ + auth_signals = ["unauthorized", "invalid api key", "401", "403", + "authentication failed", "unauthenticated"] + if any_nonzero and any(s in combined_output for s in auth_signals): + if "R2" not in self._fired_once: + self._fired_once.add("R2") + firings.append(RuleFiring("R2", + "🔧 [RULE:auth-failure] API authentication failed. " + "Verify DEEPGRAM_API_KEY is set: `echo $DEEPGRAM_API_KEY | cut -c1-8`. " + "Keys start with `Token ` prefix in HTTP headers, not bare.", priority=9)) + + # ------------------------------------------------------------------ + # R3 — Port already in use + # ------------------------------------------------------------------ + if "address already in use" in combined_output or "eaddrinuse" in combined_output: + firings.append(RuleFiring("R3", + "🔧 [RULE:port-conflict] Port already in use. 
" + "Kill the occupying process: `fuser -k /tcp` or " + "`pkill -f `.", priority=8)) + + # ------------------------------------------------------------------ + # R4 — Anti-loop: same command repeated N times (one-shot) + # ------------------------------------------------------------------ + repeated = wm.repeated_command() + if repeated and "R4" not in self._fired_once: + self._fired_once.add("R4") + firings.append(RuleFiring("R4", + f"🔧 [RULE:anti-loop] Command `{repeated[:80]}` has been run 3+ " + f"times with the same result. This approach is not converging — " + f"step back and try a fundamentally different implementation strategy.", + priority=10)) + + # ------------------------------------------------------------------ + # R5 — Tests passing but README not yet written (one-shot) + # ------------------------------------------------------------------ + if wm.query("tests_passing") and not wm.query("phase", "readme"): + if "R5" not in self._fired_once: + self._fired_once.add("R5") + firings.append(RuleFiring("R5", + "📋 [RULE:missing-readme] Tests are passing but README.md " + "has not been written yet. Write the quickstart README next.", + priority=5)) + + # ------------------------------------------------------------------ + # R6 — README done but BLOG.md not yet written (one-shot) + # ------------------------------------------------------------------ + if (wm.query("tests_passing") and wm.query("phase", "readme") + and not wm.query("phase", "blog")): + if "R6" not in self._fired_once: + self._fired_once.add("R6") + firings.append(RuleFiring("R6", + "📋 [RULE:missing-blog] README is written but BLOG.md has not " + "been written. 
Write the developer blog post next.", priority=5)) + + # ------------------------------------------------------------------ + # R7 — Turn budget at 80% (one-shot) + # ------------------------------------------------------------------ + if wm.turn >= int(self.max_turns * 0.80) and "R7" not in self._fired_once: + self._fired_once.add("R7") + firings.append(RuleFiring("R7", + f"⚠️ [RULE:turn-budget] {wm.turn}/{self.max_turns} turns used " + f"({int(wm.turn / self.max_turns * 100)}%). Prioritise ruthlessly: " + "passing tests first, then README, then BLOG.md. " + "Do not start new features.", priority=7)) + + # ------------------------------------------------------------------ + # R8 — Permission denied + # ------------------------------------------------------------------ + if "permission denied" in combined_output and any_nonzero: + firings.append(RuleFiring("R8", + "🔧 [RULE:permission] Permission denied. " + "Use `chmod +x ` or check whether you need sudo.", priority=6)) + + # ------------------------------------------------------------------ + # R9 — Network / connection errors (one-shot) + # ------------------------------------------------------------------ + net_signals = ["connection refused", "connection timed out", + "name or service not known", "network unreachable", + "no route to host"] + if any_nonzero and any(s in combined_output for s in net_signals): + if "R9" not in self._fired_once: + self._fired_once.add("R9") + firings.append(RuleFiring("R9", + "🔧 [RULE:network] Network error detected. The container has " + "bridge networking (outbound-only). 
Verify the target URL is " + "reachable from outside the container; local services must be " + "started inside the container first.", priority=6)) + + # ------------------------------------------------------------------ + # R10 — Syntax / compilation error nudge (repeating) + # ------------------------------------------------------------------ + syntax_signals = ["syntaxerror", "unexpected token", "parse error", + "cannot find symbol", "undeclared identifier", + "error[e"] # Rust error codes + if any_nonzero and any(s in combined_output for s in syntax_signals): + firings.append(RuleFiring("R10", + "🔧 [RULE:syntax] Syntax or compilation error detected. " + "Read the file you just wrote with `read_file` before editing — " + "the actual content may differ from what you intended.", priority=7)) + + # Sort by priority descending, cap at 3 to protect context budget + return sorted(firings, key=lambda f: -f.priority)[:3] + + +# --------------------------------------------------------------------------- +# Symbolic constraint checker — pre-AGENT_DONE gate +# --------------------------------------------------------------------------- + +# Deepgram API key pattern: starts with optional prefix then long alphanumeric +_DG_KEY_PATTERN = re.compile(r'["\']dg[_.]?[a-zA-Z0-9]{30,}["\']', re.IGNORECASE) + +# Source file extensions to scan for secrets +_SOURCE_EXTENSIONS = { + ".py", ".js", ".ts", ".mjs", ".cjs", + ".go", ".rs", ".java", ".cs", ".rb", + ".sh", ".bash", ".yaml", ".yml", ".toml", +} + + +def check_constraints(workspace: Path) -> list[str]: + """ + Deterministic verification of the definition of done. + + Does NOT call the LLM. Returns a list of human-readable violation + strings. Empty list means all constraints are satisfied. + + Checks: + 1. Required files exist (README.md, BLOG.md, .env.example) + 2. Required directories exist and are non-empty (src/, tests/) + 3. No hardcoded Deepgram API keys in source files + """ + violations: list[str] = [] + + # 1. 
Required files + for required in ["README.md", "BLOG.md", ".env.example"]: + if not (workspace / required).exists(): + violations.append(f"`{required}` is missing from the example directory") + + # 2. Required directories — must exist and contain at least one file + for required_dir in ["src", "tests"]: + d = workspace / required_dir + if not d.exists(): + violations.append(f"`{required_dir}/` directory is missing") + elif not any(d.iterdir()): + violations.append(f"`{required_dir}/` directory is empty") + + # 3. Secret scan — Deepgram API key pattern in source files + for src_file in sorted(workspace.rglob("*")): + if not src_file.is_file(): + continue + if src_file.suffix not in _SOURCE_EXTENSIONS: + continue + # Skip the .env.example — it's supposed to mention keys by name + if src_file.name == ".env.example": + continue + try: + content = src_file.read_text(errors="ignore") + except OSError: + continue + if _DG_KEY_PATTERN.search(content): + rel = src_file.relative_to(workspace) + violations.append( + f"Possible hardcoded Deepgram API key detected in `{rel}` — " + f"use environment variables instead" + ) + + return violations diff --git a/.github/scripts/filter_secrets.py b/.github/scripts/filter_secrets.py new file mode 100644 index 0000000..19a8752 --- /dev/null +++ b/.github/scripts/filter_secrets.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +""" +filter_secrets.py + +Reads the full secrets JSON blob and a list of required secret names, +writes KEY=VALUE lines to stdout suitable for `docker run --env-file`. 
import argparse
import json
import os
import sys


def main() -> None:
    """Filter the secrets blob down to the required names, print env-file lines.

    Output (stdout) is KEY=VALUE lines suitable for `docker run --env-file`.
    Diagnostics go to stderr. Exits 1 if the secrets blob is not valid JSON.

    The secrets JSON may come from --secrets-json (legacy), the SECRETS_JSON
    environment variable, or stdin. Prefer env/stdin in CI: argv is visible
    to other processes via `ps`, so passing secrets on the command line
    leaks them on shared runners.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--required", required=True,
                        help="Comma-separated secret names")
    parser.add_argument("--secrets-json", default=None,
                        help="Full secrets as JSON string "
                             "(falls back to $SECRETS_JSON, then stdin)")
    args = parser.parse_args()

    required = {k.strip() for k in args.required.split(",") if k.strip()}

    # Argv first for backward compatibility, then env, then stdin.
    raw = args.secrets_json
    if raw is None:
        raw = os.environ.get("SECRETS_JSON")
    if raw is None:
        raw = sys.stdin.read()

    try:
        secrets = json.loads(raw)
    except json.JSONDecodeError as e:
        print(f"Failed to parse secrets JSON: {e}", file=sys.stderr)
        sys.exit(1)

    written = 0
    for key, value in secrets.items():
        if key not in required:
            continue
        # Escape newlines — env files don't support multiline values
        safe_value = str(value).replace("\n", "\\n").replace("\r", "")
        print(f"{key}={safe_value}")
        written += 1

    missing = required - set(secrets.keys())
    if missing:
        print(f"Warning: requested secrets not found: {', '.join(sorted(missing))}",
              file=sys.stderr)

    print(f"Wrote {written}/{len(required)} secrets to env file", file=sys.stderr)


if __name__ == "__main__":
    main()
import argparse
import os
import re
import sys
from pathlib import Path


def main() -> None:
    """Print the next available zero-padded 3-digit example number to stdout.

    New platforms get the next free multiple of ten. When --platform is
    given AND the previous base slot's directory name contains that slug,
    a subslot (base+1, base+2, ...) is used instead — a second example on
    the same platform shares the decade.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--examples-dir",
        default=os.environ.get("EXAMPLES_DIR", "examples"),
        help="Path to the examples/ directory",
    )
    parser.add_argument(
        "--platform",
        default=None,
        help="Optional platform slug to check for subslots",
    )
    args = parser.parse_args()

    examples_dir = Path(args.examples_dir)
    if not examples_dir.exists():
        # Fresh repo — start at 010
        print("010")
        return

    # Collect (number, directory-name) pairs so the platform slug can be
    # compared against the actual directory names below.
    existing: list[tuple[int, str]] = []
    for entry in examples_dir.iterdir():
        if not entry.is_dir():
            continue
        m = re.match(r"^(\d{3})", entry.name)
        if m:
            existing.append((int(m.group(1)), entry.name))

    if not existing:
        print("010")
        return

    max_num = max(n for n, _ in existing)

    # Find the next multiple of 10 above the current max
    next_base = ((max_num // 10) + 1) * 10

    # Subslot logic: if the previous base decade (e.g. 020–029) is occupied
    # by the SAME platform, use the next number inside it. The original
    # implementation never actually compared the platform slug against the
    # directory names — any occupied decade produced a subslot. This is
    # still a best-effort heuristic; the agent/workflow can override via
    # the issue body if needed.
    if args.platform:
        candidate_base = next_base - 10
        decade = [(n, name) for n, name in existing
                  if candidate_base <= n < next_base]
        same_platform = any(args.platform in name for _, name in decade)
        if decade and same_platform:
            next_subslot = max(n for n, _ in decade) + 1
            if next_subslot < next_base:
                print(f"{next_subslot:03d}")
                return

    print(f"{next_base:03d}")


if __name__ == "__main__":
    main()
def read_existing_examples() -> list[str]:
    """Return the sorted names of existing NNN-prefixed example directories."""
    p = Path(EXAMPLES_DIR)
    if not p.exists():
        return []
    return sorted(
        entry.name for entry in p.iterdir()
        if entry.is_dir() and re.match(r"^\d{3}-", entry.name)
    )


def _sanitize_slug(slug: str) -> str:
    """Force a kebab-case slug — the planner's output lands in a directory name,
    so path separators or other stray characters must never survive."""
    cleaned = re.sub(r"[^a-z0-9-]+", "-", str(slug).lower()).strip("-")
    return cleaned or "example"


def main() -> None:
    """Ask Claude for a build plan and print it to stdout as a JSON object.

    The plan is validated and enriched deterministically: unknown runtimes
    fall back to node, hallucinated existing_dir values fall back to a new
    example, the slug is sanitized, and required_secrets is normalized and
    filtered to secrets that actually exist in CI.

    Exits 1 (with the raw model output on stderr) if the planner returns
    unparseable JSON.
    """
    client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])

    existing_examples = read_existing_examples()

    user_message = f"""
Issue #{ISSUE_NUMBER}:

{ISSUE_BODY}

---

Existing examples in the repository:
{json.dumps(existing_examples, indent=2) if existing_examples else '(none yet)'}

Available secret names:
{json.dumps(AVAILABLE_SECRET_NAMES, indent=2)}

Return the JSON plan object.
""".strip()

    response = client.messages.create(
        model="claude-haiku-4-5",  # fast and cheap for planning
        max_tokens=512,
        system=SYSTEM_PROMPT,
        messages=[{"role": "user", "content": user_message}],
    )

    # First text block — don't assume content[0] is text (other block
    # types may precede it depending on model behaviour).
    raw = next(
        (block.text for block in response.content if hasattr(block, "text")),
        "",
    ).strip()

    # Strip markdown fences if the model added them anyway
    if raw.startswith("```"):
        raw = raw.split("\n", 1)[1].rsplit("```", 1)[0].strip()

    try:
        plan = json.loads(raw)
    except json.JSONDecodeError as e:
        print(f"Planner returned unparseable JSON: {e}\n{raw[:500]}", file=sys.stderr)
        sys.exit(1)

    # Validate and enrich
    action = plan.get("action", "new")
    runtime = plan.get("runtime", "node")
    plan["docker_image"] = RUNTIME_IMAGES.get(runtime, RUNTIME_IMAGES["node"])
    plan["example_number"] = EXAMPLE_NUMBER
    slug = _sanitize_slug(plan.get("slug", "example"))
    plan["slug"] = slug

    # Resolve workspace_subdir — the single source of truth for directory path
    if action == "modify":
        existing_dir = plan.get("existing_dir", "")
        if existing_dir not in existing_examples:
            # Planner hallucinated or matched wrong — fall back to new
            plan["action"] = "new"
            plan["workspace_subdir"] = f"{EXAMPLE_NUMBER}-{slug}"
        else:
            plan["workspace_subdir"] = existing_dir
    else:
        plan["action"] = "new"
        plan["workspace_subdir"] = f"{EXAMPLE_NUMBER}-{slug}"

    # Always ensure DEEPGRAM_API_KEY is included; tolerate a non-list value
    required = plan.get("required_secrets", [])
    if not isinstance(required, list):
        required = [required] if required else []
    required = [str(s) for s in required]
    if "DEEPGRAM_API_KEY" not in required:
        required.insert(0, "DEEPGRAM_API_KEY")

    # Filter to only secrets that actually exist in CI
    plan["required_secrets"] = [s for s in required if s in AVAILABLE_SECRET_NAMES]

    print(json.dumps(plan))


if __name__ == "__main__":
    main()
+""" + +import json +import os +import subprocess +import sys +import textwrap +from pathlib import Path +from typing import Any + +import anthropic + +from agent_state import WorkingMemory, RuleEngine, check_constraints + +# --------------------------------------------------------------------------- +# Config +# --------------------------------------------------------------------------- + +MODEL = "claude-opus-4-5" +WORKSPACE = Path(os.environ["WORKSPACE_DIR"]) # e.g. /repo/examples/NNN-slug +EXAMPLE_NUMBER = os.environ["EXAMPLE_NUMBER"] +EXAMPLE_SLUG = os.environ["EXAMPLE_SLUG"] +WORKSPACE_ACTION = os.environ.get("WORKSPACE_ACTION", "new") # "new" or "modify" +DOCKER_IMAGE = os.environ["DOCKER_IMAGE"] +ISSUE_BODY = os.environ["ISSUE_BODY"] +ISSUE_NUMBER = os.environ["ISSUE_NUMBER"] +CONTAINER_NAME = f"example-sandbox-{ISSUE_NUMBER}" +BUILD_LOG = Path(os.environ.get("BUILD_LOG", "/tmp/build-log.md")) +MAX_TURNS = int(os.environ.get("MAX_TURNS", "75")) + +client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"]) + +# --------------------------------------------------------------------------- +# Docker helpers +# --------------------------------------------------------------------------- + +def start_container() -> None: + """Start the sandbox container with workspace mounted and env file injected.""" + subprocess.run([ + "docker", "run", "-d", + "--name", CONTAINER_NAME, + "--env-file", "/tmp/sandbox.env", + # context7 and deepgram CLI need outbound network + "--network", "bridge", + # cap resources + "--memory", "2g", + "--cpus", "2", + "-v", f"{WORKSPACE}:/workspace", + "-w", "/workspace", + DOCKER_IMAGE, + # keep alive + "tail", "-f", "/dev/null", + ], check=True) + log("Container started", level="system") + + +def stop_container() -> None: + subprocess.run(["docker", "rm", "-f", CONTAINER_NAME], + capture_output=True) + + +def exec_in_container(command: str, timeout: int = 300) -> dict[str, Any]: + """Run a shell command inside the container, return 
stdout/stderr/exit_code.""" + result = subprocess.run( + ["docker", "exec", CONTAINER_NAME, "bash", "-c", command], + capture_output=True, + text=True, + timeout=timeout, + ) + return { + "stdout": result.stdout, + "stderr": result.stderr, + "exit_code": result.returncode, + } + + +# --------------------------------------------------------------------------- +# File helpers (operate on workspace, not inside container) +# --------------------------------------------------------------------------- + +def write_file(path: str, content: str) -> dict[str, Any]: + full = WORKSPACE / path + full.parent.mkdir(parents=True, exist_ok=True) + full.write_text(content) + return {"written": path, "bytes": len(content)} + + +def read_file(path: str) -> dict[str, Any]: + full = WORKSPACE / path + if not full.exists(): + return {"error": f"{path} does not exist"} + return {"content": full.read_text()} + + +def list_files(path: str = ".") -> dict[str, Any]: + full = WORKSPACE / path + if not full.exists(): + return {"error": f"{path} does not exist"} + files = [str(p.relative_to(WORKSPACE)) for p in sorted(full.rglob("*")) if p.is_file()] + return {"files": files} + + +# --------------------------------------------------------------------------- +# Tool definitions (passed to Claude) +# --------------------------------------------------------------------------- + +TOOLS = [ + { + "name": "run_command", + "description": ( + "Run a shell command inside the Docker sandbox. " + "Use this to install dependencies, run tests, start servers, " + "use the deepgram CLI, call context7, run Playwright, etc. " + "Long-running processes should be backgrounded with & and then " + "tested with a follow-up command. " + "Timeout is 300s by default but you can specify longer for heavy installs." 
+ ), + "input_schema": { + "type": "object", + "properties": { + "command": {"type": "string", "description": "Shell command to run"}, + "timeout": {"type": "integer", "description": "Timeout in seconds (default 300)"}, + }, + "required": ["command"], + }, + }, + { + "name": "write_file", + "description": "Write or overwrite a file in the example workspace. Path is relative to the workspace root.", + "input_schema": { + "type": "object", + "properties": { + "path": {"type": "string", "description": "Relative file path, e.g. src/index.ts"}, + "content": {"type": "string", "description": "Full file content"}, + }, + "required": ["path", "content"], + }, + }, + { + "name": "read_file", + "description": "Read a file from the example workspace.", + "input_schema": { + "type": "object", + "properties": { + "path": {"type": "string", "description": "Relative file path"}, + }, + "required": ["path"], + }, + }, + { + "name": "list_files", + "description": "List all files in a directory within the workspace.", + "input_schema": { + "type": "object", + "properties": { + "path": {"type": "string", "description": "Relative directory path (default '.')"}, + }, + }, + }, +] + + +# --------------------------------------------------------------------------- +# Tool dispatch +# --------------------------------------------------------------------------- + +def dispatch_tool(name: str, input_: dict) -> str: + if name == "run_command": + result = exec_in_container( + input_["command"], + timeout=input_.get("timeout", 300), + ) + log_tool_result(name, input_["command"], result) + return json.dumps(result) + + elif name == "write_file": + result = write_file(input_["path"], input_["content"]) + log(f"wrote {input_['path']}", level="file") + return json.dumps(result) + + elif name == "read_file": + result = read_file(input_["path"]) + return json.dumps(result) + + elif name == "list_files": + result = list_files(input_.get("path", ".")) + return json.dumps(result) + + else: + return 
json.dumps({"error": f"unknown tool: {name}"}) + + +# --------------------------------------------------------------------------- +# Build log +# --------------------------------------------------------------------------- + +def log(message: str, level: str = "info") -> None: + prefix = {"info": "ℹ️", "system": "⚙️", "file": "📄", "error": "❌"}.get(level, "•") + line = f"{prefix} {message}\n" + sys.stdout.write(line) + sys.stdout.flush() + with BUILD_LOG.open("a") as f: + f.write(line) + + +def log_tool_result(tool: str, command: str, result: dict) -> None: + with BUILD_LOG.open("a") as f: + f.write(f"\n### `{tool}`: `{command}`\n") + f.write(f"**exit**: `{result['exit_code']}`\n") + if result.get("stdout"): + f.write(f"```\n{result['stdout'][:4000]}\n```\n") + if result.get("stderr"): + f.write(f"**stderr**:\n```\n{result['stderr'][:2000]}\n```\n") + + +# --------------------------------------------------------------------------- +# Install agent prerequisites inside the container +# --------------------------------------------------------------------------- + +BOOTSTRAP_SCRIPT = """ +set -e + +# Deepgram CLI +if ! 
command -v deepgram &> /dev/null; then + curl -fsSL https://raw.githubusercontent.com/deepgram/deepgram-cli/main/install.sh | sh +fi + +# context7 CLI (npx-based, no install needed but ensure node is present) +# Playwright (Python) +pip install playwright --quiet 2>/dev/null || true +playwright install chromium --with-deps 2>/dev/null || true + +echo "Bootstrap complete" +""" + +def bootstrap_container() -> None: + log("Bootstrapping container tools...") + result = exec_in_container(BOOTSTRAP_SCRIPT, timeout=600) + if result["exit_code"] != 0: + log(f"Bootstrap warning (non-fatal): {result['stderr'][:500]}", level="error") + + +# --------------------------------------------------------------------------- +# Main agent loop +# --------------------------------------------------------------------------- + +def build_system_prompt() -> str: + prompt_path = Path(__file__).parent / "system_prompt.md" + base = prompt_path.read_text() + + # WORKSPACE is examples/{NNN}-{slug}/ (or existing dir for modify) + # .parent is the examples/ directory + examples_dir = WORKSPACE.parent + existing = sorted(p.name for p in examples_dir.iterdir() + if p.is_dir()) if examples_dir.exists() else [] + + workspace_dir_name = WORKSPACE.name # NNN-slug (new) or existing dir (modify) + + modification_context = "" + if WORKSPACE_ACTION == "modify": + modification_context = textwrap.dedent(f""" + + --- + + ## IMPORTANT: This is a MODIFICATION task + + You are updating an **existing** example at `examples/{workspace_dir_name}/`. + The workspace already contains the existing code. + + Before making any changes: + 1. Use `list_files` to understand the current structure + 2. Use `read_file` to read key files (README, BLOG.md, tests, src/) + 3. Understand exactly what the issue is asking you to change + 4. Make targeted changes — preserve what works, fix/extend what needs it + 5. 
Run existing tests first to confirm the baseline, then update as needed + """) + + return base + modification_context + textwrap.dedent(f""" + + --- + + ## Runtime context + + - Action: `{WORKSPACE_ACTION}` ({'creating new' if WORKSPACE_ACTION == 'new' else 'modifying existing'} example) + - Example number: `{EXAMPLE_NUMBER}` + - Example slug: `{EXAMPLE_SLUG}` + - Docker image: `{DOCKER_IMAGE}` + - Workspace root: `/workspace` + - Existing examples in repo: {', '.join(existing) if existing else 'none yet'} + - Your {'output' if WORKSPACE_ACTION == 'new' else 'target'} directory: `examples/{workspace_dir_name}/` + + The workspace is mounted at `/workspace` inside the container. + All file paths in tool calls are relative to the workspace root. + """) + + +def run_agent() -> None: + system_prompt = build_system_prompt() + + user_message = textwrap.dedent(f""" + Build the following example for the Deepgram examples repository. + + Issue #{ISSUE_NUMBER}: + + {ISSUE_BODY} + + --- + + Steps: + 1. Check context7 docs for the relevant SDK/library before writing any code + 2. Use the deepgram CLI to verify API connectivity early + 3. Implement the example following all repo conventions + 4. Install dependencies and run tests + 5. Fix anything that fails and re-run + 6. Keep going until tests exit 0 + 7. Output AGENT_DONE when complete + """).strip() + + messages = [{"role": "user", "content": user_message}] + turn = 0 + + # Symbolic components — working memory and rule engine + wm = WorkingMemory() + engine = RuleEngine(wm, MAX_TURNS) + + log(f"Starting agent loop for example {EXAMPLE_NUMBER}-{EXAMPLE_SLUG} (max turns: {MAX_TURNS})") + + while True: + turn += 1 + wm.turn = turn + + if turn > MAX_TURNS: + msg = ( + f"AGENT_TURN_LIMIT_EXCEEDED: reached {MAX_TURNS} turns without completing. " + "The build did not reach a passing state. Review the build log for the last known state." 
+ ) + log(msg, level="error") + with BUILD_LOG.open("a") as f: + f.write(f"\n---\n\n## ❌ Turn limit exceeded\n\n{msg}\n") + sys.exit(1) + + log(f"Turn {turn}/{MAX_TURNS} — {wm.summary()}") + + response = client.messages.create( + model=MODEL, + max_tokens=8096, + system=system_prompt, + tools=TOOLS, + messages=messages, + ) + + # Append assistant response to history + messages.append({"role": "assistant", "content": response.content}) + + # ---------------------------------------------------------------- + # AGENT_DONE detection — symbolic constraint check before accepting + # ---------------------------------------------------------------- + agent_done = any( + block.type == "text" and "AGENT_DONE" in block.text + for block in response.content + ) + if agent_done: + violations = check_constraints(WORKSPACE) + if violations: + log( + f"Constraint checker blocked AGENT_DONE — {len(violations)} violation(s)", + level="system", + ) + with BUILD_LOG.open("a") as f: + f.write("\n---\n\n### ⛔ AGENT_DONE rejected by constraint checker\n\n") + for v in violations: + f.write(f"- {v}\n") + constraint_msg = ( + "❌ [CONSTRAINT-CHECKER] `AGENT_DONE` was rejected. " + "The following requirements are not yet satisfied:\n\n" + + "\n".join(f"- {v}" for v in violations) + + "\n\nComplete all of the above, then output `AGENT_DONE` again." 
+ ) + messages.append({"role": "user", "content": constraint_msg}) + continue + else: + log(f"Agent signalled completion after {turn} turns — constraints verified ✓") + return + + # ---------------------------------------------------------------- + # end_turn without AGENT_DONE — evaluate rules, prompt to continue + # ---------------------------------------------------------------- + if response.stop_reason == "end_turn": + log("Agent stopped without AGENT_DONE — prompting to continue", level="error") + firings = engine.evaluate([]) + rule_injections = ( + "\n\n" + "\n".join(f.message for f in firings) + if firings else "" + ) + messages.append({ + "role": "user", + "content": ( + "Tests have not passed yet. Continue working until they do, " + "then output AGENT_DONE." + rule_injections + ), + }) + continue + + if response.stop_reason != "tool_use": + log(f"Unexpected stop reason: {response.stop_reason}", level="error") + break + + # ---------------------------------------------------------------- + # Process tool calls — update working memory after each dispatch + # ---------------------------------------------------------------- + tool_results = [] + raw_results: list[dict] = [] + + for block in response.content: + if block.type != "tool_use": + continue + result_str = dispatch_tool(block.name, block.input) + result_dict = json.loads(result_str) + + # Update symbolic working memory + wm.update_from_tool_result(block.name, block.input, result_dict) + raw_results.append(result_dict) + + tool_results.append({ + "type": "tool_result", + "tool_use_id": block.id, + "content": result_str, + }) + + # ---------------------------------------------------------------- + # Forward-chain rules over this turn's results + # Inject firing messages as a text block after tool_results + # ---------------------------------------------------------------- + firings = engine.evaluate(raw_results) + if firings: + rule_text = "\n".join(f.message for f in firings) + log(f"Rules fired: 
{[f.rule_id for f in firings]}", level="system") + tool_results.append({"type": "text", "text": rule_text}) + + messages.append({"role": "user", "content": tool_results}) + + +# --------------------------------------------------------------------------- +# Entry point +# --------------------------------------------------------------------------- + +if __name__ == "__main__": + BUILD_LOG.write_text(f"# Build log: example {EXAMPLE_NUMBER}-{EXAMPLE_SLUG}\n\n") + WORKSPACE.mkdir(parents=True, exist_ok=True) + + try: + start_container() + bootstrap_container() + run_agent() + except KeyboardInterrupt: + log("Interrupted", level="error") + sys.exit(1) + finally: + stop_container() diff --git a/.github/scripts/system_prompt.md b/.github/scripts/system_prompt.md new file mode 100644 index 0000000..d90a38f --- /dev/null +++ b/.github/scripts/system_prompt.md @@ -0,0 +1,161 @@ +# Deepgram Examples Agent + +You are a senior developer building a working, production-quality code example for the Deepgram examples repository. You are not a PM, not a lead, not a reviewer. You write code, run it, fix it, and repeat until it works. + +## Your mandate + +Build a complete, tested, runnable example based on the issue provided. You have a high turn limit — use all of it if you need to. If something fails, diagnose it, fix it, try again. Do not give up early. Do not summarise what you would do — do it. Do not declare something "good enough" to avoid more work. + +If you genuinely cannot make something work after exhausting all approaches, explain specifically what blocked you before stopping. That is the only acceptable reason to stop before the definition of done is met. + +--- + +## Implementation priority (strict order) + +1. **Partner library that has Deepgram built in** — if the target platform/framework has an official integration or plugin that wraps Deepgram (e.g. LiveKit Agents, Pipecat, Vercel AI SDK with Deepgram provider, OpenAI Agents SDK with Deepgram), use it. 
This is always preferred. +2. **Official Deepgram SDK** — if no partner library exists, use the official SDK for the target language (`@deepgram/sdk` for Node/TS, `deepgram` for Python, etc.) +3. Nothing else is acceptable. No raw HTTP calls to the Deepgram API. No third-party wrappers. + +Check context7 docs before starting to confirm the correct SDK version, import paths, and any known breaking changes. + +--- + +## Credentials and integration strategy + +You have been given a filtered set of secrets as environment variables. These were selected because this example needs them. **Use them.** Real integrations are the goal. + +However, some things genuinely cannot be automated: + +- Provisioning a phone number (Twilio, Telnyx, Signalwire, Plivo, Vonage) +- Receiving a webhook callback from an external service during a CI run +- OAuth flows requiring a browser +- Hardware devices (microphones, cameras) + +For these, and **only** these, use a mock. The rule: + +> **Mock the upstream service's side. Never mock Deepgram.** + +Deepgram must always be real. Use the real `DEEPGRAM_API_KEY`. Run real STT, TTS, or Voice Agent calls. If you need audio input, use a real audio file (download one, generate one with TTS, or use a fixture). The integration partner is what gets mocked when necessary. + +### Mock strategy + +- Spin up a local HTTP server that replicates the webhook or API surface of the upstream service +- Use it to drive the example as if the real service were connected +- Document clearly in the README and BLOG.md what is real vs mocked and why +- If you have real credentials for the upstream service and the integration can work without provisioning (e.g. 
REST API calls, recording retrieval, bot joining a room), try the real path first + +--- + +## Tools available to you + +- **Shell** — run any command in the sandbox container +- **File write/read** — create and edit files in the workspace +- **Playwright** — browser automation for any UI-facing examples; also useful for smoke-testing a running web server and taking screenshots +- **`deepgram` CLI** — available in the container, authenticated with `DEEPGRAM_API_KEY`. Use it to validate API connectivity, test models, check feature availability +- **`context7`** — use to look up current Deepgram SDK docs, API references, and product info. Always check here before assuming SDK method signatures or model names + +--- + +## Repository conventions + +### Directory structure + +``` +examples/{NNN}-{slug}/ + README.md # quickstart guide — what it does, env vars, how to run, what to expect + BLOG.md # step-by-step blog post walking through the development process + .env.example # every required env var listed, no values + screenshot.png # Playwright screenshot (1240x760) — for UI/terminal examples + src/ # all source code + tests/ # tests — exit 0 = pass, exit 1 = fail, exit 2 = missing credentials +``` + +### Numbering + +- Read the existing `examples/` directory to find the highest existing number +- Claim the next multiple of 10 for a new platform (`010`, `020`, `030`...) 
+- Use a subslot for a second example on the same platform (`021` if `020` exists) +- Your example number was assigned before you started — use it + +### Tests + +- Tests must exit 0 for the PR to be valid +- Exit 2 if credentials are missing (not a failure, a skip) +- Tests should actually exercise the integration, not just check that the file exists +- Playwright tests are fine for browser/UI examples +- For long-running servers, start the process in the background, run assertions against it, then tear it down + +### README (quickstart guide) + +- One sentence describing what the example does +- Prerequisites (accounts to create, CLI tools to install) +- All environment variables with descriptions +- How to run it locally (exact commands, no ambiguity) +- What to expect when it works (exact output, UI behavior, etc.) +- If anything is mocked, say so and explain why +- If a screenshot exists, embed it near the top: `![Screenshot](./screenshot.png)` + +### BLOG.md (developer narrative) + +Write a step-by-step blog post that walks a developer through building this example from scratch. This is not a summary of what you built — it is a guide that teaches someone how to build it themselves: + +- Explain **why** each decision was made, not just what +- Show all the code as it's introduced, step by step +- Explain how to set up credentials and what to expect from the API +- Call out any gotchas, non-obvious choices, or things that took iteration to get right +- End with "What's next" — natural extensions or related Deepgram features + +The blog post should be good enough to publish as-is on a developer blog. + +### Screenshots + +If the example has any UI component (browser app, terminal output that a user would see, dashboard, chat interface) **and** Playwright is available: + +1. Start the example application in the background +2. Use Playwright to navigate to it and take a screenshot at **1240×760** pixels +3.
Save it as `screenshot.png` in the example root directory +4. Embed it in README.md near the top + +For terminal-only examples that produce meaningful output, use `script` or similar to capture terminal output, save as a text file, or skip the screenshot — don't force a screenshot where it doesn't make sense. + +--- + +## Self-review loop + +After you believe the example is complete, **verify it yourself** by following the BLOG.md steps as if you were a new developer: + +1. Read BLOG.md from the top +2. Follow every step in order using `run_command` +3. If a step fails, doesn't work as described, or produces different output than documented: + - Fix the example code, the test, or the BLOG.md step — whichever is wrong + - Restart the self-review from the beginning +4. Only proceed to `AGENT_DONE` when the full BLOG.md walkthrough completes successfully end-to-end + +This loop catches the most common class of failures: code that works in isolation but whose instructions don't match reality. + +--- + +## Definition of done + +Every single one of these must be true before you output `AGENT_DONE`: + +- [ ] The example demonstrates the integration end to end — no shortcuts, no skipped steps, nothing avoided +- [ ] Unit tests written and passing (exit 0) +- [ ] Integration tests written and passing (exit 0) — these must make real calls, not mock Deepgram +- [ ] Browser/Playwright tests written and passing if the example has any UI component +- [ ] Deepgram integration is real — real API calls, real responses, real audio +- [ ] README is a clear quickstart guide: what it does (one sentence), prerequisites, every env var with description, exact run commands, expected output. Screenshot embedded if one was taken. Mocked components documented. 
+- [ ] BLOG.md is a complete, publishable developer walkthrough of building this example from scratch +- [ ] Self-review loop passed — BLOG.md steps followed end-to-end with run_command, all steps produce the documented output +- [ ] `screenshot.png` present in the example root if the example has any UI or meaningful visual output (1240×760, taken with Playwright) +- [ ] `.env.example` lists every required variable with no values +- [ ] Code is in `src/`, tests are in `tests/` +- [ ] No secrets are hardcoded anywhere + +Do not output `AGENT_DONE` until every item above is checked. If tests are failing, keep working. If something is partially implemented, finish it. + +When all of the above are true, output the following and nothing else: + +``` +AGENT_DONE +``` diff --git a/.github/workflows/claude-mentions.yml b/.github/workflows/claude-mentions.yml deleted file mode 100644 index 5078b4c..0000000 --- a/.github/workflows/claude-mentions.yml +++ /dev/null @@ -1,130 +0,0 @@ -name: Claude — Respond to @mentions - -on: - issue_comment: - types: [created] - pull_request_review_comment: - types: [created] - -jobs: - respond: - # Only fire when the comment mentions @claude - if: contains(github.event.comment.body, '@claude') - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - issues: write - id-token: write - - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - name: Check actor is a team member - id: auth - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - USERNAME="${{ github.event.comment.user.login }}" - # Skip bots - if [[ "$USERNAME" == *"[bot]"* ]] || [[ "$USERNAME" == "github-actions" ]]; then - echo "allowed=false" >> $GITHUB_OUTPUT - exit 0 - fi - # Check Deepgram org membership OR repo write permission — before any AI engagement - 
IS_ORG_MEMBER=false - if gh api "orgs/deepgram/members/${USERNAME}" -i 2>/dev/null | head -1 | grep -q "204"; then - IS_ORG_MEMBER=true - fi - PERM=$(gh api "repos/${{ github.repository }}/collaborators/${USERNAME}/permission" \ - --jq '.permission' 2>/dev/null || echo "none") - if [[ "$IS_ORG_MEMBER" == "true" || "$PERM" == "write" || "$PERM" == "maintain" || "$PERM" == "admin" ]]; then - echo "allowed=true" >> $GITHUB_OUTPUT - echo "$USERNAME allowed (org=$IS_ORG_MEMBER repo=$PERM)" - else - echo "allowed=false" >> $GITHUB_OUTPUT - echo "$USERNAME not allowed — silently exiting" - fi - - - name: Get context - id: ctx - if: steps.auth.outputs.allowed == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - # Resolve PR/issue number from the comment URL - # issue_comment.issue_url works for both issues and PRs - ISSUE_NUM=$(echo "${{ github.event.issue.number }}" | tr -d '"') - echo "issue_number=$ISSUE_NUM" >> $GITHUB_OUTPUT - - # Is this on a PR? - PR_DATA=$(gh pr view "$ISSUE_NUM" --repo ${{ github.repository }} \ - --json number,headRefName 2>/dev/null || echo "") - if [ -n "$PR_DATA" ]; then - BRANCH=$(echo "$PR_DATA" | jq -r '.headRefName') - echo "is_pr=true" >> $GITHUB_OUTPUT - echo "branch=$BRANCH" >> $GITHUB_OUTPUT - git fetch origin "$BRANCH" 2>/dev/null || true - git checkout "$BRANCH" 2>/dev/null || true - else - echo "is_pr=false" >> $GITHUB_OUTPUT - fi - - - name: Respond - if: steps.auth.outputs.allowed == 'true' - uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Write,Edit,Glob,Grep,WebSearch,WebFetch" - timeout_minutes: 20 - direct_prompt: | - A Deepgram org member has mentioned 
@claude in a comment on GitHub. - - **Comment from @${{ github.event.comment.user.login }}:** - ${{ github.event.comment.body }} - - **Context:** - - Issue/PR number: ${{ steps.ctx.outputs.issue_number }} - - Is a PR: ${{ steps.ctx.outputs.is_pr }} - - Branch: ${{ steps.ctx.outputs.branch }} - - Repository: ${{ github.repository }} - - Comment URL: ${{ github.event.comment.html_url }} - - Your job is to act on the request in the comment. Read the relevant code, - make any requested changes, and reply with a comment explaining what you did - (or why you can't do it). - - If the request involves editing files: - - Make the changes - - Commit them to the PR branch - - Reply with a summary - - If the request is a question: - - Answer it directly in a comment - - Always post a reply comment to close the loop. Replies are posted - as github-actions[bot] (the GITHUB_TOKEN identity) — do NOT sign - with the name of the person who asked the question. The reply - should make it clear Claude acted on the request: - ```bash - gh pr comment ${{ steps.ctx.outputs.issue_number }} \ - --repo ${{ github.repository }} \ - --body "**Claude:** your response here" - ``` - - ⛔ Never modify files under .github/ - ⛔ Never post a reply that sounds like it came from @${{ github.event.comment.user.login }} diff --git a/.github/workflows/engineer.yml b/.github/workflows/engineer.yml deleted file mode 100644 index 48debb1..0000000 --- a/.github/workflows/engineer.yml +++ /dev/null @@ -1,179 +0,0 @@ -name: Engineer - -# Builds new examples from the queue, runs tests, and opens the PR. -# The engineer does its own Kapa research — no separate researcher step needed. -# Only opens a PR when tests pass (or documents what failed after one fix attempt). 
- -on: - issues: - types: [labeled] # Fires when action:generate or action:research is applied - schedule: - - cron: '27 */4 * * *' # Fallback sweep every 4h (catches missed label events) - workflow_dispatch: - inputs: - issue_number: - description: 'Queue issue number to build (optional)' - required: false - -jobs: - run: - if: > - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'issues' && - (contains(github.event.issue.labels.*.name, 'action:generate') || - contains(github.event.issue.labels.*.name, 'action:research'))) - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - actions: write - issues: write - statuses: write - id-token: write - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - uses: pnpm/action-setup@b906affcce14559ad1aafd4ab0e942779e9f58b1 # v4 - with: - version: latest - - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 - with: - node-version: '20' - - - name: Install Bun - uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2 - - - name: Install Deno - uses: denoland/setup-deno@667a34cdef165d8d2b2e98dde39547c9daac7282 # v2.0.4 - with: - deno-version: v2.x - - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 - with: - python-version: '3.11' - - - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 - with: - go-version: '1.22' - - - name: Check actor is a team member - id: auth - if: github.event_name == 'issues' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - ACTOR="${{ github.actor }}" - if [[ "$ACTOR" == *"[bot]"* ]] || [[ "$ACTOR" == "github-actions" ]]; then - echo "allowed=true" >> $GITHUB_OUTPUT; exit 0 - fi - IS_ORG_MEMBER=false - if gh api "orgs/deepgram/members/${ACTOR}" -i 
2>/dev/null | head -1 | grep -q "204"; then - IS_ORG_MEMBER=true - fi - PERM=$(gh api "repos/${{ github.repository }}/collaborators/${ACTOR}/permission" \ - --jq '.permission' 2>/dev/null || echo "none") - if [[ "$IS_ORG_MEMBER" == "true" || "$PERM" == "write" || "$PERM" == "maintain" || "$PERM" == "admin" ]]; then - echo "allowed=true" >> $GITHUB_OUTPUT - else - echo "allowed=false" >> $GITHUB_OUTPUT - echo "Actor $ACTOR not a Deepgram org member or repo collaborator — silently exiting" - fi - - - name: Get date - id: date - run: echo "date=$(date -u +%Y-%m-%d)" >> $GITHUB_OUTPUT - - - name: Fetch latest Deepgram SDK versions - id: sdk - run: | - latest() { curl -sf "https://api.github.com/repos/deepgram/$1/releases/latest" | jq -r '.tag_name // "unknown"'; } - echo "python=$(latest deepgram-python-sdk)" >> $GITHUB_OUTPUT - echo "js=$(latest deepgram-js-sdk)" >> $GITHUB_OUTPUT - echo "go=$(latest deepgram-go-sdk)" >> $GITHUB_OUTPUT - echo "java=$(latest deepgram-java-sdk)" >> $GITHUB_OUTPUT - echo "rust=$(latest deepgram-rust-sdk)" >> $GITHUB_OUTPUT - echo "dotnet=$(latest deepgram-dotnet-sdk)" >> $GITHUB_OUTPUT - echo "cli=$(latest cli)" >> $GITHUB_OUTPUT - cat $GITHUB_OUTPUT | grep -E "^(python|js|go|java|rust|dotnet|cli)=" - - - name: Check back pressure - id: backpressure - if: steps.auth.outputs.allowed != 'false' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - READY=$(gh pr list --repo ${{ github.repository }} --state open \ - --label "status:review-passed" \ - --json number --jq 'length' 2>/dev/null || echo "0") - echo "PRs ready to merge: $READY" - if [ "$READY" -ge 5 ]; then - echo "blocked=true" >> $GITHUB_OUTPUT - echo "⏸ Back pressure: $READY PRs waiting for merge — skipping" - else - echo "blocked=false" >> $GITHUB_OUTPUT - echo "✓ Pipeline has capacity ($READY/5 slots used)" - fi - - - name: Build, test, and open PR - if: steps.auth.outputs.allowed != 'false' && steps.backpressure.outputs.blocked != 'true' - uses: 
anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Write,Edit,Glob,Grep,WebSearch,WebFetch" - timeout_minutes: 60 - direct_prompt: | - Read and execute instructions/engineer.md. - - Context: - - Today's date: ${{ steps.date.outputs.date }} - - Repository: ${{ github.repository }} - - Trigger: ${{ github.event_name }} - - Issue number (if triggered by issue): ${{ github.event.issue.number }} - - Manual issue override: ${{ inputs.issue_number }} - - REQUIRED — use EXACTLY these Deepgram SDK versions, no older: - - Python: deepgram-sdk==${{ steps.sdk.outputs.python }} (requirements.txt) - - JavaScript: @deepgram/sdk@${{ steps.sdk.outputs.js }} (package.json) - - Go: ${{ steps.sdk.outputs.go }} (go.mod) - - Java: ${{ steps.sdk.outputs.java }} (pom.xml/build.gradle) - - Rust: ${{ steps.sdk.outputs.rust }} (Cargo.toml) - - .NET: ${{ steps.sdk.outputs.dotnet }} (*.csproj) - - CLI: ${{ steps.sdk.outputs.cli }} - - Pin Python with == not >=. Pin JS with the exact version (no ^ or ~). 
- env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - PIPECAT_API_KEY: ${{ secrets.PIPECAT_API_KEY }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_APP_TOKEN: ${{ secrets.SLACK_APP_TOKEN }} - TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} - ZOOM_ACCOUNT_ID: ${{ secrets.ZOOM_ACCOUNT_ID }} - ZOOM_CLIENT_ID: ${{ secrets.ZOOM_CLIENT_ID }} - ZOOM_CLIENT_SECRET: ${{ secrets.ZOOM_CLIENT_SECRET }} - ZOOM_WEBHOOK_SECRET_TOKEN: ${{ secrets.ZOOM_WEBHOOK_SECRET_TOKEN }} diff --git a/.github/workflows/engineering.yml b/.github/workflows/engineering.yml new file mode 100644 index 0000000..fcf1463 --- /dev/null +++ b/.github/workflows/engineering.yml @@ -0,0 +1,489 @@ +name: Engineering Pipeline + +# Unified pipeline for all engineering work in this repo. +# +# build — triggered by type:suggestion label, cron sweep, or dispatch. +# Plans, builds, and tests new/modified examples in a Docker +# sandbox with a neurosymbolic agent loop. PRs are opened for +# human review before anything merges. +# +# engineering — triggered when a Deepgram org member @mentions @claude in any +# issue or PR comment. Full unrestricted access to the entire repo. +# Acts on the request, commits if needed, and replies in the thread. 
+# +# Both jobs gate on Deepgram org membership before spending any AI budget. + +on: + issues: + types: [labeled] + issue_comment: + types: [created] + pull_request_review_comment: + types: [created] + schedule: + - cron: '0 */4 * * *' # Every 4 hours — sweep for unprocessed suggestions + workflow_dispatch: + inputs: + issue_number: + description: 'Issue number to process (leave blank for auto-discovery)' + required: false + +jobs: + + # ── Build: create or modify examples from issue suggestions ───────────────── + build: + if: | + (github.event_name == 'issues' && github.event.label.name == 'type:suggestion') || + github.event_name == 'schedule' || + github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + timeout-minutes: 120 + + concurrency: + group: build-${{ github.event.issue.number || 'sweep' }} + cancel-in-progress: false + + permissions: + contents: write + pull-requests: write + issues: write + + steps: + - name: Checkout repo + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + + # ------------------------------------------------------------------ + # 1. Resolve which issue to work on + # - label event → use the triggering issue + # - dispatch+input → use the provided issue number + # - schedule/bare dispatch → auto-discover oldest unprocessed suggestion + # ------------------------------------------------------------------ + - name: Resolve issue + id: issue + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + EVENT="${{ github.event_name }}" + INPUT_NUM="${{ inputs.issue_number }}" + + if [ "$EVENT" = "issues" ]; then + number="${{ github.event.issue.number }}" + elif [ -n "$INPUT_NUM" ]; then + number="$INPUT_NUM" + else + # Auto-discover: oldest open type:suggestion with no build started yet. + # The marker '<!-- example-build -->' is injected into the issue comment when a PR is + # opened, so its presence means a build is already in progress or done. (Marker name reconstructed — keep in sync with the PR-comment step.)
 + result=$(gh issue list \ + --label "type:suggestion" \ + --state open \ + --json number \ + --limit 50 | python3 -c " +import json, sys, subprocess + +issues = json.load(sys.stdin) +numbers = [i['number'] for i in reversed(issues)] # oldest first + +for num in numbers: + r = subprocess.run( + ['gh', 'issue', 'view', str(num), + '--repo', '${{ github.repository }}', + '--json', 'comments'], + capture_output=True, text=True + ) + comments = json.loads(r.stdout).get('comments', []) + if any('<!-- example-build -->' in c.get('body', '') for c in comments): + continue # build already started or completed + print(json.dumps({'number': num})) + break +else: + print('SKIP') +") + if [ "$result" = "SKIP" ] || [ -z "$result" ]; then + echo "No unprocessed suggestions found." + echo "skip=true" >> "$GITHUB_OUTPUT" + exit 0 + fi + number=$(echo "$result" | python3 -c "import json,sys; print(json.load(sys.stdin)['number'])") + fi + + # Fetch author, body, and labels in one call + issue_json=$(gh issue view "$number" --json author,body,labels) + author=$(echo "$issue_json" | python3 -c "import json,sys; print(json.load(sys.stdin)['author']['login'])") + body=$(echo "$issue_json" | python3 -c "import json,sys; print(json.load(sys.stdin)['body'])") + has_suggestion=$(echo "$issue_json" | python3 -c " +import json, sys +labels = {l['name'] for l in json.load(sys.stdin)['labels']} +print('true' if 'type:suggestion' in labels else 'false') +") + + # ------------------------------------------------------------------ + # Authz: build if any of the following are true: + # 1. Author is a bot (automated suggestions) + # 2. Author is a Deepgram org member + # 3. Issue carries the type:suggestion label — only users with write + # access can apply labels, so its presence means an org member + # has explicitly approved this external issue for building. + # This is the manual approval path for non-org-member issues.
+ # ------------------------------------------------------------------ + if [[ "$author" != *"[bot]"* ]] && [[ "$has_suggestion" != "true" ]]; then + http_line=$(gh api "orgs/deepgram/members/${author}" -i 2>/dev/null | head -1) + if ! echo "$http_line" | grep -q "204"; then + echo "Issue #${number} author '${author}' is not a Deepgram org member and has no label approval. Skipping." + echo "skip=true" >> "$GITHUB_OUTPUT" + exit 0 + fi + fi + + echo "number=$number" >> "$GITHUB_OUTPUT" + # Multiline body — use unique heredoc delimiter + echo "body<<ISSUE_BODY_EOF" >> "$GITHUB_OUTPUT" + echo "$body" >> "$GITHUB_OUTPUT" + echo "ISSUE_BODY_EOF" >> "$GITHUB_OUTPUT" + + - name: Skip if nothing to build + if: steps.issue.outputs.skip == 'true' + run: echo "Skipping — no eligible issue to build." + + # ------------------------------------------------------------------ + # 2. Extract secret names (not values) from the secrets context + # ------------------------------------------------------------------ + - name: Extract secret names + id: secret_names + if: steps.issue.outputs.skip != 'true' + env: + ALL_SECRETS: ${{ toJSON(secrets) }} + run: | + echo "$ALL_SECRETS" | python3 -c " + import json, sys + names = list(json.load(sys.stdin).keys()) + # Strip the auto-injected GitHub token — agent doesn't need it + names = [n for n in names if n.upper() not in ('GITHUB_TOKEN',)] + print(','.join(names)) + " > /tmp/secret_names.txt + echo "names=$(cat /tmp/secret_names.txt)" >> "$GITHUB_OUTPUT" + + # ------------------------------------------------------------------ + # 3.
Determine next example number (used only if action=new) + # ------------------------------------------------------------------ + - name: Determine example number + id: example_number + if: steps.issue.outputs.skip != 'true' + run: | + number=$(python3 .github/scripts/next_example_number.py \ + --examples-dir examples) + echo "number=$number" >> "$GITHUB_OUTPUT" + + # ------------------------------------------------------------------ + # 4. Install Python dependencies + # ------------------------------------------------------------------ + - name: Install Python dependencies + if: steps.issue.outputs.skip != 'true' + run: pip install anthropic --quiet + + # ------------------------------------------------------------------ + # 5. Planning phase — detect runtime, slug, action, required secrets + # ------------------------------------------------------------------ + - name: Plan example + id: plan + if: steps.issue.outputs.skip != 'true' + env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + ISSUE_BODY: ${{ steps.issue.outputs.body }} + ISSUE_NUMBER: ${{ steps.issue.outputs.number }} + EXAMPLE_NUMBER: ${{ steps.example_number.outputs.number }} + SECRET_NAMES: ${{ steps.secret_names.outputs.names }} + EXAMPLES_DIR: ${{ github.workspace }}/examples + run: | + plan=$(python3 .github/scripts/plan_agent.py) + echo "plan=$plan" >> "$GITHUB_OUTPUT" + + # Extract fields for subsequent steps + echo "action=$(echo "$plan" | python3 -c "import json,sys; print(json.load(sys.stdin).get('action','new'))")" >> "$GITHUB_OUTPUT" + echo "runtime=$(echo "$plan" | python3 -c "import json,sys; print(json.load(sys.stdin)['runtime'])")" >> "$GITHUB_OUTPUT" + echo "slug=$(echo "$plan" | python3 -c "import json,sys; print(json.load(sys.stdin)['slug'])")" >> "$GITHUB_OUTPUT" + echo "docker_image=$(echo "$plan" | python3 -c "import json,sys; print(json.load(sys.stdin)['docker_image'])")" >> "$GITHUB_OUTPUT" + echo "required_secrets=$(echo "$plan" | python3 -c "import json,sys; 
print(','.join(json.load(sys.stdin)['required_secrets']))")" >> "$GITHUB_OUTPUT" + # workspace_subdir is NNN-slug for new, existing dir name for modify + echo "workspace_subdir=$(echo "$plan" | python3 -c "import json,sys; print(json.load(sys.stdin)['workspace_subdir'])")" >> "$GITHUB_OUTPUT" + + # ------------------------------------------------------------------ + # 6. Build filtered env file (only required secrets, values injected) + # ------------------------------------------------------------------ + - name: Build sandbox env file + if: steps.issue.outputs.skip != 'true' + env: + ALL_SECRETS: ${{ toJSON(secrets) }} + REQUIRED_SECRETS: ${{ steps.plan.outputs.required_secrets }} + run: | + python3 .github/scripts/filter_secrets.py \ + --required "$REQUIRED_SECRETS" \ + --secrets-json "$ALL_SECRETS" \ + > /tmp/sandbox.env + + echo "Env file written with $(wc -l < /tmp/sandbox.env) secrets" + + # ------------------------------------------------------------------ + # 7. Pull Docker image + # ------------------------------------------------------------------ + - name: Pull sandbox image + if: steps.issue.outputs.skip != 'true' + run: docker pull ${{ steps.plan.outputs.docker_image }} + + # ------------------------------------------------------------------ + # 8. 
Run the agent build loop + # ------------------------------------------------------------------ + - name: Run agent + if: steps.issue.outputs.skip != 'true' + env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + ISSUE_BODY: ${{ steps.issue.outputs.body }} + ISSUE_NUMBER: ${{ steps.issue.outputs.number }} + EXAMPLE_NUMBER: ${{ steps.example_number.outputs.number }} + EXAMPLE_SLUG: ${{ steps.plan.outputs.slug }} + WORKSPACE_ACTION: ${{ steps.plan.outputs.action }} + DOCKER_IMAGE: ${{ steps.plan.outputs.docker_image }} + WORKSPACE_DIR: ${{ github.workspace }}/examples/${{ steps.plan.outputs.workspace_subdir }} + EXAMPLES_DIR: ${{ github.workspace }}/examples + BUILD_LOG: /tmp/build-log.md + run: python3 .github/scripts/run_agent.py + + # ------------------------------------------------------------------ + # 9. Commit the output to a branch + # ------------------------------------------------------------------ + - name: Commit and push example branch + id: branch + if: steps.issue.outputs.skip != 'true' + env: + WORKSPACE_SUBDIR: ${{ steps.plan.outputs.workspace_subdir }} + WORKSPACE_ACTION: ${{ steps.plan.outputs.action }} + ISSUE_NUMBER: ${{ steps.issue.outputs.number }} + run: | + if [ "$WORKSPACE_ACTION" = "modify" ]; then + branch="example/update-${WORKSPACE_SUBDIR}" + commit_subject="update ${WORKSPACE_SUBDIR}" + else + branch="example/${WORKSPACE_SUBDIR}" + commit_subject="add ${WORKSPACE_SUBDIR}" + fi + echo "branch=$branch" >> "$GITHUB_OUTPUT" + + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + git checkout -b "$branch" + git add "examples/${WORKSPACE_SUBDIR}/" + + git commit -m "feat(examples): ${commit_subject} + + Relates to #${ISSUE_NUMBER}" + + git push origin "$branch" + + # ------------------------------------------------------------------ + # 10. 
Open the PR + # ------------------------------------------------------------------ + - name: Open pull request + id: pr + if: steps.issue.outputs.skip != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + WORKSPACE_SUBDIR: ${{ steps.plan.outputs.workspace_subdir }} + RUNTIME: ${{ steps.plan.outputs.runtime }} + ISSUE_NUMBER: ${{ steps.issue.outputs.number }} + WORKSPACE_ACTION: ${{ steps.plan.outputs.action }} + run: | + if [ "$WORKSPACE_ACTION" = "modify" ]; then + pr_title="feat(examples): update ${WORKSPACE_SUBDIR}" + action_label="Updated" + else + pr_title="feat(examples): add ${WORKSPACE_SUBDIR}" + action_label="New" + fi + + body_file=$(mktemp) + { + printf "Closes #%s\n\n" "$ISSUE_NUMBER" + printf "Auto-generated from issue suggestion by the engineering pipeline.\n\n" + printf "**Action:** %s\n" "$action_label" + printf "**Runtime:** \`%s\`\n" "$RUNTIME" + printf "**Example:** \`%s\`\n\n" "$WORKSPACE_SUBDIR" + printf -- "---\n\n## Build log\n\n" + head -c 60000 /tmp/build-log.md 2>/dev/null || printf "No build log found.\n" + } > "$body_file" + + url=$(gh pr create \ + --title "$pr_title" \ + --body-file "$body_file" \ + --base main \ + --label "type:example" \ + --label "automated") + echo "url=$url" >> "$GITHUB_OUTPUT" + + # ------------------------------------------------------------------ + # 11. 
Comment on the issue linking the PR + # ------------------------------------------------------------------ + - name: Comment on issue + if: steps.issue.outputs.skip != 'true' + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + env: + ISSUE_NUMBER: ${{ steps.issue.outputs.number }} + PR_URL: ${{ steps.pr.outputs.url }} + with: + script: | + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: parseInt(process.env.ISSUE_NUMBER), + body: `✅ Example built and PR opened: ${process.env.PR_URL}\n\nReview the PR before merging.\n\n`, + }); + + # ------------------------------------------------------------------ + # Cleanup on failure + # ------------------------------------------------------------------ + - name: Comment on issue (failure) + if: failure() && steps.issue.outputs.skip != 'true' + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + env: + ISSUE_NUMBER: ${{ steps.issue.outputs.number }} + with: + script: | + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: parseInt(process.env.ISSUE_NUMBER), + body: `❌ Build failed. 
Check the [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.`, + }); + + + # ── Engineering: act on @claude mentions anywhere in the repo ─────────────── + engineering: + if: | + (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || + (github.event_name == 'issue_comment' && contains(github.event.issue.html_url, '/pull/') && contains(github.event.comment.body, '@claude')) + runs-on: ubuntu-latest + timeout-minutes: 60 + + concurrency: + group: engineering-${{ github.event.issue.number || github.event.pull_request.number }} + cancel-in-progress: false + + permissions: + contents: write + pull-requests: write + issues: write + id-token: write + + steps: + # ------------------------------------------------------------------ + # Auth: commenter must be a Deepgram org member. + # Bots are explicitly excluded — we don't want bot-to-bot loops. + # ------------------------------------------------------------------ + - name: Check commenter org membership + id: auth + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + COMMENTER="${{ github.event.comment.user.login }}" + + # Never respond to bots — prevents feedback loops + if [[ "$COMMENTER" == *"[bot]"* ]]; then + echo "allowed=false" >> "$GITHUB_OUTPUT" + echo "Commenter is a bot — skipping" + exit 0 + fi + + http_line=$(gh api "orgs/deepgram/members/${COMMENTER}" -i 2>/dev/null | head -1) + if echo "$http_line" | grep -q "204"; then + echo "allowed=true" >> "$GITHUB_OUTPUT" + echo "$COMMENTER is a Deepgram org member — proceeding" + else + echo "allowed=false" >> "$GITHUB_OUTPUT" + echo "$COMMENTER is not a Deepgram org member — ignoring" + fi + + # ------------------------------------------------------------------ + # Resolve context: which issue/PR, which branch to check out + # ------------------------------------------------------------------ + - name: Resolve context + id: ctx + if: 
steps.auth.outputs.allowed == 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + EVENT="${{ github.event_name }}" + + if [ "$EVENT" = "pull_request_review_comment" ]; then + number="${{ github.event.pull_request.number }}" + branch="${{ github.event.pull_request.head.ref }}" + context_type="PR" + else + # issue_comment — could be on an issue or a PR + number="${{ github.event.issue.number }}" + branch=$(gh pr view "$number" --json headRefName -q .headRefName 2>/dev/null || true) + if [ -n "$branch" ]; then + context_type="PR" + else + branch="main" + context_type="issue" + fi + fi + + echo "number=$number" >> "$GITHUB_OUTPUT" + echo "branch=$branch" >> "$GITHUB_OUTPUT" + echo "context_type=$context_type" >> "$GITHUB_OUTPUT" + + - name: Checkout + if: steps.auth.outputs.allowed == 'true' + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + with: + ref: ${{ steps.ctx.outputs.branch }} + fetch-depth: 0 + + - name: Configure git + if: steps.auth.outputs.allowed == 'true' + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + # ------------------------------------------------------------------ + # Run the engineering agent — full unrestricted access to the repo. + # Acts on whatever was asked, commits if needed, replies in thread. 
+ # ------------------------------------------------------------------ + - name: Run engineering agent + if: steps.auth.outputs.allowed == 'true' + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 + with: + anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} + github_token: ${{ secrets.GITHUB_TOKEN }} + mode: agent + model: claude-opus-4-6 + allowed_tools: "Bash,Read,Write,Edit,Glob,Grep,WebSearch,WebFetch" + timeout_minutes: 55 + direct_prompt: | + @${{ github.event.comment.user.login }} mentioned @claude on ${{ steps.ctx.outputs.context_type }} #${{ steps.ctx.outputs.number }}: + + "${{ github.event.comment.body }}" + + --- + + Act on this request. You have full access to the entire repository — read any file, write code, run tests, install dependencies, commit changes, push branches, open or update PRs. Do not hold back capability. + + Context: + - Repository: ${{ github.repository }} + - ${{ steps.ctx.outputs.context_type }}: #${{ steps.ctx.outputs.number }} + - Branch checked out: ${{ steps.ctx.outputs.branch }} + - Comment URL: ${{ github.event.comment.html_url }} + + When you are done, post a reply comment on #${{ steps.ctx.outputs.number }} summarising what you did (or explaining clearly why you couldn't do it). Use this exact command to post: + + ```bash + gh issue comment ${{ steps.ctx.outputs.number }} \ + --repo ${{ github.repository }} \ + --body "YOUR REPLY HERE" + ``` + + ⛔ Never modify anything under .github/, .claude/, context7.json, or renovate.json — these are infrastructure files that manage the repo itself and must only change through deliberate human commits. 
+ env: + DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} diff --git a/.github/workflows/issues.yml b/.github/workflows/issues.yml new file mode 100644 index 0000000..c43419b --- /dev/null +++ b/.github/workflows/issues.yml @@ -0,0 +1,212 @@ +name: Issue Handler + +# Classifies and responds to issues — but ONLY from Deepgram org members, +# or external issues that an org member has explicitly approved by applying +# the type:suggestion label. +# +# External issues with no label are completely invisible to this workflow. +# No response is posted, no tokens are consumed. An org member must manually +# find it, review it, and apply type:suggestion before anything happens. +# +# @claude in an issue → this workflow (classify, respond, ask questions) +# @claude in a PR → engineering.yml (code changes, repo ops) + +on: + issues: + types: [opened, edited, labeled, reopened] + issue_comment: + types: [created] + schedule: + - cron: '*/5 * * * *' + workflow_dispatch: + +jobs: + handle: + name: Handle issue + if: | + github.event_name == 'schedule' || + github.event_name == 'workflow_dispatch' || + github.event_name == 'issues' || + (github.event_name == 'issue_comment' && + !contains(github.event.issue.html_url, '/pull/') && + contains(github.event.comment.body, '@claude')) + runs-on: ubuntu-latest + timeout-minutes: 10 + + concurrency: + group: issue-handler-${{ github.event.issue.number || 'sweep' }} + cancel-in-progress: false + + permissions: + issues: write + contents: read + + steps: + # ------------------------------------------------------------------ + # Resolve which issue to handle, and verify it's eligible. + # + # Eligibility rules (all paths): + # - Issue author is a Deepgram org member, OR + # - Issue carries the type:suggestion label (org member approved it) + # + # External issues with neither condition are silently skipped. + # No comment is posted. No tokens are consumed. 
+ # ------------------------------------------------------------------ + - name: Resolve issue + id: issue + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + EVENT="${{ github.event_name }}" + + if [ "$EVENT" = "issues" ] || [ "$EVENT" = "issue_comment" ]; then + NUMBER="${{ github.event.issue.number }}" + + # @claude mention: gate on the commenter being an org member + if [ "$EVENT" = "issue_comment" ]; then + COMMENTER="${{ github.event.comment.user.login }}" + if [[ "$COMMENTER" == *"[bot]"* ]]; then + echo "skip=true" >> "$GITHUB_OUTPUT"; exit 0 + fi + http_line=$(gh api "orgs/deepgram/members/${COMMENTER}" -i 2>/dev/null | head -1) + if ! echo "$http_line" | grep -q "204"; then + echo "skip=true" >> "$GITHUB_OUTPUT" + echo "$COMMENTER is not a Deepgram org member — ignoring" + exit 0 + fi + fi + + # Issue event: check author org membership OR type:suggestion label + issue_json=$(gh issue view "$NUMBER" --json author,labels) + author=$(echo "$issue_json" | python3 -c "import json,sys; print(json.load(sys.stdin)['author']['login'])") + has_label=$(echo "$issue_json" | python3 -c " +import json, sys +labels = {l['name'] for l in json.load(sys.stdin)['labels']} +print('true' if 'type:suggestion' in labels else 'false') +") + + if [[ "$author" != *"[bot]"* ]] && [[ "$has_label" != "true" ]]; then + http_line=$(gh api "orgs/deepgram/members/${author}" -i 2>/dev/null | head -1) + if ! echo "$http_line" | grep -q "204"; then + echo "skip=true" >> "$GITHUB_OUTPUT" + echo "Issue #${NUMBER} author '${author}' is not a Deepgram org member and has no label approval — skipping" + exit 0 + fi + fi + + echo "number=$NUMBER" >> "$GITHUB_OUTPUT" + + else + # Cron/dispatch sweep: only pick up issues with type:suggestion label + # where our reply isn't already the last comment. + # Filtering by label is cheap and covers both org-member issues they + # labeled themselves and external issues an org member approved. 
+ NUMBER=$(gh issue list \ + --repo "${{ github.repository }}" \ + --label "type:suggestion" \ + --state open \ + --limit 50 \ + --json number \ + --jq '.[].number' | python3 -c " +import sys, subprocess, json + +numbers = [int(l.strip()) for l in sys.stdin if l.strip()] +numbers.reverse() # oldest first + +for num in numbers: + result = subprocess.run( + ['gh', 'issue', 'view', str(num), + '--repo', '${{ github.repository }}', + '--json', 'comments,labels'], + capture_output=True, text=True + ) + data = json.loads(result.stdout) + + # Skip already-built issues + label_names = {l['name'] for l in data.get('labels', [])} + if {'type:example', 'automated'}.intersection(label_names): + continue + + comments = data.get('comments', []) + if not comments: + print(num) + break + if '<!-- issue-handled -->' not in comments[-1].get('body', ''): + print(num) + break +" 2>/dev/null || true) + + if [ -z "$NUMBER" ]; then + echo "skip=true" >> "$GITHUB_OUTPUT" + echo "No eligible issues to handle" + else + echo "number=$NUMBER" >> "$GITHUB_OUTPUT" + echo "Processing issue #$NUMBER" + fi + fi + + - name: Skip if nothing to handle + if: steps.issue.outputs.skip == 'true' + run: echo "Skipping — no eligible issue." + + - name: Checkout + if: steps.issue.outputs.skip != 'true' + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + + - name: Handle issue + if: steps.issue.outputs.skip != 'true' + uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 + with: + anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} + github_token: ${{ secrets.GITHUB_TOKEN }} + mode: agent + model: claude-sonnet-4-6 + allowed_tools: "Bash" + timeout_minutes: 8 + direct_prompt: | + You are the issue handler for the deepgram/dx-examples repository — a collection + of working code examples showing Deepgram integrations. You represent the Deepgram + developer experience team. 
+ + Read the full issue: + ```bash + gh issue view ${{ steps.issue.outputs.number }} \ + --repo ${{ github.repository }} \ + --json number,title,body,labels,comments,author + ``` + + **If `<!-- issue-handled -->` is in the last comment and no new comments appear + after it — the issue is already handled. Do nothing and exit.** + + Otherwise read everything and respond. You do not need to rigidly classify — + read it as a person and respond naturally: + + - Example or modification request → acknowledge, confirm you understand what + they're after. If `type:suggestion` is not already on the issue, apply it: + `gh issue edit ${{ steps.issue.outputs.number }} --repo ${{ github.repository }} --add-label "type:suggestion"` + Then tell them it's queued for building. + + - Something broken in an existing example → acknowledge, ask for specifics if + needed (SDK version, error message, language). Don't over-ask — if the issue + already has everything, confirm it's noted. + + - A question → answer it directly. Look at relevant example code if helpful. + + - Vague or incomplete → ask one or two specific questions. Be precise about + what you need — don't say "please provide more info". + + End every response with `<!-- issue-handled -->` on its own line (hidden marker): + + ```bash + gh issue comment ${{ steps.issue.outputs.number }} \ + --repo ${{ github.repository }} \ + --body "YOUR RESPONSE + + <!-- issue-handled -->" + ``` + + Be concise. Be specific. Sound like a developer, not a support bot. + Do not start with "Great question!" or similar filler. + + ⛔ Do not modify any files. Do not open PRs. Do not merge anything. + ⛔ Do not modify anything under .github/, .claude/, context7.json, or renovate.json. 
diff --git a/.github/workflows/lead-fix.yml b/.github/workflows/lead-fix.yml deleted file mode 100644 index a601e13..0000000 --- a/.github/workflows/lead-fix.yml +++ /dev/null @@ -1,139 +0,0 @@ -name: Lead — Fix - -on: - pull_request: - types: [labeled] - schedule: - - cron: '57 */4 * * *' # Fallback sweep every 4h (event-driven via label trigger) - workflow_dispatch: - inputs: - pr_number: - description: 'PR number to fix' - required: false - -concurrency: - group: lead-fix-${{ github.event.pull_request.number || inputs.pr_number || 'sweep' }} - cancel-in-progress: false - -permissions: - contents: write - pull-requests: write - issues: write - statuses: write - actions: write - -jobs: - fix: - if: > - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - github.event.label.name == 'status:fix-needed' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref || '' }} - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 - with: - node-version: '20' - - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 - with: - python-version: '3.11' - - - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 - with: - go-version: '1.22' - - - name: Get date - id: date - run: echo "date=$(date -u +%Y-%m-%d)" >> $GITHUB_OUTPUT - - - name: Fetch latest Deepgram SDK versions - id: sdk - run: | - latest() { curl -sf "https://api.github.com/repos/deepgram/$1/releases/latest" | jq -r '.tag_name // "unknown"'; } - echo "python=$(latest deepgram-python-sdk)" >> $GITHUB_OUTPUT - echo "js=$(latest deepgram-js-sdk)" >> $GITHUB_OUTPUT - echo "go=$(latest deepgram-go-sdk)" >> $GITHUB_OUTPUT - echo "java=$(latest deepgram-java-sdk)" >> $GITHUB_OUTPUT - echo 
"rust=$(latest deepgram-rust-sdk)" >> $GITHUB_OUTPUT - echo "dotnet=$(latest deepgram-dotnet-sdk)" >> $GITHUB_OUTPUT - echo "cli=$(latest cli)" >> $GITHUB_OUTPUT - cat $GITHUB_OUTPUT | grep -E "^(python|js|go|java|rust|dotnet|cli)=" - - - name: Check fix attempt limit - id: attempts - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - PR_NUM="${{ github.event.pull_request.number || inputs.pr_number }}" - if [ -n "$PR_NUM" ]; then - FIXES=$(git log --oneline --author="examples-bot" 2>/dev/null | grep "^[a-f0-9]* fix(" | wc -l | tr -d ' ') - [ "$FIXES" -ge 3 ] && echo "max_reached=true" >> $GITHUB_OUTPUT \ - || echo "max_reached=false" >> $GITHUB_OUTPUT - [ "$FIXES" -ge 3 ] && gh pr comment "$PR_NUM" \ - --body "@deepgram/devrel — fix agent made 3 attempts, still failing. Manual intervention needed." - else - echo "max_reached=false" >> $GITHUB_OUTPUT - fi - - - name: Run instruction - if: steps.attempts.outputs.max_reached != 'true' - uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Write,Edit,Glob,Grep,WebSearch,WebFetch" - timeout_minutes: 30 - direct_prompt: | - Read and execute instructions/lead-fix.md. 
- - Context: - - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - - Today's date: ${{ steps.date.outputs.date }} - - Repository: ${{ github.repository }} - - Trigger: ${{ github.event_name }} - - REQUIRED SDK versions — upgrade any outdated pins as part of the fix: - - Python: deepgram-sdk==${{ steps.sdk.outputs.python }} - - JavaScript: @deepgram/sdk@${{ steps.sdk.outputs.js }} - - Go: ${{ steps.sdk.outputs.go }} - - Java: ${{ steps.sdk.outputs.java }} - - Rust: ${{ steps.sdk.outputs.rust }} - - .NET: ${{ steps.sdk.outputs.dotnet }} - - CLI: ${{ steps.sdk.outputs.cli }} - env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - PIPECAT_API_KEY: ${{ secrets.PIPECAT_API_KEY }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_APP_TOKEN: ${{ secrets.SLACK_APP_TOKEN }} - TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} - ZOOM_ACCOUNT_ID: ${{ secrets.ZOOM_ACCOUNT_ID }} - ZOOM_CLIENT_ID: ${{ secrets.ZOOM_CLIENT_ID }} - ZOOM_CLIENT_SECRET: ${{ secrets.ZOOM_CLIENT_SECRET }} - ZOOM_WEBHOOK_SECRET_TOKEN: ${{ secrets.ZOOM_WEBHOOK_SECRET_TOKEN }} - diff --git a/.github/workflows/lead-review.yml b/.github/workflows/lead-review.yml deleted file mode 100644 index 2393ee1..0000000 --- a/.github/workflows/lead-review.yml +++ 
/dev/null @@ -1,148 +0,0 @@ -name: Lead — Review - -on: - pull_request: - types: [opened, synchronize, reopened] - paths: - - 'examples/**' - schedule: - - cron: '37 */4 * * *' # Fallback sweep every 4h (event-driven for new PRs) - workflow_dispatch: - inputs: - pr_number: - description: 'PR number to review' - required: false - -concurrency: - group: lead-review-${{ github.event.pull_request.number || inputs.pr_number || 'sweep' }} - cancel-in-progress: true - -permissions: - contents: read - pull-requests: write - issues: write - -jobs: - review: - if: > - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - startsWith(github.event.pull_request.title, '[Example]') || - startsWith(github.event.pull_request.title, '[Fix]') || - contains(github.event.pull_request.labels.*.name, 'type:example') || - contains(github.event.pull_request.labels.*.name, 'type:fix') - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 - with: - node-version: '20' - - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 - with: - python-version: '3.11' - - - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 - with: - go-version: '1.22' - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - name: Check actor is a team member - id: auth - if: github.event_name == 'pull_request' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - ACTOR="${{ github.actor }}" - # Bots are always allowed — they act on behalf of the system - if [[ "$ACTOR" == *"[bot]"* ]] || [[ "$ACTOR" == "github-actions" ]]; then - echo "allowed=true" >> $GITHUB_OUTPUT; exit 0 - fi - IS_ORG_MEMBER=false - if gh api "orgs/deepgram/members/${ACTOR}" -i 2>/dev/null | head -1 | grep -q "204"; then - IS_ORG_MEMBER=true - fi - PERM=$(gh 
api "repos/${{ github.repository }}/collaborators/${ACTOR}/permission" \ - --jq '.permission' 2>/dev/null || echo "none") - if [[ "$IS_ORG_MEMBER" == "true" || "$PERM" == "write" || "$PERM" == "maintain" || "$PERM" == "admin" ]]; then - echo "allowed=true" >> $GITHUB_OUTPUT - else - echo "allowed=false" >> $GITHUB_OUTPUT - echo "Actor $ACTOR not a Deepgram org member or repo collaborator — silently exiting" - fi - - - name: Get date - id: date - run: echo "date=$(date -u +%Y-%m-%d)" >> $GITHUB_OUTPUT - - - name: Fetch latest Deepgram SDK versions - id: sdk - run: | - latest() { curl -sf "https://api.github.com/repos/deepgram/$1/releases/latest" | jq -r '.tag_name // "unknown"'; } - echo "python=$(latest deepgram-python-sdk)" >> $GITHUB_OUTPUT - echo "js=$(latest deepgram-js-sdk)" >> $GITHUB_OUTPUT - echo "go=$(latest deepgram-go-sdk)" >> $GITHUB_OUTPUT - echo "java=$(latest deepgram-java-sdk)" >> $GITHUB_OUTPUT - echo "rust=$(latest deepgram-rust-sdk)" >> $GITHUB_OUTPUT - echo "dotnet=$(latest deepgram-dotnet-sdk)" >> $GITHUB_OUTPUT - echo "cli=$(latest cli)" >> $GITHUB_OUTPUT - cat $GITHUB_OUTPUT | grep -E "^(python|js|go|java|rust|dotnet|cli)=" - - - name: Run instruction - if: steps.auth.outputs.allowed != 'false' - uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Glob,Grep" - timeout_minutes: 30 - direct_prompt: | - Read and execute instructions/lead-review.md. 
- - Context: - - PR_NUMBER: ${{ github.event.pull_request.number || inputs.pr_number }} - - Today's date: ${{ steps.date.outputs.date }} - - Repository: ${{ github.repository }} - - Trigger: ${{ github.event_name }} - - REQUIRED SDK versions — flag any PR that uses an older version: - - Python: deepgram-sdk==${{ steps.sdk.outputs.python }} - - JavaScript: @deepgram/sdk@${{ steps.sdk.outputs.js }} - - Go: ${{ steps.sdk.outputs.go }} - - Java: ${{ steps.sdk.outputs.java }} - - Rust: ${{ steps.sdk.outputs.rust }} - - .NET: ${{ steps.sdk.outputs.dotnet }} - - CLI: ${{ steps.sdk.outputs.cli }} - env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - PIPECAT_API_KEY: ${{ secrets.PIPECAT_API_KEY }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_APP_TOKEN: ${{ secrets.SLACK_APP_TOKEN }} - TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} - ZOOM_ACCOUNT_ID: ${{ secrets.ZOOM_ACCOUNT_ID }} - ZOOM_CLIENT_ID: ${{ secrets.ZOOM_CLIENT_ID }} - ZOOM_CLIENT_SECRET: ${{ secrets.ZOOM_CLIENT_SECRET }} - ZOOM_WEBHOOK_SECRET_TOKEN: ${{ secrets.ZOOM_WEBHOOK_SECRET_TOKEN }} diff --git a/.github/workflows/notify-docs.yml b/.github/workflows/notify-docs.yml index c68143b..9a8a4b3 100644 --- a/.github/workflows/notify-docs.yml +++ 
b/.github/workflows/notify-docs.yml @@ -1,7 +1,8 @@ -name: Notify docs — new example ready +name: Notify docs — example added or updated -# When a new example PR merges to main, create a [Suggestion] issue in deepgram-docs -# so the content-pm workflow can research and queue a tutorial guide for it. +# When any PR merges to main, check whether it touched an examples/ directory. +# For each example that changed, open an issue in deepgram/deepgram-docs with +# full context so a content writer can pick it up and turn it into a guide. # # Required secrets: # DOCS_PAT — PAT with issues:write scope on deepgram/deepgram-docs @@ -13,72 +14,123 @@ on: jobs: notify: - name: Create guide suggestion in deepgram-docs - if: | - github.event.pull_request.merged == true && - startsWith(github.event.pull_request.title, '[Example]') + name: Notify docs of changed examples + if: github.event.pull_request.merged == true runs-on: ubuntu-latest permissions: contents: read + steps: - name: Checkout uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 with: - fetch-depth: 0 + fetch-depth: 2 # need HEAD and HEAD~1 to detect new vs updated ref: main - - name: Create guide suggestion in deepgram-docs - env: - GH_TOKEN: ${{ secrets.DOCS_PAT }} - PR_NUMBER: ${{ github.event.pull_request.number }} - PR_TITLE: ${{ github.event.pull_request.title }} - PR_URL: ${{ github.event.pull_request.html_url }} + - name: Find changed example directories + id: examples run: | - set -euo pipefail - - # Find the example directory added or modified by this PR - EXAMPLE_DIR=$(git diff --name-only HEAD~1 HEAD \ + DIRS=$(git diff --name-only HEAD~1 HEAD \ | grep '^examples/' \ - | head -1 \ - | cut -d/ -f1-2) - - if [ -z "$EXAMPLE_DIR" ]; then - echo "No example directory found in diff — skipping" - exit 0 + | cut -d/ -f1-2 \ + | sort -u \ + | grep -E '^examples/[0-9]{3}-' || true) + + if [ -z "$DIRS" ]; then + echo "No example directories changed — skipping" + echo "dirs=" >> "$GITHUB_OUTPUT" + 
else + echo "Changed examples: $DIRS" + # Newlines -> space-separated for matrix-style iteration + echo "dirs=$(echo "$DIRS" | tr '\n' ' ')" >> "$GITHUB_OUTPUT" fi - SLUG=$(basename "$EXAMPLE_DIR") - echo "Example slug: $SLUG" - - # Read the example README for title - README_TITLE=$(head -3 "$EXAMPLE_DIR/README.md" 2>/dev/null \ - | grep '^#' | head -1 | sed 's/^# *//' || echo "$SLUG") - - # Create suggestion issue in deepgram-docs - gh issue create \ - --repo deepgram/deepgram-docs \ - --title "[Suggestion] Guide for ${SLUG}" \ - --label "type:suggestion" \ - --body "## What to write - -A step-by-step tutorial guide for the [${README_TITLE}](https://github.com/deepgram/dx-examples/tree/main/${EXAMPLE_DIR}) example. - -## Why this matters - -New example added in ${PR_URL} — a guide will help developers discover and follow along with it. - - - -## Source example - -- **Slug:** \`${SLUG}\` -- **Example:** [${README_TITLE}](https://github.com/deepgram/dx-examples/tree/main/${EXAMPLE_DIR}) -- **Merged PR:** ${PR_URL} - ---- -*Auto-created by notify-docs workflow after example merge.*" - - echo "Suggestion issue created in deepgram-docs for $SLUG" + - name: Create docs issues for each changed example + if: steps.examples.outputs.dirs != '' + env: + GH_TOKEN: ${{ secrets.DOCS_PAT }} + PR_URL: ${{ github.event.pull_request.html_url }} + PR_TITLE: ${{ github.event.pull_request.title }} + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + for EXAMPLE_DIR in ${{ steps.examples.outputs.dirs }}; do + SLUG=$(basename "$EXAMPLE_DIR") + + # Determine whether this is a new example or an update + if git show HEAD~1:"${EXAMPLE_DIR}/README.md" &>/dev/null; then + ACTION="updated" + ISSUE_TITLE="Tutorial update: ${SLUG}" + else + ACTION="added" + ISSUE_TITLE="Tutorial opportunity: ${SLUG}" + fi + + echo "Processing $SLUG ($ACTION)" + + # Build the issue body in Python — handles multi-line content safely + python3 - "$EXAMPLE_DIR" "$SLUG" "$ACTION" <<'PYEOF' +import sys, os, 
pathlib, textwrap + +example_dir, slug, action = sys.argv[1], sys.argv[2], sys.argv[3] +pr_url = os.environ["PR_URL"] +pr_title = os.environ["PR_TITLE"] +pr_num = os.environ["PR_NUMBER"] + +base = pathlib.Path(example_dir) +verb = "added" if action == "added" else "updated" +gh_url = f"https://github.com/deepgram/dx-examples/tree/main/{example_dir}" + +# Read example files — truncate generously to stay under GH's 65k body limit +def read(path, limit=12000): + p = base / path + if not p.exists(): + return None + txt = p.read_text(errors="ignore") + if len(txt) > limit: + txt = txt[:limit] + f"\n\n… *(truncated — full file at {gh_url}/{path})*" + return txt + +readme = read("README.md") +blog = read("BLOG.md", limit=20000) + +lines = [] +lines.append(f"A Deepgram example was **{verb}** in PR #{pr_num}: [{pr_title}]({pr_url}).\n") +lines.append(f"**Example:** [`{slug}`]({gh_url})\n") +lines.append("---\n") +lines.append("## What a writer should do with this\n") +lines.append(textwrap.dedent(f"""\ + Use the BLOG.md below as the basis for a developer tutorial. It walks through + the build step by step. The README is the quickstart reference. Between them, + everything a developer needs is already here — your job is to edit for voice, + add any extra context for the docs audience, and publish. +""")) +lines.append("---\n") + +if readme: + lines.append("## README (quickstart guide)\n") + lines.append(f"```markdown\n{readme}\n```\n") + +if blog: + lines.append("## BLOG.md (development narrative — use as tutorial draft)\n") + lines.append(f"```markdown\n{blog}\n```\n") +elif not blog: + lines.append("## BLOG.md\n") + lines.append("*(No BLOG.md found in this example — write the tutorial from scratch using the README and source code.)*\n") + +lines.append("---\n") +lines.append(f"*Auto-created by notify-docs workflow. 
Source: [dx-examples PR #{pr_num}]({pr_url})*") + +body_file = pathlib.Path("/tmp/docs-issue-body.md") +body_file.write_text("\n".join(lines)) +print(f"Body written ({body_file.stat().st_size} bytes)") +PYEOF + + # Create the issue in deepgram-docs + gh issue create \ + --repo deepgram/deepgram-docs \ + --title "$ISSUE_TITLE" \ + --label "type:suggestion" \ + --body-file /tmp/docs-issue-body.md + + echo "Issue created for $SLUG" + done diff --git a/.github/workflows/pm-dashboard.yml b/.github/workflows/pm-dashboard.yml deleted file mode 100644 index d2b7d1f..0000000 --- a/.github/workflows/pm-dashboard.yml +++ /dev/null @@ -1,94 +0,0 @@ -name: PM — Dashboard - -on: - schedule: - - cron: '5 */6 * * *' # Every 6 hours at :05 - workflow_dispatch: - -concurrency: - group: pm-dashboard - cancel-in-progress: false - -jobs: - update: - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - statuses: write - actions: write - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 - with: - node-version: '20' - - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 - with: - python-version: '3.11' - - - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 - with: - go-version: '1.22' - - - name: Get date - id: date - run: echo "date=$(date -u +%Y-%m-%d)" >> $GITHUB_OUTPUT - - - name: Check for existing dashboard PR - id: existing - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - N=$(gh pr list --repo ${{ github.repository }} --state open \ - --search "docs: update examples status table" \ - --json number --jq '.[0].number' 2>/dev/null) - [ -n "$N" ] && echo "skip=true" >> $GITHUB_OUTPUT \ - || echo "skip=false" >> $GITHUB_OUTPUT - - - name: Run dashboard agent - 
if: steps.existing.outputs.skip == 'false' - uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - PIPECAT_API_KEY: ${{ secrets.PIPECAT_API_KEY }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_APP_TOKEN: ${{ secrets.SLACK_APP_TOKEN }} - TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Write,Edit,Glob,Grep" - timeout_minutes: 30 - direct_prompt: | - Read and execute instructions/pm-dashboard.md. 
- - Context: - - Today's date: ${{ steps.date.outputs.date }} - - Repository: ${{ github.repository }} - - Run ID: ${{ github.run_id }} - - Server URL: ${{ github.server_url }} diff --git a/.github/workflows/pm-suggestions.yml b/.github/workflows/pm-suggestions.yml deleted file mode 100644 index 7b3a27f..0000000 --- a/.github/workflows/pm-suggestions.yml +++ /dev/null @@ -1,140 +0,0 @@ -name: PM — Suggestions - -on: - issues: - types: - - opened # Every new issue, regardless of labels or format - - reopened - - labeled # Re-process when devrel adds 'approved' to external issues - schedule: - - cron: '47 */6 * * *' # Fallback sweep every 6h - workflow_dispatch: - -concurrency: - group: pm-suggestions - cancel-in-progress: false - -jobs: - run: - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - issues: write - id-token: write - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - # ── Permission gate ────────────────────────────────────────────────── - # Write access = act immediately. - # No write access = hold for devrel approval; don't run PM agent. - # Schedule/dispatch runs bypass the check (no user context). 
- - name: Check write permission - id: permission - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - EVENT="${{ github.event_name }}" - USERNAME="${{ github.event.issue.user.login }}" - - # Schedule and dispatch runs always proceed - if [[ "$EVENT" == "schedule" || "$EVENT" == "workflow_dispatch" ]]; then - echo "gate=pass" >> $GITHUB_OUTPUT - echo "reason=scheduled" >> $GITHUB_OUTPUT - exit 0 - fi - - # If the 'approved' label was just added, treat as approved regardless of opener - LABEL="${{ github.event.label.name }}" - if [[ "$EVENT" == "issues" && "$LABEL" == "approved" ]]; then - echo "gate=pass" >> $GITHUB_OUTPUT - echo "reason=devrel-approved" >> $GITHUB_OUTPUT - exit 0 - fi - - # Check if the issue opener already has the 'approved' label (re-opened etc.) - HAS_APPROVED=$(gh issue view ${{ github.event.issue.number }} \ - --repo ${{ github.repository }} \ - --json labels --jq '[.labels[].name] | contains(["approved"])' 2>/dev/null || echo "false") - if [[ "$HAS_APPROVED" == "true" ]]; then - echo "gate=pass" >> $GITHUB_OUTPUT - echo "reason=already-approved" >> $GITHUB_OUTPUT - exit 0 - fi - - # Skip bots - if [[ "$USERNAME" == *"[bot]"* ]]; then - echo "gate=skip" >> $GITHUB_OUTPUT - echo "reason=bot" >> $GITHUB_OUTPUT - exit 0 - fi - - # Check write permission on this repo - IS_ORG_MEMBER=false - if gh api "orgs/deepgram/members/${USERNAME}" -i 2>/dev/null | head -1 | grep -q "204"; then - IS_ORG_MEMBER=true - fi - PERM=$(gh api "repos/${{ github.repository }}/collaborators/${USERNAME}/permission" \ - --jq '.permission' 2>/dev/null || echo "none") - - if [[ "$IS_ORG_MEMBER" == "true" || "$PERM" == "write" || "$PERM" == "maintain" || "$PERM" == "admin" ]]; then - echo "gate=pass" >> $GITHUB_OUTPUT - echo "reason=deepgram-member-or-collaborator" >> $GITHUB_OUTPUT - else - echo "gate=hold" >> $GITHUB_OUTPUT - echo "reason=not-deepgram-member" >> $GITHUB_OUTPUT - echo "username=$USERNAME" >> $GITHUB_OUTPUT - fi - - # ── Hold: tag devrel for external 
suggestions ────────────────────── - - name: Hold for devrel approval - if: steps.permission.outputs.gate == 'hold' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - USERNAME="${{ steps.permission.outputs.username }}" - ISSUE="${{ github.event.issue.number }}" - - # Only post once — skip if already held - ALREADY=$(gh issue view "$ISSUE" --repo ${{ github.repository }} \ - --json labels --jq '[.labels[].name] | contains(["needs:approval"])' 2>/dev/null || echo "false") - if [[ "$ALREADY" == "true" ]]; then - echo "Already on hold — skipping duplicate comment" - exit 0 - fi - - gh issue edit "$ISSUE" --repo ${{ github.repository }} \ - --add-label "needs:approval" 2>/dev/null || true - - BODY="@deepgram/devrel — new suggestion from @${USERNAME} (no write access). Add the \`approved\` label to process it, or close to decline." - gh issue comment "$ISSUE" --repo ${{ github.repository }} --body "$BODY" - - # ── Pass: run PM agent ──────────────────────────────────────────── - - name: Route issue - if: steps.permission.outputs.gate == 'pass' - uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Glob,Grep,WebSearch,WebFetch" - timeout_minutes: 15 - direct_prompt: | - Read and execute instructions/pm-suggestions.md. 
- - Context: - - Event: ${{ github.event_name }} - - Issue number (if triggered by issue): ${{ github.event.issue.number }} - - Repository: ${{ github.repository }} - - Trigger reason: ${{ steps.permission.outputs.reason }} - env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} diff --git a/.github/workflows/setup-labels.yml b/.github/workflows/setup-labels.yml index 6b8860d..56cf244 100644 --- a/.github/workflows/setup-labels.yml +++ b/.github/workflows/setup-labels.yml @@ -25,9 +25,11 @@ jobs: } # Type labels + create_label "type:suggestion" "c5def5" "Suggested example to build — triggers build pipeline when applied" create_label "type:example" "0075ca" "New example app" create_label "type:fix" "d93f0b" "Fix to existing example" create_label "type:docs" "e4e669" "Documentation update" + create_label "automated" "eeeeee" "Opened by the build pipeline" # Status labels create_label "status:needs-credentials" "f9d0c4" "Missing env vars for E2E tests" diff --git a/.github/workflows/test-examples.yml b/.github/workflows/test-examples.yml deleted file mode 100644 index 7ff368a..0000000 --- a/.github/workflows/test-examples.yml +++ /dev/null @@ -1,651 +0,0 @@ -name: Test Examples - -# Single workflow for all example languages. 
-# -# Jobs: -# detect — finds changed example dirs, groups by language marker file -# test-node — package.json -# test-python — requirements.txt / pyproject.toml -# test-go — go.mod -# test-java — pom.xml / build.gradle -# test-rust — Cargo.toml -# test-dotnet — *.csproj / *.sln -# test-cli — example.sh / src/*.sh; installs Deepgram CLI first -# lint-md — markdownlint --fix; auto-commits; always passes -# e2e-api-check — required gate; fails only if a language job failed - -on: - pull_request: - types: [opened, synchronize, reopened] - paths: - - 'examples/**' - workflow_dispatch: - inputs: - ref: - description: 'Branch or SHA to test (default: main)' - required: false - default: 'main' - example: - description: 'Single example dir to test (e.g. examples/020-twilio-media-streams-node). Leave blank to test all.' - required: false - default: '' - -concurrency: - group: test-examples-${{ github.event.pull_request.number || inputs.ref || github.ref }}-${{ inputs.example || 'all' }} - cancel-in-progress: true - -permissions: - contents: write - pull-requests: write - issues: write - statuses: write - -jobs: - # ── Detect which examples changed and what languages they use ─────────────── - detect: - runs-on: ubuntu-latest - outputs: - node_examples: ${{ steps.scan.outputs.node_examples }} - python_examples: ${{ steps.scan.outputs.python_examples }} - go_examples: ${{ steps.scan.outputs.go_examples }} - java_examples: ${{ steps.scan.outputs.java_examples }} - rust_examples: ${{ steps.scan.outputs.rust_examples }} - dotnet_examples: ${{ steps.scan.outputs.dotnet_examples }} - cli_examples: ${{ steps.scan.outputs.cli_examples }} - md_files: ${{ steps.scan.outputs.md_files }} - has_node: ${{ steps.scan.outputs.has_node }} - has_python: ${{ steps.scan.outputs.has_python }} - has_go: ${{ steps.scan.outputs.has_go }} - has_java: ${{ steps.scan.outputs.has_java }} - has_rust: ${{ steps.scan.outputs.has_rust }} - has_dotnet: ${{ steps.scan.outputs.has_dotnet }} - has_cli: ${{ 
steps.scan.outputs.has_cli }} - has_md: ${{ steps.scan.outputs.has_md }} - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - - name: Scan changed example directories - id: scan - run: | - # On PR/push/dispatch-to-branch: only dirs changed vs main. - # On dispatch-to-main: all dirs, or a single example if specified. - REF="${{ inputs.ref }}" - EVENT="${{ github.event_name }}" - if [ "$EVENT" = "pull_request" ] || [ "$EVENT" = "push" ] || \ - ( [ "$EVENT" = "workflow_dispatch" ] && [ -n "$REF" ] && [ "$REF" != "main" ] ); then - DIRS=$(git diff origin/main...HEAD --name-only 2>/dev/null \ - | grep '^examples/' | cut -d/ -f1-2 | sort -u | grep -v '^\s*$') - elif [ -n "${{ inputs.example }}" ]; then - DIRS="${{ inputs.example }}" - else - DIRS=$(ls -d examples/*/ 2>/dev/null | sed 's|/$||') - fi - - NODE_EX="" PYTHON_EX="" GO_EX="" JAVA_EX="" RUST_EX="" DOTNET_EX="" CLI_EX="" MD_FILES="" - - for dir in $DIRS; do - [ ! -d "$dir" ] && continue - [ -f "${dir}/package.json" ] && NODE_EX="${NODE_EX} ${dir}" - [ -f "${dir}/requirements.txt" ] && PYTHON_EX="${PYTHON_EX} ${dir}" - [ -f "${dir}/pyproject.toml" ] && PYTHON_EX="${PYTHON_EX} ${dir}" - [ -f "${dir}/go.mod" ] && GO_EX="${GO_EX} ${dir}" - [ -f "${dir}/pom.xml" ] || [ -f "${dir}/build.gradle" ] && JAVA_EX="${JAVA_EX} ${dir}" - [ -f "${dir}/Cargo.toml" ] && RUST_EX="${RUST_EX} ${dir}" - ls "${dir}"/*.csproj "${dir}"/*.sln 2>/dev/null | grep -q . && DOTNET_EX="${DOTNET_EX} ${dir}" - [ -f "${dir}/example.sh" ] || ls "${dir}"/src/*.sh 2>/dev/null | grep -q . 
&& CLI_EX="${CLI_EX} ${dir}" - [ -f "${dir}/README.md" ] && MD_FILES="${MD_FILES} ${dir}/README.md" - done - - for v in NODE_EX PYTHON_EX GO_EX JAVA_EX RUST_EX DOTNET_EX CLI_EX MD_FILES; do - eval "$v=\$(echo \${$v} | xargs)" - done - - echo "node_examples=$NODE_EX" >> $GITHUB_OUTPUT - echo "python_examples=$PYTHON_EX" >> $GITHUB_OUTPUT - echo "go_examples=$GO_EX" >> $GITHUB_OUTPUT - echo "java_examples=$JAVA_EX" >> $GITHUB_OUTPUT - echo "rust_examples=$RUST_EX" >> $GITHUB_OUTPUT - echo "dotnet_examples=$DOTNET_EX" >> $GITHUB_OUTPUT - echo "cli_examples=$CLI_EX" >> $GITHUB_OUTPUT - echo "md_files=$MD_FILES" >> $GITHUB_OUTPUT - - [ -n "$NODE_EX" ] && echo "has_node=true" >> $GITHUB_OUTPUT || echo "has_node=false" >> $GITHUB_OUTPUT - [ -n "$PYTHON_EX" ] && echo "has_python=true" >> $GITHUB_OUTPUT || echo "has_python=false" >> $GITHUB_OUTPUT - [ -n "$GO_EX" ] && echo "has_go=true" >> $GITHUB_OUTPUT || echo "has_go=false" >> $GITHUB_OUTPUT - [ -n "$JAVA_EX" ] && echo "has_java=true" >> $GITHUB_OUTPUT || echo "has_java=false" >> $GITHUB_OUTPUT - [ -n "$RUST_EX" ] && echo "has_rust=true" >> $GITHUB_OUTPUT || echo "has_rust=false" >> $GITHUB_OUTPUT - [ -n "$DOTNET_EX" ] && echo "has_dotnet=true" >> $GITHUB_OUTPUT || echo "has_dotnet=false" >> $GITHUB_OUTPUT - [ -n "$CLI_EX" ] && echo "has_cli=true" >> $GITHUB_OUTPUT || echo "has_cli=false" >> $GITHUB_OUTPUT - [ -n "$MD_FILES" ] && echo "has_md=true" >> $GITHUB_OUTPUT || echo "has_md=false" >> $GITHUB_OUTPUT - - echo "" - echo "Node: ${NODE_EX:-none}" - echo "Python: ${PYTHON_EX:-none}" - echo "Go: ${GO_EX:-none}" - echo "Java: ${JAVA_EX:-none}" - echo "Rust: ${RUST_EX:-none}" - echo ".NET: ${DOTNET_EX:-none}" - echo "CLI: ${CLI_EX:-none}" - echo "Markdown:${MD_FILES:-none}" - - # ── Node.js ───────────────────────────────────────────────────────────────── - test-node: - needs: detect - if: needs.detect.outputs.has_node == 'true' - runs-on: ubuntu-latest - steps: - - uses: 
actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - uses: pnpm/action-setup@b906affcce14559ad1aafd4ab0e942779e9f58b1 # v4 - with: - version: latest - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 - with: - node-version: '20' - - name: Install Bun - uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2 - - name: Install Deno - uses: denoland/setup-deno@667a34cdef165d8d2b2e98dde39547c9daac7282 # v2.0.4 - with: - deno-version: v2.x - - name: Run Node.js tests - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - PIPECAT_API_KEY: ${{ secrets.PIPECAT_API_KEY }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_APP_TOKEN: ${{ secrets.SLACK_APP_TOKEN }} - TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} - run: | - FAILED="" MISSING="" - for dir in ${{ needs.detect.outputs.node_examples }}; do - echo ""; echo "── $dir ──" - pushd "$dir" > /dev/null - MISSING_VARS="" - if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING_VARS="$MISSING_VARS $VAR" - done < 
".env.example" - fi - if [ -n "$MISSING_VARS" ]; then - echo "⏳ MISSING_CREDENTIALS:$(echo $MISSING_VARS | tr ' ' ',')" - MISSING="$MISSING $dir" - popd > /dev/null; continue - fi - # Detect package manager from lockfile — pnpm/bun/deno required for new examples - if [ -f "pnpm-lock.yaml" ]; then - pnpm audit --audit-level=high || { echo "✗ AUDIT-FAILED"; FAILED="$FAILED $dir"; popd > /dev/null; continue; } - pnpm install --frozen-lockfile - pnpm test && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - elif [ -f "bun.lockb" ] || [ -f "bun.lock" ]; then - bun install --frozen-lockfile - bun test && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - elif [ -f "deno.json" ] || [ -f "deno.jsonc" ]; then - deno test && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - else - # Legacy npm fallback for examples predating the pnpm/bun/deno rule - npm audit --audit-level=high || { echo "✗ AUDIT-FAILED"; FAILED="$FAILED $dir"; popd > /dev/null; continue; } - npm install --prefer-offline -q 2>/dev/null || npm install -q - npm test && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - fi - popd > /dev/null - done - echo "failed=$FAILED" >> $GITHUB_OUTPUT - echo "missing=$MISSING" >> $GITHUB_OUTPUT - [ -n "$FAILED" ] && exit 1 || exit 0 - - - name: Comment missing credentials - if: steps.test.outputs.missing != '' && github.event_name == 'pull_request' - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 - with: - script: | - const missing = '${{ steps.test.outputs.missing }}'.trim().split(/\s+/).filter(Boolean); - if (!missing.length) return; - const existing = await github.rest.issues.listComments({ ...context.repo, issue_number: context.issue.number }); - if (existing.data.some(c => c.body.includes('Missing credentials'))) return; - await github.rest.issues.createComment({ ...context.repo, issue_number: context.issue.number, - body: `⏳ **Missing credentials** — these Node.js examples need 
secrets:\n\n${missing.map(d => `- \`${d}\``).join('\n')}\n\n@deepgram/devrel please add the required secrets.` }); - - # ── Python ─────────────────────────────────────────────────────────────────── - test-python: - needs: detect - if: needs.detect.outputs.has_python == 'true' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 - with: - python-version: '3.11' - - name: Run Python tests - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - PIPECAT_API_KEY: ${{ secrets.PIPECAT_API_KEY }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_APP_TOKEN: ${{ secrets.SLACK_APP_TOKEN }} - TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} - run: | - FAILED="" MISSING="" - for dir in ${{ needs.detect.outputs.python_examples }}; do - echo ""; echo "── $dir ──" - pushd "$dir" > /dev/null - MISSING_VARS="" - if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING_VARS="$MISSING_VARS $VAR" - done < 
".env.example" - fi - if [ -n "$MISSING_VARS" ]; then - echo "⏳ MISSING_CREDENTIALS:$(echo $MISSING_VARS | tr ' ' ',')" - MISSING="$MISSING $dir"; popd > /dev/null; continue - fi - pip install -q --upgrade pip - [ -f "requirements.txt" ] && pip install -q -r requirements.txt - [ -f "pyproject.toml" ] && pip install -q -e . - if find tests/ -name "test_*.py" 2>/dev/null | grep -q .; then - pip install -q pytest - python -m pytest tests/ -q && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - elif ls tests/*.py 2>/dev/null | head -1 | grep -q .; then - python "$(ls tests/*.py | head -1)" && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - else - echo "⚠ No tests found" - fi - popd > /dev/null - done - echo "failed=$FAILED" >> $GITHUB_OUTPUT - echo "missing=$MISSING" >> $GITHUB_OUTPUT - [ -n "$FAILED" ] && exit 1 || exit 0 - - - name: Audit Python dependencies - run: | - pip install -q pip-audit - FAILED="" - for dir in ${{ needs.detect.outputs.python_examples }}; do - [ -f "${dir}/requirements.txt" ] || continue - echo "── audit: $dir ──" - pip-audit -r "${dir}/requirements.txt" || FAILED="$FAILED $dir" - done - [ -n "$FAILED" ] && { echo "✗ pip-audit failed for:$FAILED"; exit 1; } || true - - - name: Comment missing credentials - if: steps.test.outputs.missing != '' && github.event_name == 'pull_request' - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 - with: - script: | - const missing = '${{ steps.test.outputs.missing }}'.trim().split(/\s+/).filter(Boolean); - if (!missing.length) return; - const existing = await github.rest.issues.listComments({ ...context.repo, issue_number: context.issue.number }); - if (existing.data.some(c => c.body.includes('Missing credentials'))) return; - await github.rest.issues.createComment({ ...context.repo, issue_number: context.issue.number, - body: `⏳ **Missing credentials** — these Python examples need secrets:\n\n${missing.map(d => `- 
\`${d}\``).join('\n')}\n\n@deepgram/devrel please add the required secrets.` }); - - # ── Go ─────────────────────────────────────────────────────────────────────── - test-go: - needs: detect - if: needs.detect.outputs.has_go == 'true' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 - with: - go-version: '1.22' - - name: Run Go tests - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - run: | - FAILED="" MISSING="" - for dir in ${{ needs.detect.outputs.go_examples }}; do - echo ""; echo "── $dir ──" - pushd "$dir" > /dev/null - MISSING_VARS="" - if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING_VARS="$MISSING_VARS $VAR" - done < ".env.example" - fi - if [ -n "$MISSING_VARS" ]; then - echo "⏳ MISSING_CREDENTIALS:$(echo $MISSING_VARS | tr ' ' ',')" - MISSING="$MISSING $dir"; popd > /dev/null; continue - fi - go mod download - go mod verify - go test ./... 
-v -timeout 60s && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - popd > /dev/null - done - echo "failed=$FAILED" >> $GITHUB_OUTPUT - echo "missing=$MISSING" >> $GITHUB_OUTPUT - [ -n "$FAILED" ] && exit 1 || exit 0 - - # ── Java ───────────────────────────────────────────────────────────────────── - test-java: - needs: detect - if: needs.detect.outputs.has_java == 'true' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - uses: actions/setup-java@c1e323688fd81a25caa38c78aa6df2d33d3e20d9 # v4 - with: - distribution: 'temurin' - java-version: '21' - - name: Run Java tests - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - run: | - FAILED="" MISSING="" - for dir in ${{ needs.detect.outputs.java_examples }}; do - echo ""; echo "── $dir ──" - pushd "$dir" > /dev/null - MISSING_VARS="" - if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING_VARS="$MISSING_VARS $VAR" - done < ".env.example" - fi - if [ -n "$MISSING_VARS" ]; then - echo "⏳ MISSING_CREDENTIALS:$(echo $MISSING_VARS | tr ' ' ',')" - MISSING="$MISSING $dir"; popd > /dev/null; continue - fi - if [ -f "pom.xml" ]; then - mvn test -q && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - else - ./gradlew test && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - fi - popd > /dev/null - done - echo "failed=$FAILED" >> $GITHUB_OUTPUT - echo "missing=$MISSING" >> $GITHUB_OUTPUT - [ -n "$FAILED" ] && exit 1 || exit 0 - - # ── Rust ───────────────────────────────────────────────────────────────────── - test-rust: - needs: detect - if: needs.detect.outputs.has_rust == 'true' - runs-on: ubuntu-latest - 
steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - name: Run Rust tests - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - run: | - FAILED="" MISSING="" - for dir in ${{ needs.detect.outputs.rust_examples }}; do - echo ""; echo "── $dir ──" - pushd "$dir" > /dev/null - MISSING_VARS="" - if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING_VARS="$MISSING_VARS $VAR" - done < ".env.example" - fi - if [ -n "$MISSING_VARS" ]; then - echo "⏳ MISSING_CREDENTIALS:$(echo $MISSING_VARS | tr ' ' ',')" - MISSING="$MISSING $dir"; popd > /dev/null; continue - fi - cargo test && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - popd > /dev/null - done - echo "failed=$FAILED" >> $GITHUB_OUTPUT - [ -n "$FAILED" ] && exit 1 || exit 0 - - - name: Audit Rust dependencies - run: | - cargo install cargo-audit --quiet - FAILED="" - for dir in ${{ needs.detect.outputs.rust_examples }}; do - [ -f "${dir}/Cargo.toml" ] || continue - echo "── audit: $dir ──" - cargo audit --file "${dir}/Cargo.lock" || FAILED="$FAILED $dir" - done - [ -n "$FAILED" ] && { echo "✗ cargo-audit failed for:$FAILED"; exit 1; } || true - - # ── .NET ───────────────────────────────────────────────────────────────────── - test-dotnet: - needs: detect - if: needs.detect.outputs.has_dotnet == 'true' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - uses: actions/setup-dotnet@67a3573c9a986a3f9c594539f4ab511d57bb3ce9 # v4 - with: - dotnet-version: '8.0' - - name: Run .NET tests - id: test - 
continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - run: | - FAILED="" MISSING="" - for dir in ${{ needs.detect.outputs.dotnet_examples }}; do - echo ""; echo "── $dir ──" - pushd "$dir" > /dev/null - MISSING_VARS="" - if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING_VARS="$MISSING_VARS $VAR" - done < ".env.example" - fi - if [ -n "$MISSING_VARS" ]; then - echo "⏳ MISSING_CREDENTIALS:$(echo $MISSING_VARS | tr ' ' ',')" - MISSING="$MISSING $dir"; popd > /dev/null; continue - fi - dotnet test && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - popd > /dev/null - done - echo "failed=$FAILED" >> $GITHUB_OUTPUT - [ -n "$FAILED" ] && exit 1 || exit 0 - - - name: Audit .NET dependencies - run: | - FAILED="" - for dir in ${{ needs.detect.outputs.dotnet_examples }}; do - echo "── audit: $dir ──" - dotnet list "$dir" package --vulnerable || FAILED="$FAILED $dir" - done - [ -n "$FAILED" ] && { echo "✗ dotnet vulnerable packages found in:$FAILED"; exit 1; } || true - - # ── CLI (Deepgram CLI — installed via install.sh) ──────────────────────────── - test-cli: - needs: detect - if: needs.detect.outputs.has_cli == 'true' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - - name: Install Deepgram CLI - run: | - curl -fsSL https://deepgram.com/install.sh | sh - # Ensure CLI is on PATH - echo "$HOME/.deepgram/bin" >> $GITHUB_PATH - echo "/usr/local/bin" >> $GITHUB_PATH - - - name: Run CLI tests - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - run: | - FAILED="" MISSING="" - for dir in ${{ needs.detect.outputs.cli_examples }}; do - echo ""; echo "── $dir 
──" - pushd "$dir" > /dev/null - MISSING_VARS="" - if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING_VARS="$MISSING_VARS $VAR" - done < ".env.example" - fi - if [ -n "$MISSING_VARS" ]; then - echo "⏳ MISSING_CREDENTIALS:$(echo $MISSING_VARS | tr ' ' ',')" - MISSING="$MISSING $dir"; popd > /dev/null; continue - fi - # Run example_test.sh if present, otherwise run example.sh directly - if [ -f "example_test.sh" ]; then - bash example_test.sh && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - elif [ -f "example.sh" ]; then - bash example.sh && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - else - bash "$(ls src/*.sh | head -1)" && echo "✓ PASSED" || { echo "✗ FAILED"; FAILED="$FAILED $dir"; } - fi - popd > /dev/null - done - echo "failed=$FAILED" >> $GITHUB_OUTPUT - [ -n "$FAILED" ] && exit 1 || exit 0 - - # ── Markdown lint (auto-fix, always passes) ────────────────────────────────── - lint-md: - needs: detect - if: needs.detect.outputs.has_md == 'true' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ inputs.ref || github.event.pull_request.head.sha || '' }} - fetch-depth: 0 - - - name: Configure git - if: github.event_name == 'pull_request' - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - name: Install markdownlint - run: npm install -g markdownlint-cli --silent - - - name: Lint and auto-fix markdown - run: | - FILES="${{ needs.detect.outputs.md_files }}" - echo "Linting: $FILES" - # --fix applies auto-fixable rules (trailing spaces, blank lines, etc.) 
- markdownlint --fix $FILES 2>&1 || true - echo "✓ Lint complete (auto-fixes applied if any)" - - - name: Commit fixes if any - if: github.event_name == 'pull_request' - run: | - if git diff --quiet; then - echo "No markdown fixes needed." - else - git add ${{ needs.detect.outputs.md_files }} - git commit -m "style(docs): auto-fix markdown lint issues [skip ci]" - git push origin HEAD - echo "✓ Committed markdown fixes" - fi - - # ── Final gate — the required branch-protection check ──────────────────────── - # Runs after all language jobs regardless of their outcome. - # Fails only if a language job actually failed (not skipped, not missing-creds). - e2e-api-check: - needs: [detect, test-node, test-python, test-go, test-java, test-rust, test-dotnet, test-cli, lint-md] - if: always() - runs-on: ubuntu-latest - steps: - - name: Evaluate results - run: | - NODE="${{ needs.test-node.result }}" - PYTHON="${{ needs.test-python.result }}" - GO="${{ needs.test-go.result }}" - JAVA="${{ needs.test-java.result }}" - RUST="${{ needs.test-rust.result }}" - DOTNET="${{ needs.test-dotnet.result }}" - CLI="${{ needs.test-cli.result }}" - - printf "%-8s %s\n" "node:" "$NODE" - printf "%-8s %s\n" "python:" "$PYTHON" - printf "%-8s %s\n" "go:" "$GO" - printf "%-8s %s\n" "java:" "$JAVA" - printf "%-8s %s\n" "rust:" "$RUST" - printf "%-8s %s\n" "dotnet:" "$DOTNET" - printf "%-8s %s\n" "cli:" "$CLI" - - # skipped = no examples of that language — fine - # success = tests passed — fine - # failure / cancelled = block merge - FAILED=false - for result in "$NODE" "$PYTHON" "$GO" "$JAVA" "$RUST" "$DOTNET" "$CLI"; do - [ "$result" = "failure" ] && FAILED=true - [ "$result" = "cancelled" ] && FAILED=true - done - - if [ "$FAILED" = "true" ]; then - echo "❌ One or more test jobs failed — blocking merge" - echo "outcome=failure" >> $GITHUB_OUTPUT - exit 1 - else - echo "✅ All applicable tests passed (skipped = no examples of that language)" - fi diff --git a/.github/workflows/test-existing.yml 
b/.github/workflows/test-existing.yml deleted file mode 100644 index 8a18aaf..0000000 --- a/.github/workflows/test-existing.yml +++ /dev/null @@ -1,501 +0,0 @@ -name: Test Existing Examples - -# Runs all examples on a schedule to catch regressions from SDK updates, -# API changes, or broken dependencies — independent of any PR activity. - -on: - schedule: - - cron: '0 */6 * * *' # Every 6 hours - workflow_dispatch: - inputs: - reason: - description: 'Why are you triggering this manually?' - required: false - default: 'manual test' - -concurrency: - group: test-existing - cancel-in-progress: false - -permissions: - contents: write - pull-requests: write - issues: write - -jobs: - # ── Node.js ──────────────────────────────────────────────────────────────── - node: - runs-on: ubuntu-latest - outputs: - has_failures: ${{ steps.test.outputs.has_failures }} - failed_examples: ${{ steps.test.outputs.failed_examples }} - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 - with: - node-version: '20' - - - name: Test Node.js examples - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_APP_TOKEN: ${{ secrets.SLACK_APP_TOKEN }} - run: | - FAILED="" 
- for dir in examples/*/; do - [ ! -f "${dir}package.json" ] && continue - echo "Testing: $dir" - pushd "$dir" > /dev/null - - if [ -f ".env.example" ]; then - MISSING="" - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR_NAME="${line%%=*}"; VAR_NAME="${VAR_NAME// /}" - [ -z "$VAR_NAME" ] && continue - [ -z "${!VAR_NAME+x}" ] || [ -z "${!VAR_NAME}" ] && MISSING="$MISSING $VAR_NAME" - done < ".env.example" - if [ -n "$MISSING" ]; then - echo "⚠ Skipping $dir — missing secrets: $MISSING" - popd > /dev/null; continue - fi - fi - - if [ -f "package-lock.json" ]; then npm ci --prefer-offline 2>&1; else npm install 2>&1; fi - npm test 2>&1 || FAILED="$FAILED $dir" - popd > /dev/null - done - - [ -n "$FAILED" ] && echo "has_failures=true" >> $GITHUB_OUTPUT \ - || echo "has_failures=false" >> $GITHUB_OUTPUT - echo "failed_examples=${FAILED}" >> $GITHUB_OUTPUT - - # ── Python ───────────────────────────────────────────────────────────────── - python: - runs-on: ubuntu-latest - outputs: - has_failures: ${{ steps.test.outputs.has_failures }} - failed_examples: ${{ steps.test.outputs.failed_examples }} - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 - with: - python-version: '3.11' - - - name: Test Python examples - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID 
}} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} - run: | - FAILED="" - for dir in examples/*/; do - HAS_PY=false - [ -f "${dir}requirements.txt" ] && HAS_PY=true - [ -f "${dir}pyproject.toml" ] && HAS_PY=true - [ "$HAS_PY" = "false" ] && continue - echo "Testing: $dir" - pushd "$dir" > /dev/null - - if [ -f ".env.example" ]; then - MISSING="" - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR_NAME="${line%%=*}"; VAR_NAME="${VAR_NAME// /}" - [ -z "$VAR_NAME" ] && continue - [ -z "${!VAR_NAME+x}" ] || [ -z "${!VAR_NAME}" ] && MISSING="$MISSING $VAR_NAME" - done < ".env.example" - if [ -n "$MISSING" ]; then - echo "⚠ Skipping $dir — missing secrets: $MISSING" - popd > /dev/null; continue - fi - fi - - python -m pip install -q --upgrade pip - [ -f "requirements.txt" ] && pip install -q -r requirements.txt - [ -f "pyproject.toml" ] && pip install -q -e . 
- - TEST_RAN=false - if find tests/ -name "test_*.py" 2>/dev/null | grep -q .; then - pip install -q pytest - python -m pytest tests/ -v 2>&1 && TEST_RAN=true || { FAILED="$FAILED $dir"; TEST_RAN=true; } - fi - if [ "$TEST_RAN" = "false" ] && ls tests/*.py 2>/dev/null | head -1 | grep -q .; then - python "$(ls tests/*.py | head -1)" 2>&1 || FAILED="$FAILED $dir" - fi - popd > /dev/null - done - - [ -n "$FAILED" ] && echo "has_failures=true" >> $GITHUB_OUTPUT \ - || echo "has_failures=false" >> $GITHUB_OUTPUT - echo "failed_examples=${FAILED}" >> $GITHUB_OUTPUT - - # ── Go ───────────────────────────────────────────────────────────────────── - go: - runs-on: ubuntu-latest - outputs: - has_failures: ${{ steps.test.outputs.has_failures }} - failed_examples: ${{ steps.test.outputs.failed_examples }} - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - - uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5 - with: - go-version: '1.22' - - - name: Test Go examples - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ secrets.DAILY_API_KEY }} - run: | - FAILED="" - for dir in examples/*/; do - [ ! 
-f "${dir}go.mod" ] && continue - echo "Testing: $dir" - pushd "$dir" > /dev/null - - if [ -f ".env.example" ]; then - MISSING="" - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR_NAME="${line%%=*}"; VAR_NAME="${VAR_NAME// /}" - [ -z "$VAR_NAME" ] && continue - [ -z "${!VAR_NAME+x}" ] || [ -z "${!VAR_NAME}" ] && MISSING="$MISSING $VAR_NAME" - done < ".env.example" - if [ -n "$MISSING" ]; then - echo "⚠ Skipping $dir — missing secrets: $MISSING" - popd > /dev/null; continue - fi - fi - - go mod download - go test ./... -v -timeout 60s 2>&1 || FAILED="$FAILED $dir" - popd > /dev/null - done - - [ -n "$FAILED" ] && echo "has_failures=true" >> $GITHUB_OUTPUT \ - || echo "has_failures=false" >> $GITHUB_OUTPUT - echo "failed_examples=${FAILED}" >> $GITHUB_OUTPUT - - # ── Java ─────────────────────────────────────────────────────────────────── - java: - runs-on: ubuntu-latest - outputs: - has_failures: ${{ steps.test.outputs.has_failures }} - failed_examples: ${{ steps.test.outputs.failed_examples }} - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - - uses: actions/setup-java@c1e323688fd81a25caa38c78aa6df2d33d3e20d9 # v4 - with: - distribution: 'temurin' - java-version: '21' - - - name: Test Java examples - id: test - continue-on-error: true - env: - DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }} - TWILIO_ACCOUNT_SID: ${{ secrets.TWILIO_ACCOUNT_SID }} - TWILIO_AUTH_TOKEN: ${{ secrets.TWILIO_AUTH_TOKEN }} - TWILIO_PHONE_NUMBER: ${{ secrets.TWILIO_PHONE_NUMBER }} - LIVEKIT_URL: ${{ secrets.LIVEKIT_URL }} - LIVEKIT_API_KEY: ${{ secrets.LIVEKIT_API_KEY }} - LIVEKIT_API_SECRET: ${{ secrets.LIVEKIT_API_SECRET }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }} - DISCORD_CLIENT_ID: ${{ secrets.DISCORD_CLIENT_ID }} - VONAGE_APPLICATION_ID: ${{ secrets.VONAGE_APPLICATION_ID }} - VONAGE_PRIVATE_KEY: ${{ secrets.VONAGE_PRIVATE_KEY }} - DAILY_API_KEY: ${{ 
secrets.DAILY_API_KEY }} - run: | - FAILED="" - for dir in examples/*/; do - HAS_JAVA=false - [ -f "${dir}pom.xml" ] && HAS_JAVA=true - [ -f "${dir}build.gradle" ] && HAS_JAVA=true - [ "$HAS_JAVA" = "false" ] && continue - echo "Testing: $dir" - pushd "$dir" > /dev/null - if [ -f ".env.example" ]; then - MISSING="" - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR_NAME="${line%%=*}"; VAR_NAME="${VAR_NAME// /}" - [ -z "$VAR_NAME" ] && continue - [ -z "${!VAR_NAME+x}" ] || [ -z "${!VAR_NAME}" ] && MISSING="$MISSING $VAR_NAME" - done < ".env.example" - if [ -n "$MISSING" ]; then - echo "⚠ Skipping $dir — missing secrets: $MISSING" - popd > /dev/null; continue - fi - fi - if [ -f "pom.xml" ]; then - mvn test -q 2>&1 || FAILED="$FAILED $dir" - else - ./gradlew test 2>&1 || FAILED="$FAILED $dir" - fi - popd > /dev/null - done - [ -n "$FAILED" ] && echo "has_failures=true" >> $GITHUB_OUTPUT \ - || echo "has_failures=false" >> $GITHUB_OUTPUT - echo "failed_examples=${FAILED}" >> $GITHUB_OUTPUT - - # ── SDK version audit ─────────────────────────────────────────────────────── - # Checks every merged example for outdated Deepgram SDK pins. - # Uses the public GitHub releases API — no auth required. - # Major-version gaps are flagged (e.g. pinned v4 when latest is v6). - sdk-audit: - runs-on: ubuntu-latest - outputs: - outdated_examples: ${{ steps.scan.outputs.outdated }} - has_outdated: ${{ steps.scan.outputs.has_outdated }} - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - - - name: Fetch latest SDK versions - id: versions - run: | - latest_major() { - curl -sf "https://api.github.com/repos/deepgram/$1/releases/latest" \ - | jq -r '.tag_name // "0"' | tr -d 'v' | cut -d. 
-f1 - } - echo "python=$(latest_major deepgram-python-sdk)" >> $GITHUB_OUTPUT - echo "js=$(latest_major deepgram-js-sdk)" >> $GITHUB_OUTPUT - echo "go=$(latest_major deepgram-go-sdk)" >> $GITHUB_OUTPUT - echo "java=$(latest_major deepgram-java-sdk)" >> $GITHUB_OUTPUT - echo "rust=$(latest_major deepgram-rust-sdk)" >> $GITHUB_OUTPUT - echo "dotnet=$(latest_major deepgram-dotnet-sdk)" >> $GITHUB_OUTPUT - cat $GITHUB_OUTPUT - - - name: Scan examples for outdated SDK pins - id: scan - run: | - PY_LATEST="${{ steps.versions.outputs.python }}" - JS_LATEST="${{ steps.versions.outputs.js }}" - GO_LATEST="${{ steps.versions.outputs.go }}" - JAVA_LATEST="${{ steps.versions.outputs.java }}" - RUST_LATEST="${{ steps.versions.outputs.rust }}" - DOTNET_LATEST="${{ steps.versions.outputs.dotnet }}" - - OUTDATED="" - - for dir in examples/*/; do - [ ! -d "$dir" ] && continue - - # Python — deepgram-sdk==X or >=X - if [ -f "${dir}requirements.txt" ]; then - PINNED=$(grep -oE 'deepgram-sdk[^,\n]*' "${dir}requirements.txt" \ - | grep -oE '[0-9]+' | head -1 || true) - if [ -n "$PINNED" ] && [ "$PINNED" -lt "$PY_LATEST" ] 2>/dev/null; then - echo "OUTDATED Python $dir: pinned major=$PINNED latest=$PY_LATEST" - OUTDATED="$OUTDATED $dir" - fi - fi - - # JavaScript/TypeScript — @deepgram/sdk: "^X.y.z" or "X.y.z" - if [ -f "${dir}package.json" ]; then - PINNED=$(jq -r ' - ((.dependencies // {}) + (.devDependencies // {}))["@deepgram/sdk"] - // empty' "${dir}package.json" \ - | grep -oE '[0-9]+' | head -1 || true) - if [ -n "$PINNED" ] && [ "$PINNED" -lt "$JS_LATEST" ] 2>/dev/null; then - echo "OUTDATED JS $dir: pinned major=$PINNED latest=$JS_LATEST" - OUTDATED="$OUTDATED $dir" - fi - fi - - # Go — github.com/deepgram/deepgram-go-sdk/vX - if [ -f "${dir}go.mod" ]; then - PINNED=$(grep -oE 'deepgram-go-sdk/v[0-9]+' "${dir}go.mod" \ - | grep -oE '[0-9]+' | head -1 || true) - if [ -n "$PINNED" ] && [ "$PINNED" -lt "$GO_LATEST" ] 2>/dev/null; then - echo "OUTDATED Go $dir: pinned 
major=v$PINNED latest=v$GO_LATEST" - OUTDATED="$OUTDATED $dir" - fi - fi - done - - OUTDATED=$(echo "$OUTDATED" | xargs | tr ' ' '\n' | sort -u | xargs) - if [ -n "$OUTDATED" ]; then - echo "has_outdated=true" >> $GITHUB_OUTPUT - echo "outdated=$OUTDATED" >> $GITHUB_OUTPUT - echo "Examples with outdated SDKs: $OUTDATED" - else - echo "has_outdated=false" >> $GITHUB_OUTPUT - echo "outdated=" >> $GITHUB_OUTPUT - echo "All SDK pins are current" - fi - - # ── Report failures + fix (one at a time) ────────────────────────────────── - # Pick the first failing or outdated example that has no open fix PR. - # Test failures take priority over SDK version upgrades. - report: - needs: [node, python, go, java, sdk-audit] - if: always() - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - name: Get date - id: date - run: echo "date=$(date -u +%Y-%m-%d)" >> $GITHUB_OUTPUT - - - name: Fetch latest Deepgram SDK versions - id: sdk - run: | - latest() { curl -sf "https://api.github.com/repos/deepgram/$1/releases/latest" | jq -r '.tag_name // "unknown"'; } - echo "python=$(latest deepgram-python-sdk)" >> $GITHUB_OUTPUT - echo "js=$(latest deepgram-js-sdk)" >> $GITHUB_OUTPUT - echo "go=$(latest deepgram-go-sdk)" >> $GITHUB_OUTPUT - - - name: Pick first unaddressed failure - id: collect - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - # Test failures first (higher priority), then outdated SDK pins - ALL_FAILED="" - [ "${{ needs.node.outputs.has_failures }}" = "true" ] && \ - ALL_FAILED="$ALL_FAILED ${{ needs.node.outputs.failed_examples }}" - [ "${{ needs.python.outputs.has_failures }}" = "true" ] && \ - ALL_FAILED="$ALL_FAILED ${{ needs.python.outputs.failed_examples }}" - [ "${{ needs.go.outputs.has_failures }}" = "true" ] && \ - ALL_FAILED="$ALL_FAILED ${{ 
needs.go.outputs.failed_examples }}" - [ "${{ needs.java.outputs.has_failures }}" = "true" ] && \ - ALL_FAILED="$ALL_FAILED ${{ needs.java.outputs.failed_examples }}" - # Append outdated-SDK examples after test failures (lower priority) - [ "${{ needs.sdk-audit.outputs.has_outdated }}" = "true" ] && \ - ALL_FAILED="$ALL_FAILED ${{ needs.sdk-audit.outputs.outdated_examples }}" - ALL_FAILED=$(echo "$ALL_FAILED" | xargs | tr ' ' '\n' | sort -u | xargs) - - TARGET="" - TARGET_SLUG="" - for EXAMPLE in $ALL_FAILED; do - SLUG=$(basename "${EXAMPLE%/}") - # Skip if a fix PR is already open for this example - OPEN=$(gh pr list --repo ${{ github.repository }} --state open \ - --search "$SLUG" --json number --jq 'length' 2>/dev/null || echo "0") - if [ "$OPEN" = "0" ]; then - TARGET="$EXAMPLE" - TARGET_SLUG="$SLUG" - echo "Selected: $EXAMPLE (no open fix PR)" - break - else - echo "Skipping $EXAMPLE — fix PR already open" - fi - done - - if [ -n "$TARGET" ]; then - echo "has_target=true" >> $GITHUB_OUTPUT - echo "target=$TARGET" >> $GITHUB_OUTPUT - echo "slug=$TARGET_SLUG" >> $GITHUB_OUTPUT - else - echo "has_target=false" >> $GITHUB_OUTPUT - [ -n "$ALL_FAILED" ] && echo "All failures already have open fix PRs — nothing to do" - fi - - - name: Open issue for this failure - if: steps.collect.outputs.has_target == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - SLUG="${{ steps.collect.outputs.slug }}" - RUN_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - - EXISTING=$(gh issue list --repo ${{ github.repository }} \ - --label "queue:fix-example" --state open \ - --search "[Regression] $SLUG" \ - --json number --jq '.[0].number') - - if [ -n "$EXISTING" ]; then - echo "Issue already open for $SLUG (#$EXISTING)" - else - gh issue create \ - --repo ${{ github.repository }} \ - --title "[Regression] $SLUG — tests failing" \ - --label "queue:fix-example" \ - --label "type:fix" \ - --body "Regression in \`${{ 
steps.collect.outputs.target }}\`. Run: $RUN_URL" - echo "Created issue for $SLUG" - fi - - - name: Run fix agent for this failure - if: steps.collect.outputs.has_target == 'true' - uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: 1908afc6-c134-4c6f-a684-ed7d8ce91759 - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Write,Edit,Glob,Grep,WebSearch,WebFetch" - direct_prompt: | - A merged example needs attention — either a test regression or an outdated SDK pin. - Example: ${{ steps.collect.outputs.target }} - - 1. Read the example's source, tests, and dependency files - 2. Check if the issue is a test failure, an outdated SDK version, or both - 3. Search Kapa for current correct SDK usage (instructions/kapa-search.md) - 4. Fix the code and upgrade SDK pins to the required versions: - - Python: deepgram-sdk==${{ steps.sdk.outputs.python }} - - JavaScript: @deepgram/sdk@${{ steps.sdk.outputs.js }} - - Go: ${{ steps.sdk.outputs.go }} - 5. 
Open or update a PR — one PR per example, additive: - - Check for an existing open PR whose branch starts with fix/${{ steps.collect.outputs.slug }}: - EXISTING=$(gh pr list --state open --search "fix ${{ steps.collect.outputs.slug }}" --json number,headRefName --jq '.[0]') - - If one exists: check out its branch, push the fix there (no new PR) - - If none exists: create branch fix/${{ steps.collect.outputs.slug }}-regression-${{ steps.date.outputs.date }}, - commit, push, and open a new PR with label type:fix - - Title: [Fix] {NNN}-${{ steps.collect.outputs.slug }} — {brief description} - - Do NOT enable auto-merge — PRs wait for human review and merge - - Today's date: ${{ steps.date.outputs.date }} - Repository: ${{ github.repository }} diff --git a/.github/workflows/vp.yml b/.github/workflows/vp.yml deleted file mode 100644 index 3c2fd29..0000000 --- a/.github/workflows/vp.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: VP — Unstick Pipeline - -# Supervisory role — runs periodically to find anything stuck in the pipeline -# and get it moving. Re-triggers the appropriate agent workflow, or escalates -# to @deepgram/devrel if it cannot resolve the blockage. -# -# A "stuck" item is one where the responsible workflow missed its event trigger -# (GITHUB_TOKEN limitation) or hit a silent failure. 
- -on: - schedule: - - cron: '17 */4 * * *' # Every 4 hours at :17 - workflow_dispatch: - -concurrency: - group: vp - cancel-in-progress: false - -permissions: - contents: write - pull-requests: write - issues: write - actions: write # Needed to trigger other workflows via gh workflow run - -jobs: - run: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - fetch-depth: 0 - - - name: Configure git - run: | - git config user.name "examples-bot" - git config user.email "noreply@deepgram.com" - - - name: Get date - id: date - run: echo "date=$(date -u +%Y-%m-%d)" >> $GITHUB_OUTPUT - - - name: Run VP instruction - uses: anthropics/claude-code-action@b47fd721da662d48c5680e154ad16a73ed74d2e0 # v1.0.93 - env: - KAPA_API_KEY: ${{ secrets.KAPA_API_KEY }} - KAPA_PROJECT_ID: ${{ vars.KAPA_PROJECT_ID }} - GITHUB_REPOSITORY: ${{ github.repository }} - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - github_token: ${{ secrets.GITHUB_TOKEN }} - mode: agent - model: claude-opus-4-6 - allowed_tools: "Bash,Read,Glob,Grep" - timeout_minutes: 20 - direct_prompt: | - Read and execute instructions/vp.md. - - Context: - - Today's date: ${{ steps.date.outputs.date }} - - Repository: ${{ github.repository }} - - Run ID: ${{ github.run_id }} - - Server URL: ${{ github.server_url }} diff --git a/README.md b/README.md index e83f6f6..d205a0b 100644 --- a/README.md +++ b/README.md @@ -74,13 +74,21 @@ Every PR that touches `examples/**` runs language-specific test jobs automatical All examples are also tested on a recurring schedule to catch regressions from SDK updates or API changes. +## Automated example building + +Suggesting a new integration is as simple as [opening an issue](../../issues/new/choose). 
From there, a GitHub Actions workflow handles the build: an LLM agent (Claude Opus) writes the code, runs it in a Docker sandbox, diagnoses failures, and iterates until tests pass — then opens a PR for human review before anything merges. The workflow also sweeps open suggestions on a schedule, so nothing sits unbuilt indefinitely. + +The agent uses a **neurosymbolic architecture** to stay on track. A symbolic working memory records observed facts as tools fire — which files exist, whether tests are passing, which phases are complete — without the LLM being able to misreport them. A forward-chaining rule engine pattern-matches tool output each turn and injects targeted guidance when things go wrong: missing imports get named, repeated failing commands trigger a strategy nudge, turn-budget warnings reprioritise the remaining work. And a constraint checker holds a formal veto over completion: when the agent signals it's done, the checker verifies the definition of done deterministically — required files exist, test directories are non-empty, no hardcoded secrets — and rejects the signal if anything is missing. The LLM handles open-ended reasoning and generation; the symbolic layer handles state, rules, and verification. + ## Directory structure ``` examples/ {NNN}-{slug}/ # e.g. 
010-getting-started-node - README.md # What it does, prerequisites, env vars, how to run + README.md # Quickstart — what it does, prerequisites, env vars, how to run + BLOG.md # Developer walkthrough — how it was built, step by step .env.example # Every required environment variable (no values) + screenshot.png # 1240×760 Playwright screenshot (UI/terminal examples) src/ # Source code tests/ # Tests — exit 0=pass, 1=fail, 2=missing credentials @@ -89,6 +97,7 @@ tests/ .github/ workflows/ # CI workflows + scripts/ # Agent scripts (plan, build, filter secrets, symbolic layer) ISSUE_TEMPLATE/ # Issue templates ``` diff --git a/instructions/engineer.md b/instructions/engineer.md deleted file mode 100644 index 6de22cc..0000000 --- a/instructions/engineer.md +++ /dev/null @@ -1,730 +0,0 @@ -# Instruction: Engineer — Build an Example - -> ⛔ **HARD RULE: Never create, edit, or delete any file under `.github/`.** -> Only modify files under `examples/` and `instructions/`. - -> ⛔ **HARD RULE: Every example MUST use Deepgram directly or through a partner's tooling/API.** -> This means Deepgram STT, TTS, Voice Agents, or Audio Intelligence must be demonstrably called — -> either via the Deepgram SDK, or via a partner integration that routes audio through Deepgram -> (e.g. LiveKit → Deepgram, Pipecat → Deepgram, Twilio → Deepgram WebSocket). -> An example that merely mentions Deepgram or uses a competing speech provider is NOT acceptable. - -> ⛔ **HARD RULE: Use the partner's interface for partner integrations — never bypass it with the Deepgram SDK.** -> -> The point of a partner integration example is to show the partner's interface working with Deepgram. -> If the partner provides an SDK or interface that wraps Deepgram, you MUST route all audio/speech -> calls through that partner interface — NOT directly through the Deepgram SDK. 
-> -> **Examples of correct vs incorrect:** -> - Vercel AI SDK integration → use `@ai-sdk/deepgram` through the AI SDK, NOT `new DeepgramClient()` -> - LangChain integration → use the LangChain Deepgram tool/loader, NOT `new DeepgramClient()` -> - LiveKit integration → use `livekit-plugins-deepgram`, NOT `new DeepgramClient()` alongside LiveKit -> - Pipecat integration → use `pipecat-ai[deepgram]`, NOT a separate `DeepgramClient()` call -> -> **Use the Deepgram SDK directly ONLY when:** -> - The example is a plain Deepgram SDK demo (no partner) -> - The partner has no STT/TTS interface and you are piping raw audio to Deepgram (e.g. Twilio → Deepgram WebSocket) -> -> **Never use raw `ws`, `fetch`, or `http` for audio calls.** If no SDK exists for the layer you need, -> use the Deepgram SDK. Raw protocol calls are only acceptable for the partner's own signalling/control -> plane (e.g. a Twilio TwiML webhook response), not for audio transcription or synthesis. -> -> **Tests must exercise the partner interface**, not call `new DeepgramClient()` directly. A test that -> bypasses the partner and hits the Deepgram SDK alone is not a test of the integration. - -You are the Engineer. You build full, working integration examples. Each PR is one -`examples/{NNN}-{slug}/` directory. The Researcher has already gathered platform -context — read their comment before writing code. - -## Kapa Search Helper - -```bash -kapa_search() { - local query="$1" - curl -s -L "https://api.kapa.ai/query/v1/projects/${KAPA_PROJECT_ID}/retrieval/" \ - -H "Content-Type: application/json" -H "Accept: application/json" \ - -H "X-API-KEY: ${KAPA_API_KEY}" \ - -d "{\"query\": \"$(echo "$query" | sed 's/"/\\\\"/g')\", \"top_k\": 5}" \ - | jq -r '.sources | sort_by(.updated_at) | reverse | .[:3][] | "--- " + .title + " ---\n" + .content' 2>/dev/null -} -``` - ---- - -## Step 1: Find the queue issue to build - -User-submitted suggestions take priority over bot-queued examples. 
Check in order: - -```bash -# 1. First: user-submitted suggestions (priority:user label) -USER_ISSUE=$(gh issue list \ - --label "queue:new-example,action:generate,priority:user" \ - --state open \ - --json number,title,body,labels,comments \ - --jq 'sort_by(.createdAt) | .[0]') - -# 2. Fallback: regular bot-queued examples (no priority:user) -BOT_ISSUE=$(gh issue list \ - --label "queue:new-example" \ - --state open \ - --json number,title,body,labels,comments \ - --jq '[.[] | select( - (.labels | map(.name) | any(. == "action:generate" or . == "action:research")) and - (.labels | map(.name) | contains(["priority:user"]) | not) - )] | sort_by(.createdAt) | .[0]') - -ISSUE=$([ -n "$USER_ISSUE" ] && [ "$USER_ISSUE" != "null" ] && echo "$USER_ISSUE" || echo "$BOT_ISSUE") -``` - -If none found, stop. - -If the issue has `action:research` (not yet researched), do the research yourself via Kapa before building: -```bash -kapa_search "deepgram {platform} SDK integration {language} example" -kapa_search "{specific SDK} live transcription {language}" -``` - -Read any existing researcher comment (starts with "## 🔬 Research findings") if present. -Advance the issue to generate when you start: remove `action:research`, add `action:generate`. - ---- - -## Step 2: Find the next example number - -Must account for both merged examples AND open PRs. - -```bash -MERGED_NUMS=$(ls examples/ | grep -oE '^[0-9]+' | sort -n) -PR_NUMS=$(gh pr list --state open --json title \ - --jq '.[].title | capture("^\\[(?:Example|Fix)\\] (?P[0-9]+)") | .n' \ - 2>/dev/null | sort -n) -ALL_NUMS=$(printf '%s\n' $MERGED_NUMS $PR_NUMS | sort -n | uniq) - -# New platform: next free multiple of 10 -LAST_ROUND=$(echo "$ALL_NUMS" | grep -E '^[0-9]+0$' | tail -1) -NEXT=$(printf "%03d" $(( ${LAST_ROUND:-0} + 10 ))) -echo "Next slot: $NEXT" -echo "Taken: $ALL_NUMS" -``` - -Platforms own a number group (020 = Twilio, 030 = LiveKit). A second example -for the same platform gets the next sub-number (031, 032...). 
- ---- - -## Step 3: Verify Deepgram SDK patterns via Kapa - -Before writing any code, confirm the exact SDK method you'll use: - -```bash -kapa_search "deepgram {product} {language} SDK method example" -kapa_search "{specific method} options parameters response" -``` - -**Never guess API signatures.** Use exactly what Kapa returns. - ---- - -## Step 4: Create the branch and directory - -```bash -SLUG="{integration}-{language}" -BRANCH="example/${NEXT}-${SLUG}" -EXAMPLE_DIR="examples/${NEXT}-${SLUG}" - -git checkout -b "$BRANCH" -mkdir -p "${EXAMPLE_DIR}/src" "${EXAMPLE_DIR}/tests" -``` - ---- - -## Step 5: Write the example - -### Required files - -#### `.env.example` -List every required variable, no values: -``` -# Deepgram — https://console.deepgram.com/ -DEEPGRAM_API_KEY= - -# {Platform} — {link to console} -{PLATFORM_VAR}= -``` - -#### Source code in `src/` - -- **Use the official Deepgram SDK** — never raw HTTP calls -- Read credentials from environment (never hardcode) -- Keep focused: one integration point, one clear use case -- Comment WHY, not WHAT (see commenting standard below) - -**SDK v5 patterns (verify with Kapa):** - -> ⛔ **Every Deepgram API call MUST include `tag: "deepgram-examples"` (JS) or -> `tag="deepgram-examples"` (Python).** This tags usage in the Deepgram console so -> internal test traffic is identifiable. No spaces in the tag value. 
- -```javascript -// Node.js — DeepgramClient (not createClient) -const { DeepgramClient } = require('@deepgram/sdk'); -const client = new DeepgramClient({ apiKey: process.env.DEEPGRAM_API_KEY }); - -// Pre-recorded: flat options, throws on error -const data = await client.listen.v1.media.transcribeUrl( - { url }, - { model: 'nova-3', tag: 'deepgram-examples' } // ← tag is REQUIRED on every call -); - -// Live WebSocket -const conn = await client.listen.v1.connect({ - model: 'nova-3', encoding: 'mulaw', sample_rate: 8000, - tag: 'deepgram-examples', // ← tag is REQUIRED on every call -}); -conn.on('open', () => { /* connected */ }); -conn.sendMedia(audioBuffer); -conn.sendCloseStream({ type: 'CloseStream' }); -conn.close(); -``` - -```python -# Python — DeepgramClient() reads DEEPGRAM_API_KEY from env -from deepgram import DeepgramClient -client = DeepgramClient() -# tag="deepgram-examples" is REQUIRED on every Deepgram API call -response = client.listen.v1.media.transcribe_url( - url=AUDIO_URL, model='nova-3', tag='deepgram-examples' -) -``` - -**Commenting standard:** -1. WHY this approach (not just what it does) -2. Feature-enabling parameter marked with `# ← THIS enables X` -3. Response path explained: `# data.results.channels[0].alternatives[0].transcript` -4. SDK version gotchas called out explicitly -5. Alternative options listed in comments near API calls - -#### Tests in `tests/` - -**Exit code convention:** -- `0` — tests passed -- `1` — real failure (code bug, assertion error) -- `2` — missing credentials (expected; CI handles gracefully) - -> ⛔ **Tests MUST exercise the example's own src/ code — not just the Deepgram SDK.** -> Creating a standalone `DeepgramClient()` in a test and calling `transcribeUrl()` directly -> is NOT a test of the example. It is a test that Deepgram's API works. Tests must import -> from `src/` and call the actual functions, endpoints, or classes the example provides. 
- -**How to structure tests by example type:** - -``` -REST API (FastAPI, Express, NestJS, Django, etc.) - → Spin up the actual server in-process using TestClient / supertest / httpx - → Make real HTTP requests to the example's endpoints - → Assert on the response shape and content - -WebSocket server (Twilio, Vonage, LiveKit bridge, etc.) - → Import createApp() or equivalent from src/ - → Start the server in-process - → Connect a WebSocket client and stream test audio - → Assert the server receives transcripts and handles them correctly - -Library / tool (LangChain tool, LlamaIndex loader, CrewAI agent, etc.) - → Import the function/class from src/ - → Call it with real inputs - → Assert on the output - -Bot (Discord, Slack, Telegram, WhatsApp, etc.) - → The bot's core logic MUST be exported as testable functions from src/ - e.g. export processAudio(buffer), handleMessage(msg), transcribeAttachment(url) - → Tests import and call those exported functions - → Do NOT rely solely on testing that the bot client initialises — test what it does - -CLI / script - → If the script is just a wrapper, refactor it to export a main() function - → Test calls main() with a known audio URL/file and asserts on the output - -Desktop / mobile (Electron, Tauri, React Native, Swift, Kotlin) - → Test the backend/server portions that CAN run headlessly - → Test all helper functions in src/ that don't require a running UI - → File structure and syntax checks are acceptable supplements, not replacements -``` - -**What is NEVER acceptable:** -- A test that creates `new DeepgramClient()` itself and calls SDK methods without going through src/ -- A test that only checks `require('../src/...')` doesn't throw (import-only test) -- A test that only checks third-party dependencies import cleanly - -```javascript -// Node.js test template -'use strict'; -const fs = require('fs'), path = require('path'); - -// ── Credential check — MUST be first ────────────────────────────────────── -const 
required = fs.readFileSync(path.join(__dirname,'..', '.env.example'), 'utf8') - .split('\n').filter(l => /^[A-Z][A-Z0-9_]+=/.test(l.trim())).map(l => l.split('=')[0].trim()); -const missing = required.filter(k => !process.env[k]); -if (missing.length > 0) { - console.error(`MISSING_CREDENTIALS: ${missing.join(',')}`); - process.exit(2); -} -// ────────────────────────────────────────────────────────────────────────── - -// ... real assertions using actual API calls ... -``` - -**Asserting transcription results — never check for specific words:** - -Transcription is non-deterministic. Do NOT assert that the output contains -specific words like `['spacewalk', 'astronaut']`. Instead, assert on structure -and proportionality: - -```javascript -// ✅ Good — proportional to audio sent -const audioSentSecs = bytesSent / (sampleRate * 2); // 16-bit mono -const minChars = Math.max(5, audioSentSecs * 2); // ≥2 chars/sec -assert(transcript.trim().length >= minChars, - `Transcript too short: ${transcript.length} chars for ${audioSentSecs}s`); - -// ✅ Good — structural checks -assert(result.metadata.duration > 0, 'metadata.duration should be positive'); -assert(result.results.channels[0].alternatives[0].words.length > 0, 'should have words'); -const lastWord = words[words.length - 1]; -assert(lastWord.end <= audioSentSecs + 2, 'word timestamps should not exceed audio duration'); - -// ❌ Bad — non-deterministic, will flake -const found = ['spacewalk','astronaut','nasa'].filter(w => transcript.includes(w)); -assert(found.length > 0); -``` - -```python -# Python equivalent -audio_sent_secs = bytes_sent / (sample_rate * 2) -min_chars = max(5, audio_sent_secs * 2) -assert len(transcript.strip()) >= min_chars, f"Transcript too short for {audio_sent_secs}s of audio" -assert response['metadata']['duration'] > 0 -``` - -```python -# Python test template -import os, sys -from pathlib import Path - -# ── Credential check ─────────────────────────────────────────────────────── -required = 
[l.split('=')[0].strip() for l in Path('../.env.example').read_text().splitlines() - if l.strip() and not l.startswith('#') and '=' in l] -missing = [k for k in required if not os.environ.get(k)] -if missing: - print(f"MISSING_CREDENTIALS: {','.join(missing)}", file=sys.stderr); sys.exit(2) -# ────────────────────────────────────────────────────────────────────────── - -# ... real assertions using actual API calls ... -``` - -#### `README.md` - -```markdown -# {Title} - -{2-3 sentences describing what this example demonstrates and why it's useful.} - -## What you'll build - -{Concrete end result: "A Node.js server that transcribes incoming Twilio calls in real-time..."} - -## Prerequisites - -- {Runtime and version} -- Deepgram account — [get a free API key](https://console.deepgram.com/) -- {Platform} account — [sign up]({url}) - -## Environment variables - -| Variable | Where to find it | -|----------|-----------------| -| `DEEPGRAM_API_KEY` | [Deepgram console](https://console.deepgram.com/) | -| `{PLATFORM_VAR}` | {exact path in platform dashboard} | - -## Install and run - -\`\`\`bash -{exact commands} -\`\`\` - -## Key parameters - -| Parameter | Value | Description | -|-----------|-------|-------------| -| `model` | `nova-3` | {what this controls} | - -## How it works - -{Step-by-step: what happens when the code runs} - -## Starter templates - -[deepgram-starters](https://github.com/orgs/deepgram-starters/repositories) -``` - ---- - -## Supply-chain security — required for every example - -Examples are public code that users clone and run directly. Follow these rules for every -new example — they protect users from compromised or malicious dependencies. 
- -### Node.js (pnpm / bun / deno) - -**`package.json`** — exact versions only, no `^` or `~`; pin the package manager itself: -```json -{ - "packageManager": "pnpm@9.6.0", - "dependencies": { - "@deepgram/sdk": "3.9.0", - "express": "4.21.2" - } -} -``` - -**`.npmrc`** in the example root — prevents accidental range saves: -``` -save-exact=true -``` - -Commit `pnpm-lock.yaml` (or `bun.lockb` / `deno.lock`). Run before pushing: -```bash -pnpm audit --audit-level=high # or: bun audit / deno audit -``` - -### Python - -**`requirements.txt`** — `==` pins only, never `>=` or `~=`: -``` -deepgram-sdk==3.10.0 -fastapi==0.115.6 -uvicorn==0.34.0 -``` - -For examples with more than 3 dependencies, use **pip-tools hash pinning**: -```bash -# requirements.in — unpinned names only -deepgram-sdk -fastapi -uvicorn - -# Generate requirements.txt with per-package sha256 hashes: -pip install pip-tools -pip-compile --generate-hashes requirements.in - -# Install with hash verification (use this command in the README too): -pip install --require-hashes -r requirements.txt -``` - -Run before pushing: -```bash -pip install pip-audit -pip-audit -r requirements.txt -``` - -### Go - -Commit both `go.mod` and `go.sum`. `go.sum` contains cryptographic checksums for every -downloaded module — Go's built-in integrity guarantee. - -```bash -go mod tidy # prune unused deps, update go.sum -go mod verify # re-verify local cache against go.sum checksums -``` - -### Java - -**Maven `pom.xml`** — exact versions only, never version ranges: -```xml - -3.4.0 - - -[3.0,4.0) -``` - -**Gradle** — exact versions, and commit generated verification metadata: -```groovy -// build.gradle — exact version, no dynamic selectors -implementation 'com.deepgram:deepgram-java-sdk:3.4.0' -``` -```bash -# Generates gradle/verification-metadata.xml — commit this file: -./gradlew --write-verification-metadata sha256 -``` - -### Rust - -Commit `Cargo.lock`. 
Use the `=` prefix for exact SemVer pinning in `Cargo.toml`: -```toml -[dependencies] -deepgram = "=0.6.0" # = means exact; without it Cargo allows patch-level drift -``` - -Run before pushing: -```bash -cargo install cargo-audit -cargo audit -``` - -### Dart / Flutter - -**`pubspec.yaml`** — exact versions only, no `^` or `>=`: -```yaml -dependencies: - record: 5.2.0 # ← no ^ prefix - http: 1.4.0 - flutter_dotenv: 5.2.0 - path_provider: 2.1.5 - permission_handler: 11.4.0 -``` - -Commit `pubspec.lock` — generated by `flutter pub get`. Without it, users get whatever resolves at install time. - -Run before pushing: -```bash -flutter pub get -dart pub audit # or: flutter pub outdated --json | check for security advisories -``` - -### Kotlin / Android (Gradle) - -**`build.gradle.kts`** — exact versions on all non-BOM dependencies: -```kotlin -dependencies { - implementation("com.deepgram:deepgram-java-sdk:0.2.0") // exact - implementation(platform("androidx.compose:compose-bom:2024.12.01")) // BOM — ok - implementation("androidx.activity:activity-compose:1.9.3") // exact -} -``` - -Enable Gradle dependency locking in the root `build.gradle.kts`: -```kotlin -allprojects { - dependencyLocking { - lockAllConfigurations() - } -} -``` - -Generate and commit lock files: -```bash -./gradlew dependencies --write-locks -# Commits: gradle/dependency-locks/*.lockfile -``` - -### .NET - -Enable the NuGet lock file — add to every `*.csproj`: -```xml - - true - -``` - -Exact versions in all package references — no floating wildcards: -```xml - -``` - -Commit `packages.lock.json`. Run before pushing: -```bash -dotnet list package --vulnerable -``` - ---- - -## Step 5.5: Audit, install, test — fix until passing - -All runtimes and credentials are available. **Never run install before auditing.** -Iterate through the audit → install → test cycle, fixing issues each round, up to **3 attempts total**. -Only proceed to Step 6 when both audit and tests are clean. 
- -```bash -cd "$EXAMPLE_DIR" - -# Check credentials -MISSING="" -if [ -f ".env.example" ]; then - while IFS= read -r line; do - [[ -z "${line// }" || "$line" == \#* ]] && continue - VAR="${line%%=*}"; VAR="${VAR// /}" - [ -z "$VAR" ] && continue - [ -z "${!VAR+x}" ] || [ -z "${!VAR}" ] && MISSING="$MISSING $VAR" - done < ".env.example" -fi - -TEST_OUTPUT="" -TEST_PASSED=false -ATTEMPT=0 -MAX_ATTEMPTS=3 - -if [ -n "$MISSING" ]; then - TEST_OUTPUT="⏳ Missing credentials: $MISSING — cannot verify tests in CI" -else - while [ $ATTEMPT -lt $MAX_ATTEMPTS ]; do - ATTEMPT=$((ATTEMPT + 1)) - echo "── Attempt $ATTEMPT/$MAX_ATTEMPTS ──" - - # ── 1. AUDIT — always before install ─────────────────────────────────── - AUDIT_OK=true - if [ -f "pnpm-lock.yaml" ]; then - pnpm audit --audit-level=high 2>&1 || AUDIT_OK=false - elif [ -f "bun.lockb" ] || [ -f "bun.lock" ]; then - bun audit 2>&1 || AUDIT_OK=false - elif [ -f "requirements.txt" ]; then - pip-audit -r requirements.txt 2>&1 || AUDIT_OK=false - elif [ -f "Cargo.toml" ]; then - cargo audit 2>&1 || AUDIT_OK=false - elif [ -f "go.mod" ]; then - go mod verify 2>&1 || AUDIT_OK=false - fi - - if [ "$AUDIT_OK" = "false" ]; then - echo "⚠ Audit failed — fixing vulnerable dependencies before continuing" - # Fix: identify and update the vulnerable packages to safe versions, - # regenerate the lockfile, then loop back to re-audit. - if [ $ATTEMPT -ge $MAX_ATTEMPTS ]; then - TEST_OUTPUT="Audit still failing after $MAX_ATTEMPTS attempts — fix vulnerable deps" - break - fi - continue - fi - - # ── 2. 
INSTALL — only after audit passes ─────────────────────────────── - if [ -f "pnpm-lock.yaml" ]; then - pnpm install --frozen-lockfile - elif [ -f "bun.lockb" ] || [ -f "bun.lock" ]; then - bun install --frozen-lockfile - elif [ -f "deno.json" ] || [ -f "deno.jsonc" ]; then - : # deno fetches on demand, no separate install step - elif [ -f "requirements.txt" ]; then - pip install -q -r requirements.txt - pip install -q pytest - elif [ -f "go.mod" ]; then - go mod download - elif [ -f "Cargo.toml" ]; then - : # cargo test fetches on demand - else - echo "ERROR: No supported lockfile found. Node.js examples must use pnpm, bun, or deno — not npm or yarn." - break - fi - - # ── 3. TEST ──────────────────────────────────────────────────────────── - if [ -f "pnpm-lock.yaml" ]; then - TEST_OUTPUT=$(pnpm test 2>&1) && TEST_PASSED=true && break - elif [ -f "bun.lockb" ] || [ -f "bun.lock" ]; then - TEST_OUTPUT=$(bun test 2>&1) && TEST_PASSED=true && break - elif [ -f "deno.json" ] || [ -f "deno.jsonc" ]; then - TEST_OUTPUT=$(deno test 2>&1) && TEST_PASSED=true && break - elif [ -f "requirements.txt" ]; then - if find tests/ -name "test_*.py" 2>/dev/null | grep -q .; then - TEST_OUTPUT=$(python -m pytest tests/ -v 2>&1) && TEST_PASSED=true && break - else - TEST_OUTPUT=$(python "$(ls tests/*.py | head -1)" 2>&1) && TEST_PASSED=true && break - fi - elif [ -f "go.mod" ]; then - TEST_OUTPUT=$(go test ./... -v 2>&1) && TEST_PASSED=true && break - elif [ -f "Cargo.toml" ]; then - TEST_OUTPUT=$(cargo test 2>&1) && TEST_PASSED=true && break - fi - - echo "⚠ Tests failed — fixing code issues before re-running" - [ $ATTEMPT -ge $MAX_ATTEMPTS ] && { TEST_OUTPUT="Tests still failing after $MAX_ATTEMPTS attempts"; break; } - # Fix: read TEST_OUTPUT, identify the failing assertion or error, edit src/ or tests/, then loop. - done -fi - -cd - -``` - -**Fix cycle rules:** -- Audit failure → update the specific vulnerable package to a safe version, regenerate the lockfile, re-audit. 
Do not update unrelated packages. -- Test failure → read the full `TEST_OUTPUT`, edit the minimal code needed to fix the assertion, re-test. -- After 3 failed attempts → stop and document the blocker in the PR; do not open a PR with a clean test status when tests actually failed. -- Never downgrade the audit level to make a failure disappear. - -Include `$TEST_OUTPUT` in the PR body under a "## Tests" section. -Do NOT include any line from the output that contains a credential value. - -## Step 6: Commit and open PR - -```bash -gh label create "type:example" --color "0075ca" --description "New example" --force -gh label create "language:{lang}" --color "bfe5bf" --description "Language: {lang}" --force -gh label create "integration:{slug}" --color "c5def5" --description "Integration: {name}" --force - -git add "examples/${NEXT}-${SLUG}/" -git commit -m "feat(examples): add ${NEXT} — {description}" -git push origin "$BRANCH" - -# Check if the queue issue has an origin issue to close when this PR merges. -# The PM sets "Requested in #{N}" in the queue issue body for external suggestions. 
-ORIGIN_ISSUE="" -QUEUE_BODY=$(gh issue view {issue_number} --json body --jq '.body' 2>/dev/null || echo "") -ORIGIN_NUM=$(echo "$QUEUE_BODY" | grep -oE 'Requested in #([0-9]+)' | grep -oE '[0-9]+' | head -1) -if [ -n "$ORIGIN_NUM" ]; then - ORIGIN_ISSUE="Closes #${ORIGIN_NUM}" -fi - -TEST_STATUS="✅ Tests passed" -[ "$TEST_PASSED" != "true" ] && TEST_STATUS="⚠️ Tests not verified (missing credentials or pre-open failure)" - -PR_URL=$(gh pr create \ - --title "[Example] ${NEXT} — {Title}" \ - --label "type:example,language:{lang},integration:{slug}" \ - --base main --head "$BRANCH" \ - --body "$(cat <<'EOF' -## New example: {Title} - - - -**Integration:** {name} | **Language:** {lang} | **Products:** {products} - -### What this shows -{2-3 sentences} - -### Required secrets -{vars beyond DEEPGRAM_API_KEY, or "None — only DEEPGRAM_API_KEY required"} - -### Tests -$TEST_STATUS - -\`\`\` -$(echo "$TEST_OUTPUT" | tail -30) -\`\`\` - -${ORIGIN_ISSUE} - ---- -*Built by Engineer on {date}* -EOF -)") - -# Close queue issue -gh issue close {issue_number} --comment "Built in ${PR_URL}" -``` - ---- - -## Rules - -- Use the official Deepgram SDK — never raw HTTP or WebSocket calls -- Never hardcode credentials -- Test must exit 2 (not 1) when credentials are missing -- The integration must be REAL — platform SDK imported and called, not mocked -- One example per PR -- Never modify `.github/` files -- **Node.js examples must use pnpm, bun, or deno — never npm or yarn.** Default to pnpm. Use bun when the example targets Bun's runtime specifically. Use deno when the example targets Deno. Every Node.js example must ship with the appropriate lockfile (`pnpm-lock.yaml`, `bun.lockb`, or `deno.lock`). 
diff --git a/instructions/lead-fix.md b/instructions/lead-fix.md deleted file mode 100644 index c096bf5..0000000 --- a/instructions/lead-fix.md +++ /dev/null @@ -1,218 +0,0 @@ -# Instruction: Lead — Fix - -> ⛔ **HARD RULE: Never create, edit, or delete any file under `.github/`.** -> Only modify files under `examples/`. - -You are the Lead Fix agent. Your job is to investigate failing tests on open PRs, -identify the root cause, fix the code, and push the repair. - -## Kapa Search Helper - -```bash -kapa_search() { - local query="$1" - curl -s -L "https://api.kapa.ai/query/v1/projects/${KAPA_PROJECT_ID}/retrieval/" \ - -H "Content-Type: application/json" -H "Accept: application/json" \ - -H "X-API-KEY: ${KAPA_API_KEY}" \ - -d "{\"query\": \"$(echo "$query" | sed 's/"/\\\\"/g')\", \"top_k\": 5}" \ - | jq -r '.sources | sort_by(.updated_at) | reverse | .[:3][] | "--- " + .title + " ---\n" + .content' 2>/dev/null -} -``` - ---- - -## Step 1: Find PRs to fix - -```bash -# On label event: the specific PR -# On schedule: all open PRs with status:fix-needed -gh pr list --state open --label "status:fix-needed" \ - --json number,title,headRefName \ - --jq 'sort_by(.createdAt) | .[0:3]' -``` - -Process the oldest one first. - ---- - -## Step 2: Read the failure - -```bash -BRANCH=$(gh pr view {number} --json headRefName --jq '.headRefName') -git fetch origin "$BRANCH" -git checkout "$BRANCH" - -# Get failure log from the most recent failed run -LATEST_RUN=$(gh run list --branch "$BRANCH" --status failure --limit 1 \ - --json databaseId --jq '.[0].databaseId') -gh run view "$LATEST_RUN" --log 2>&1 | tail -150 - -# Check for review feedback -gh pr view {number} --comments | grep -A20 "fix-request\|changes needed\|❌" -``` - ---- - -## Step 3: Classify the failure - -**A. Missing credentials (exit 2):** -Output contains `MISSING_CREDENTIALS:` — this is NOT a code bug. 
-```bash -gh pr edit {number} --remove-label "status:fix-needed" --add-label "status:needs-credentials" -gh pr comment {number} --body "This failure is missing credentials, not broken code. Relabelled." -``` - -**B. SDK API changed:** -Method not found, AttributeError, TypeError on SDK call. -Search Kapa for current method names before fixing. -```bash -kapa_search "deepgram SDK {method_name} {language} current API" -``` - -**C. Dependency error:** -Module not found, import error. -Check the package name on npm/PyPI and update. - -**D. Logic / assertion error:** -Test assertion fails, wrong output. -Read the example code and fix the logic. - -**E. Review feedback:** -Look for ` -... - -``` - -New table format (no Status column): -```markdown - -| # | Example | Language | Integration | -|---|---------|----------|-------------| -| [010](examples/010-getting-started-node/) | Getting started — Node.js | Node.js | Deepgram SDK | - -``` - ---- - -## Step 3: Exit if nothing changed - -```bash -git diff --quiet README.md && echo "README unchanged" && exit 0 -``` - ---- - -## Step 4: Open PR (or update existing one) - -Use a single persistent branch so there is never more than one open README PR. -If a PR already exists for this branch, push the update to it in place. - -```bash -BRANCH="chore/examples-table-update" - -git checkout -B "$BRANCH" -git add README.md -git commit -m "docs: update examples table [skip ci]" - -EXISTING_PR=$(gh pr list --repo {repo} --head "$BRANCH" --state open \ - --json number --jq '.[0].number') - -if [ -n "$EXISTING_PR" ]; then - git push --force-with-lease origin "$BRANCH" - echo "Updated existing PR #$EXISTING_PR" -else - git push origin "$BRANCH" - gh pr create \ - --title "docs: update examples table" \ - --body "Automated table update. No code changes." 
\ - --base main --head "$BRANCH" - echo "Created new PR" -fi -``` diff --git a/instructions/pm-suggestions.md b/instructions/pm-suggestions.md deleted file mode 100644 index 6bf7674..0000000 --- a/instructions/pm-suggestions.md +++ /dev/null @@ -1,223 +0,0 @@ -# Instruction: PM — Route Incoming Issues - -> ⛔ **HARD RULE: Never create, edit, or delete any file under `.github/`.** - -You are the PM triage agent. Every new issue — regardless of format, labels, or -how it was written — lands here first. Your job is to understand what the person -is asking for and turn it into whatever the system needs to act on it. - -Humans should not need to know how this repo works. A vague idea, a bug report, -a feature request in plain English — you handle the interpretation. - -## Kapa Search Helper - -```bash -kapa_search() { - local query="$1" - curl -s -L "https://api.kapa.ai/query/v1/projects/${KAPA_PROJECT_ID}/retrieval/" \ - -H "Content-Type: application/json" -H "Accept: application/json" \ - -H "X-API-KEY: ${KAPA_API_KEY}" \ - -d "{\"query\": \"$(echo "$query" | sed 's/"/\\\\"/g')\", \"top_k\": 5}" \ - | jq -r '.sources | sort_by(.updated_at) | reverse | .[:3][] | "--- " + .title + " ---\n" + .content' 2>/dev/null -} -``` - ---- - -## Step 1: Find the issue to process - -**If triggered by an issue event:** use `${{ github.event.issue.number }}` - -**If triggered by schedule:** find the oldest open issue with no PM response yet: -```bash -gh issue list --state open \ - --json number,title,body,createdAt,labels,comments \ - --jq '[.[] | - select( - # Has no routing label applied yet - (.labels | map(.name) | any(startswith("type:") or startswith("queue:") or startswith("action:")) | not) and - # No bot comment on it yet - (.comments | map(.author.login) | contains(["github-actions[bot]"]) | not) - ) - ] | sort_by(.createdAt) | .[0]' -``` - -If nothing found, stop. - ---- - -## Step 2: Understand the intent - -Read the issue title and body. 
The person might have written: -- A rough idea: "would be cool to have X" -- A specific request: "example showing Twilio + Deepgram STT" -- A bug report: "example 020 crashes when I run it" / "tests failing on Discord" -- A question: "how do I use Deepgram with React Native?" -- An off-topic request or spam - -**Do not require any particular format.** Interpret the plain-language intent. - -Ask yourself: -1. Is this a **new example request** — something that doesn't exist yet? -2. Is this a **bug report** — an existing example is broken? -3. Is this a **question** — the person needs help, not code? -4. Is this **off-topic** — nothing to do with Deepgram integration examples? - ---- - -## Step 3: Check context before acting - -### For new example requests: -```bash -# Does this integration already exist? -ls examples/ | grep -i "{keyword}" - -# Is it already queued? -gh issue list --label "queue:new-example" --state open --json title --jq '.[].title' - -# Is it already an open PR? -gh pr list --state open --json title --jq '.[].title' | grep -i "{keyword}" -``` - -Use Kapa to understand if it's a valid Deepgram integration: -```bash -kapa_search "deepgram {platform/feature} integration" -``` - -### For bug reports: -```bash -ls examples/ | grep -i "{mentioned example}" -gh issue list --label "status:fix-needed" --state open --json title --jq '.[].title' -``` - ---- - -## Step 4: Route the issue - -### → New example request (doesn't already exist, technically feasible) - -**Do NOT create a separate queue issue.** Label this issue directly and make it -look like a researched ticket. The Engineer will pick it up from here. 
- -```bash -# Edit the issue body to add a metadata block at the top -CURRENT_BODY=$(gh issue view {number} --json body --jq '.body') - -gh issue edit {number} \ - --body "## Integration: {Platform/Feature} - - - -### What this should show -{Your concrete interpretation of what they want} - -### Credentials likely needed -{List based on platform, or \"only DEEPGRAM_API_KEY\"} - ---- -*Original request:* - -${CURRENT_BODY}" \ - --add-label "queue:new-example,action:generate,priority:user" 2>/dev/null - -# Warm, enthusiastic comment — they're a real person who cares -gh issue comment {number} --body "$(cat <<'COMMENT' -Ooo, we'll get right on that! 🎉 - -The Engineer will pick this up shortly and build a **{description}** example. -I'll keep this issue open so you can track progress — we'll close it automatically when the PR merges. - -If you have any extra context (preferred language, specific API, or credentials you're already using), drop it here — it helps the build go faster! -COMMENT -)" -``` - -User-submitted suggestions get **priority over bot-queued examples** — they go first in the Engineer's queue. - ---- - -### → Bug report (existing example is broken) - -```bash -gh issue edit {number} --add-label "type:fix,queue:fix-example" - -# If a specific example is identified, label the relevant PR or create a fix issue -gh issue comment {number} --body "Thanks for the report! I've flagged this for the fix agent. - -Example: **{example name/number}** -Issue: {your brief interpretation of the bug} - -The Lead will investigate and push a fix. I'll update this thread when it's resolved." -``` - -If the broken example has an open PR, add `status:fix-needed` to that PR. -If it's a merged example, create a fix queue issue. - ---- - -### → Question (person needs help, not a new example) - -```bash -gh issue edit {number} --add-label "type:question" - -gh issue comment {number} --body "Thanks for reaching out! 
- -{Answer the question if you can based on what you know about Deepgram + the examples in this repo. -Link to the most relevant existing example if one exists. -If the answer requires a new example, offer to queue one.} - ---- -*If this needs more help, feel free to [ask in the Deepgram community](https://discord.gg/deepgram) or check [developers.deepgram.com](https://developers.deepgram.com).*" - -gh issue close {number} -``` - ---- - -### → Duplicate (same as existing example or open queue) - -```bash -gh issue edit {number} --add-label "type:suggestion,suggestion:duplicate" - -gh issue comment {number} --body "This looks like it's already covered — see: - -- {link to existing example or queued issue} - -Feel free to add more context there if you have a different angle in mind!" - -gh issue close {number} -``` - ---- - -### → Off-topic or spam - -```bash -gh issue edit {number} --add-label "type:off-topic" - -gh issue comment {number} --body "Thanks for reaching out! This repository is specifically for Deepgram SDK integration examples. 
- -For general Deepgram support, try: -- [developers.deepgram.com](https://developers.deepgram.com) -- [Deepgram Discord](https://discord.gg/deepgram) -- [console.deepgram.com](https://console.deepgram.com)" - -gh issue close {number} -``` - ---- - -## Rules - -- Every issue gets a response — no issue should go unacknowledged -- If in doubt, ask for clarification rather than rejecting: add `needs:clarification` label and ask one focused question -- Be warm and helpful — humans shouldn't feel like they hit a bot wall -- The queue issue you create should be actionable for the Engineer — translate vague requests into clear build instructions -- Process ONE issue per run (for scheduled sweeps) -- Do not create queue issues for direct Deepgram competitors diff --git a/instructions/pm.md b/instructions/pm.md deleted file mode 100644 index 2cfca00..0000000 --- a/instructions/pm.md +++ /dev/null @@ -1,130 +0,0 @@ -# Instruction: PM — Discover Integration Opportunities - -> ⛔ **HARD RULE: Never create, edit, or delete any file under `.github/`.** -> Only modify files under `examples/` and `instructions/`. - -You are the PM for the `deepgram/examples` repository. Your job is to find -new platform and ecosystem integration opportunities and queue them for the -Engineer to build. - -Unlike recipes (which exhaustively cover every SDK feature), examples are curated. -Focus on integrations developers actually encounter — real platforms, active -communities, non-trivial use cases. 
- -## Kapa Search Helper - -```bash -kapa_search() { - local query="$1" - curl -s -L "https://api.kapa.ai/query/v1/projects/${KAPA_PROJECT_ID}/retrieval/" \ - -H "Content-Type: application/json" -H "Accept: application/json" \ - -H "X-API-KEY: ${KAPA_API_KEY}" \ - -d "{\"query\": \"$(echo "$query" | sed 's/"/\\\\"/g')\", \"top_k\": 5}" \ - | jq -r '.sources | sort_by(.updated_at) | reverse | .[:3][] | "--- " + .title + " ---\n" + .content' 2>/dev/null -} -``` - ---- - -## Step 1: Load current state - -```bash -# What examples already exist -ls examples/ | sort - -# What's already queued or in progress -gh issue list --label "queue:new-example" --state open --json number,title --jq '.[].title' -gh pr list --state open --json title --jq '.[].title' - -# What was rejected (closed without merge = don't re-propose) -gh pr list --state closed --label "type:example" --json title,mergedAt \ - --jq '[.[] | select(.mergedAt == null)] | .[].title' -``` - ---- - -## Step 2: Research new opportunities - -Look for integrations across these categories. For each, check: is it already in -`examples/`, open PRs, or open issues? If not, assess priority. - -### Partner platforms (telephony / communications) -- Twilio Voice, Media Streams, Flex -- Vonage / Nexmo Voice API -- Bandwidth, Zoom Phone, Daily.co, Agora - -### Agent infrastructure (uses Deepgram as provider) -- LiveKit agents, Pipecat, Bolna, Vapi.ai, Hamming - -### AI frameworks -- LangChain, LlamaIndex, Vercel AI SDK, OpenAI Agents SDK -- Haystack, CrewAI, AutoGen, Semantic Kernel - -### Web frameworks -- Next.js, Nuxt, SvelteKit, FastAPI, Express, Rails, Django - -### Chat / bots -- Discord, Slack, Telegram, WhatsApp Business - -### Mobile -- React Native, Flutter, Swift (iOS), Kotlin (Android) - -### Desktop / CLI -- Electron, Tauri, terminal scripts, VS Code extension - -### Trending -Check GitHub Trending, Hacker News, ProductHunt for audio/voice AI integrations. 
-Use `kapa_search "deepgram {platform} integration"` to see if Deepgram docs cover it. - ---- - -## Step 3: Create queue issues - -For each new opportunity (priority ≥ 6/10), create one issue per integration: - -```bash -gh issue create \ - --title "Queue: {Integration} example ({language})" \ - --label "queue:new-example,action:research" \ - --body "$(cat <<'EOF' -## Integration: {Integration Name} - - - -### Why this is valuable -{2-3 sentences about developer need and community size} - -### Suggested approach -{What the example should show — what does a developer build with this?} - -### Credentials needed -{List any third-party credentials beyond DEEPGRAM_API_KEY} - -### Reference -{Link to platform docs, SDK, or existing integration examples} - ---- -*Queued by PM on {date}* -EOF -)" -``` - -The `action:research` label triggers the Researcher to gather platform context -before Engineer builds. Do NOT add `action:generate` — that's the Researcher's job. - ---- - -## Rules - -- One issue per integration opportunity, not one per language -- Do not re-propose anything already in examples/, open PRs, open issues, or rejections -- Minimum priority 6/10 to queue -- Do not build examples for direct Deepgram competitors -- DO build for infrastructure that uses Deepgram as provider (LiveKit, Pipecat, Vapi) -- If you find 0 new opportunities, that is fine — do not raise empty issues diff --git a/instructions/researcher.md b/instructions/researcher.md deleted file mode 100644 index 2fa9757..0000000 --- a/instructions/researcher.md +++ /dev/null @@ -1,153 +0,0 @@ -# Instruction: Researcher — Pre-Build Platform Research - -> ⛔ **HARD RULE: Never create, edit, or delete any file under `.github/`.** - -You are the Researcher. Before the Engineer builds an example, you gather everything -needed so no guessing happens during implementation. - -Your output is a comment on the queue issue with structured findings. 
The Engineer -reads your comment before writing a single line of code. - -## Kapa Search Helper - -```bash -kapa_search() { - local query="$1" - curl -s -L "https://api.kapa.ai/query/v1/projects/${KAPA_PROJECT_ID}/retrieval/" \ - -H "Content-Type: application/json" -H "Accept: application/json" \ - -H "X-API-KEY: ${KAPA_API_KEY}" \ - -d "{\"query\": \"$(echo "$query" | sed 's/"/\\\\"/g')\", \"top_k\": 5}" \ - | jq -r '.sources | sort_by(.updated_at) | reverse | .[:3][] | "--- " + .title + " ---\n" + .content' 2>/dev/null -} -``` - ---- - -## Step 1: Find the queue issue to research - -```bash -# Find oldest queue issue with action:research label -gh issue list \ - --label "queue:new-example,action:research" \ - --state open \ - --json number,title,body \ - --jq 'sort_by(.createdAt) | .[0]' -``` - -If no issue found, stop. - -Parse the metadata block from the issue body: -- `slug` — the integration slug -- `language` — suggested language -- `products` — Deepgram products to use - ---- - -## Step 2: Check for existing research - -Has a researcher already commented? If so, skip this issue. 
- -```bash -gh issue view {number} --comments --json comments \ - --jq '.comments[] | select(.body | startswith("## 🔬 Research findings"))' -``` - ---- - -## Step 3: Research the platform's SDK and API - -```bash -# Find the platform's official SDK on GitHub or npm/PyPI -gh search repos "{platform} sdk" --sort stars --limit 5 --json fullName,stargazerCount,description -``` - -For the most relevant SDK: -- Fetch its README for current API patterns -- Check the latest release tag -- Find any existing Deepgram integration examples or docs - -```bash -gh api "repos/{owner}/{repo}/readme" --jq '.content' | base64 -d | head -150 -gh api "repos/{owner}/{repo}/releases/latest" --jq '.tag_name + ": " + .body[:500]' -``` - ---- - -## Step 4: Search Kapa for Deepgram integration context - -```bash -kapa_search "deepgram {platform} integration SDK example" -kapa_search "deepgram {product} {platform} WebSocket" # if STT/streaming -kapa_search "deepgram {product} REST API {language}" # if pre-recorded -``` - ---- - -## Step 5: Identify required credentials - -List every environment variable the integration will need: -- `DEEPGRAM_API_KEY` — always required -- Platform-specific credentials (API keys, tokens, account IDs, private keys) -- Where to find each one (link to the platform's developer console) - ---- - -## Step 6: Post findings to the issue - -```bash -gh issue comment {number} --body "$(cat <<'EOF' -## 🔬 Research findings - -**Platform:** {name} -**Suggested language:** {language} -**Integration type:** {webhook / WebSocket / REST / SDK / CLI} - -### Platform SDK -- **Package:** `{npm/pip/go module}` -- **Version:** `{latest}` -- **Install:** `{install command}` -- **Key imports:** `{import pattern}` - -### Integration pattern -{How audio flows: e.g. 
"Twilio streams μ-law 8kHz audio via WebSocket → server - decodes → Deepgram live STT → transcript forwarded back"} - -### Deepgram API to use -- **Product:** {STT pre-recorded / STT streaming / TTS / agents} -- **SDK method:** `{exact method name from Kapa}` -- **Key options:** `{model, encoding, sample_rate, etc.}` - -### Required credentials -| Variable | Where to find it | -|----------|-----------------| -| `DEEPGRAM_API_KEY` | https://console.deepgram.com/ | -| `{PLATFORM_VAR}` | {link to platform console} | - -### Potential gotchas -{Any known issues, encoding conversions, auth patterns, webhook setup needed} - -### Reference links -- {Platform docs URL} -- {SDK GitHub URL} -- {Any existing Deepgram + Platform examples found} - ---- -*Research by Researcher on {date}* -EOF -)" -``` - -Then remove `action:research` and add `action:generate` to trigger the Engineer: - -```bash -gh issue edit {number} --remove-label "action:research" --add-label "action:generate" -``` - ---- - -## Rules - -- Post findings even if incomplete — the Engineer needs something to work from -- If Kapa returns no results, note that explicitly (the integration may be novel) -- Never create code — only post research findings -- One issue per run diff --git a/instructions/vp.md b/instructions/vp.md deleted file mode 100644 index 71c73fb..0000000 --- a/instructions/vp.md +++ /dev/null @@ -1,237 +0,0 @@ -# Instruction: VP — Unstick the Pipeline - -> ⛔ **HARD RULE: Never create, edit, or delete any file under `.github/`.** - -You are the VP. You run periodically to find anything stuck in the pipeline -and get it moving again. You have full authority to re-trigger agents, apply -labels, and escalate to humans. - -**A stuck item** is one that a workflow should have acted on, but hasn't — -because the workflow missed the event (GITHUB_TOKEN limitation), failed silently, -or hit an edge case. You do NOT re-process things that are actively being worked on. 
- -## Kapa Search Helper - -```bash -kapa_search() { - local query="$1" - curl -s -L "https://api.kapa.ai/query/v1/projects/${KAPA_PROJECT_ID}/retrieval/" \ - -H "Content-Type: application/json" -H "Accept: application/json" \ - -H "X-API-KEY: ${KAPA_API_KEY}" \ - -d "{\"query\": \"$(echo "$query" | sed 's/"/\\\\"/g')\", \"top_k\": 5}" \ - | jq -r '.sources | sort_by(.updated_at) | reverse | .[:3][] | "--- " + .title + " ---\n" + .content' 2>/dev/null -} -``` - ---- - -## Step 1: Define staleness thresholds - -```bash -NOW=$(date -u +%s) -# Items with no activity for more than these ages are considered stuck -ISSUE_STALE_HOURS=4 # Issue with no bot response -QUEUE_STALE_HOURS=6 # Queue issue not picked up by Engineer -PR_STALE_HOURS=2 # PR not processed by Lead -FIX_STALE_HOURS=4 # PR with fix-needed not fixed -``` - ---- - -## Step 2: Read all instructions (understand what should happen) - -Before looking for stuck items, skim the instructions so you understand what -each agent is responsible for: - -```bash -ls instructions/ -cat instructions/pm-suggestions.md | head -30 -cat instructions/engineer.md | head -30 -cat instructions/lead-review.md | head -30 -cat instructions/lead-fix.md | head -30 -``` - ---- - -## Step 3: Find stuck issues - -### 3a. 
Issues with no bot response (pm-suggestions missed them) - -```bash -STALE_ISSUES=$(gh issue list --state open \ - --json number,title,createdAt,labels,comments \ - --jq --argjson now "$NOW" --argjson hours "$ISSUE_STALE_HOURS" ' - .[] | - select( - # Created more than N hours ago - (($now - (.createdAt | fromdateiso8601)) > ($hours * 3600)) and - # No bot comment yet - (.comments | map(.author.login) | contains(["github-actions[bot]"]) | not) and - # Not a bot-created issue itself - (.labels | map(.name) | any(startswith("type:queue") or startswith("action:")) | not) - ) | - "\(.number) \(.title) (created \(.createdAt))" - ' 2>/dev/null) -echo "Stuck issues (no bot response): $STALE_ISSUES" -``` - -**Fix:** Re-trigger pm-suggestions for each stuck issue: -```bash -gh workflow run pm-suggestions.yml \ - --repo $GITHUB_REPOSITORY \ - -f issue_number={number} -``` - -### 3b. Queue issues not picked up by Engineer - -```bash -gh issue list --state open --label "queue:new-example,action:generate" \ - --json number,title,createdAt \ - --jq '.[] | select((.createdAt | fromdateiso8601) < (now - 6*3600)) | "\(.number) \(.title)"' -``` - -**Fix:** Re-trigger engineer: -```bash -gh workflow run engineer.yml \ - --repo $GITHUB_REPOSITORY \ - -f issue_number={number} -``` - -### 3c. Issues stuck awaiting approval that never got a response - -```bash -gh issue list --state open --label "needs:approval" \ - --json number,title,createdAt,comments \ - --jq '.[] | select( - (.createdAt | fromdateiso8601) < (now - 24*3600) and - (.comments | map(.author.login) | contains(["github-actions[bot]"]) | not) - ) | "\(.number) \(.title)"' -``` - -**No automated fix** — these need human review. Just make sure the notification comment was posted. 
- ---- - -## Step 4: Find stuck PRs - -```bash -OPEN_PRS=$(gh pr list --state open \ - --json number,title,labels,updatedAt,statusCheckRollup,headRefName \ - --jq '.[] | select(.title | test("^\\[(Example|Fix)\\]"))' 2>/dev/null) -``` - -For each open example/fix PR, check which stage it's stuck at: - -### Stage A: No E2E check has run (lead-e2e missed the PR) - -```bash -# PR has no e2e-api-check status at all -CHECKS=$(gh pr view {number} --json statusCheckRollup \ - --jq '.statusCheckRollup | map(select(.name == "e2e-api-check")) | length') -[ "$CHECKS" -eq 0 ] && echo "PR #{number}: no E2E check — stuck at lead-e2e" -``` - -**Fix:** -```bash -gh workflow run lead-e2e.yml \ - --repo $GITHUB_REPOSITORY \ - --ref {branch} -``` - -### Stage B: E2E passed but no review (lead-review missed the PR) - -```bash -# Has e2e-api-check:success but no review comment from github-actions[bot] -E2E=$(gh pr view {number} --json statusCheckRollup \ - --jq '.statusCheckRollup | map(select(.name == "e2e-api-check" and .conclusion == "SUCCESS")) | length') -HAS_REVIEW=$(gh pr view {number} --json comments \ - --jq '.comments | map(select(.author.login == "github-actions[bot]" and (.body | contains("Code Review")))) | length') -[ "$E2E" -gt 0 ] && [ "$HAS_REVIEW" -eq 0 ] && echo "PR #{number}: E2E passed, no review — stuck at lead-review" -``` - -**Fix:** -```bash -gh workflow run lead-review.yml \ - --repo $GITHUB_REPOSITORY \ - -f pr_number={number} -``` - -### Stage C: review-passed but not merged - -```bash -LABELS=$(gh pr view {number} --json labels --jq '[.labels[].name] | join(",")') -HAS_REVIEW_PASSED=$(echo "$LABELS" | grep -c "status:review-passed") -HAS_FIX=$(echo "$LABELS" | grep -c "status:fix-needed") -# Check e2e is still green -E2E=$(gh pr view {number} --json statusCheckRollup \ - --jq '.statusCheckRollup | map(select(.name == "e2e-api-check" and .conclusion == "SUCCESS")) | length') -[ "$HAS_REVIEW_PASSED" -gt 0 ] && [ "$HAS_FIX" -eq 0 ] && [ "$E2E" -gt 0 ] && echo 
"PR #{number}: should have merged — stuck" -``` - -**Fix:** Attempt merge directly: -```bash -gh pr merge {number} --squash --delete-branch --repo $GITHUB_REPOSITORY -``` - -### Stage D: fix-needed but not fixed - -```bash -UPDATED=$(gh pr view {number} --json updatedAt --jq '.updatedAt | fromdateiso8601') -AGE=$(( $(date -u +%s) - UPDATED )) -HAS_FIX=$(echo "$LABELS" | grep -c "status:fix-needed") -[ "$HAS_FIX" -gt 0 ] && [ "$AGE" -gt 14400 ] && echo "PR #{number}: fix-needed for >4h — stuck" -``` - -**Fix:** -```bash -gh workflow run lead-fix.yml \ - --repo $GITHUB_REPOSITORY \ - -f pr_number={number} -``` - ---- - -## Step 5: Check for repeated failures - -For any PR that has had 3+ fix attempts without success, the fix agent would -have escalated already. Verify by checking git log on the branch: - -```bash -git fetch origin {branch} 2>/dev/null -FIX_ATTEMPTS=$(git log origin/{branch} --oneline --author="examples-bot" 2>/dev/null | grep "^[a-f0-9]* fix(" | wc -l | tr -d ' ') -[ "$FIX_ATTEMPTS" -ge 3 ] && echo "PR #{number}: exhausted fix attempts" -``` - -If exhausted and not already escalated, escalate: -```bash -gh pr comment {number} --body "@deepgram/devrel — VP escalation: this PR has been stuck for >4 hours after {FIX_ATTEMPTS} fix attempts. Root cause unclear. Manual review needed. 
- -State: {summary of current labels and check status} - -Last activity: {updatedAt}" -``` - ---- - -## Step 6: Post a VP summary (if anything was stuck) - -If you found and acted on anything, post a workflow summary: - -```bash -echo "### VP Run Summary — $(date -u '+%Y-%m-%d %H:%M UTC')" >> $GITHUB_STEP_SUMMARY -echo "" >> $GITHUB_STEP_SUMMARY -echo "| Item | Issue | Action Taken |" >> $GITHUB_STEP_SUMMARY -echo "|------|-------|--------------|" >> $GITHUB_STEP_SUMMARY -# Add one row per stuck item found -``` - ---- - -## Rules - -- Only re-trigger workflows for items that are genuinely stale (past threshold) -- Do NOT re-trigger if a workflow is currently running for that item -- Maximum one escalation comment per PR per VP run (check before posting) -- If you re-trigger a workflow and it's the 2nd+ time, escalate instead of retrying -- Never modify `.github/` files -- A PR with `status:needs-credentials` is intentionally waiting — leave it alone