From 7fc082273522eb0adff69785bcc4ee0fc4ff39a3 Mon Sep 17 00:00:00 2001 From: eli schleifer Date: Tue, 31 Mar 2026 09:44:54 -0700 Subject: [PATCH 1/2] Add --- CONTRIBUTING.md | 34 ++ LICENSE | 21 + README.md | 55 +++ docs.json | 291 +++++++++++++ favicon.svg | 5 + flaky-tests/dashboard.mdx | 85 ++++ flaky-tests/detection.mdx | 83 ++++ flaky-tests/detection/flag-as-flaky.mdx | 54 +++ .../detection/pass-on-retry-monitor.mdx | 51 +++ flaky-tests/detection/threshold-monitor.mdx | 232 +++++++++++ flaky-tests/flaky-tests.mdx | 47 +++ flaky-tests/get-started.mdx | 59 +++ flaky-tests/get-started/ci-providers.mdx | 15 + .../ci-providers/atlassian-bamboo.mdx | 205 +++++++++ .../ci-providers/azure-devops-pipelines.mdx | 169 ++++++++ .../ci-providers/bitbucket-pipelines.mdx | 173 ++++++++ .../get-started/ci-providers/buildkite.mdx | 142 +++++++ .../get-started/ci-providers/circleci.mdx | 152 +++++++ .../get-started/ci-providers/droneci.mdx | 166 ++++++++ .../ci-providers/github-actions.mdx | 371 +++++++++++++++++ .../get-started/ci-providers/gitlab.mdx | 158 +++++++ .../get-started/ci-providers/jenkins.mdx | 155 +++++++ .../get-started/ci-providers/otherci.mdx | 170 ++++++++ .../get-started/ci-providers/semaphoreci.mdx | 206 +++++++++ .../get-started/ci-providers/travisci.mdx | 133 ++++++ flaky-tests/get-started/frameworks.mdx | 9 + .../get-started/frameworks/android.mdx | 162 ++++++++ flaky-tests/get-started/frameworks/bazel.mdx | 133 ++++++ flaky-tests/get-started/frameworks/behave.mdx | 130 ++++++ .../get-started/frameworks/cypress.mdx | 136 ++++++ .../get-started/frameworks/dart-test.mdx | 124 ++++++ .../get-started/frameworks/googletest.mdx | 128 ++++++ .../get-started/frameworks/gotestsum.mdx | 190 +++++++++ flaky-tests/get-started/frameworks/gradle.mdx | 144 +++++++ .../get-started/frameworks/jasmine.mdx | 154 +++++++ flaky-tests/get-started/frameworks/jest.mdx | 142 +++++++ flaky-tests/get-started/frameworks/karma.mdx | 141 +++++++ 
flaky-tests/get-started/frameworks/kotest.mdx | 224 ++++++++++ flaky-tests/get-started/frameworks/maven.mdx | 156 +++++++ .../get-started/frameworks/minitest.mdx | 139 +++++++ flaky-tests/get-started/frameworks/mocha.mdx | 143 +++++++ .../get-started/frameworks/nightwatch.mdx | 141 +++++++ flaky-tests/get-started/frameworks/nunit.mdx | 128 ++++++ .../frameworks/other-test-frameworks.mdx | 89 ++++ flaky-tests/get-started/frameworks/pest.mdx | 126 ++++++ .../get-started/frameworks/phpunit.mdx | 126 ++++++ .../get-started/frameworks/playwright.mdx | 150 +++++++ flaky-tests/get-started/frameworks/pytest.mdx | 126 ++++++ .../frameworks/robot-framework.mdx | 128 ++++++ flaky-tests/get-started/frameworks/rspec.mdx | 109 +++++ .../frameworks/rspec/manual-uploads.mdx | 136 ++++++ flaky-tests/get-started/frameworks/rust.mdx | 127 ++++++ .../get-started/frameworks/swift-testing.mdx | 128 ++++++ .../get-started/frameworks/testplan.mdx | 170 ++++++++ flaky-tests/get-started/frameworks/vitest.mdx | 185 +++++++++ flaky-tests/get-started/frameworks/xctest.mdx | 125 ++++++ .../get-started/multiple-repositories.mdx | 121 ++++++ flaky-tests/github-pull-request-comments.mdx | 55 +++ .../infrastructure-failure-protection.mdx | 62 +++ flaky-tests/managing-detected-flaky-tests.mdx | 108 +++++ flaky-tests/overview.mdx | 71 ++++ .../quarantine-service-availability.mdx | 33 ++ flaky-tests/quarantining.mdx | 241 +++++++++++ .../the-importance-of-pr-test-results.mdx | 66 +++ flaky-tests/ticketing-integrations.mdx | 26 ++ .../jira-integration.mdx | 135 ++++++ .../linear-integration.mdx | 55 +++ .../other-ticketing-platforms.mdx | 23 ++ flaky-tests/uploader.mdx | 285 +++++++++++++ flaky-tests/use-mcp-server.mdx | 32 ++ flaky-tests/use-mcp-server/configuration.mdx | 5 + .../configuration/claude-code-cli.mdx | 56 +++ .../configuration/cursor-ide.mdx | 51 +++ .../configuration/gemini-cli.mdx | 59 +++ .../configuration/github-copilot-ide.mdx | 62 +++ .../use-mcp-server/mcp-tool-reference.mdx | 6 + 
.../get-root-cause-analysis.mdx | 70 ++++ .../set-up-test-uploads.mdx | 123 ++++++ flaky-tests/webhooks.mdx | 19 + .../webhooks/github-issues-integration.mdx | 209 ++++++++++ flaky-tests/webhooks/linear-integration.mdx | 347 ++++++++++++++++ .../webhooks/microsoft-teams-integration.mdx | 228 ++++++++++ flaky-tests/webhooks/slack-integration.mdx | 162 ++++++++ introduction.mdx | 263 ++++++++++++ logo/dark.svg | 1 + logo/light.svg | 1 + merge-queue/administration.mdx | 23 ++ .../administration/advanced-settings.mdx | 286 +++++++++++++ merge-queue/administration/metrics.mdx | 301 ++++++++++++++ merge-queue/getting-started.mdx | 95 +++++ .../configure-branch-protection.mdx | 119 ++++++ .../configure-ci-status-checks.mdx | 84 ++++ .../install-and-create-your-queue.mdx | 53 +++ .../getting-started/test-your-setup.mdx | 53 +++ merge-queue/integration-for-slack.mdx | 160 +++++++ merge-queue/merge-queue.mdx | 73 ++++ .../migrating-from-github-merge-queue.mdx | 57 +++ merge-queue/optimizations.mdx | 33 ++ .../optimizations/anti-flake-protection.mdx | 111 +++++ merge-queue/optimizations/batching.mdx | 390 ++++++++++++++++++ .../optimizations/direct-merge-to-main.mdx | 194 +++++++++ .../optimizations/optimistic-merging.mdx | 115 ++++++ merge-queue/optimizations/parallel-queues.mdx | 74 ++++ .../optimizations/parallel-queues/api.mdx | 41 ++ .../optimizations/parallel-queues/bazel.mdx | 35 ++ .../optimizations/parallel-queues/nx.mdx | 32 ++ .../optimizations/pending-failure-depth.mdx | 139 +++++++ .../optimizations/predictive-testing.mdx | 52 +++ .../optimizations/priority-merging.mdx | 166 ++++++++ merge-queue/reference.mdx | 21 + merge-queue/reference/common-problems.mdx | 171 ++++++++ .../reference/merge-queue-cli-reference.mdx | 201 +++++++++ merge-queue/reference/merge.mdx | 238 +++++++++++ merge-queue/reference/troubleshooting.mdx | 59 +++ merge-queue/using-the-queue.mdx | 26 ++ .../emergency-pull-requests.mdx | 23 ++ .../handle-failed-pull-requests.mdx | 72 ++++ 
.../using-the-queue/monitor-queue-status.mdx | 96 +++++ merge-queue/using-the-queue/reference.mdx | 50 +++ .../using-the-queue/stacked-pull-requests.mdx | 65 +++ merge-queue/webhooks.mdx | 19 + setup-and-administration/apis.mdx | 45 ++ setup-and-administration/apis/webhooks.mdx | 17 + setup-and-administration/billing.mdx | 80 ++++ .../connecting-to-trunk.mdx | 37 ++ .../github-app-permissions.mdx | 83 ++++ .../managing-your-organization.mdx | 66 +++ setup-and-administration/security.mdx | 94 +++++ setup-and-administration/support.mdx | 54 +++ 129 files changed, 14659 insertions(+) create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 docs.json create mode 100644 favicon.svg create mode 100644 flaky-tests/dashboard.mdx create mode 100644 flaky-tests/detection.mdx create mode 100644 flaky-tests/detection/flag-as-flaky.mdx create mode 100644 flaky-tests/detection/pass-on-retry-monitor.mdx create mode 100644 flaky-tests/detection/threshold-monitor.mdx create mode 100644 flaky-tests/flaky-tests.mdx create mode 100644 flaky-tests/get-started.mdx create mode 100644 flaky-tests/get-started/ci-providers.mdx create mode 100644 flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx create mode 100644 flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx create mode 100644 flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx create mode 100644 flaky-tests/get-started/ci-providers/buildkite.mdx create mode 100644 flaky-tests/get-started/ci-providers/circleci.mdx create mode 100644 flaky-tests/get-started/ci-providers/droneci.mdx create mode 100644 flaky-tests/get-started/ci-providers/github-actions.mdx create mode 100644 flaky-tests/get-started/ci-providers/gitlab.mdx create mode 100644 flaky-tests/get-started/ci-providers/jenkins.mdx create mode 100644 flaky-tests/get-started/ci-providers/otherci.mdx create mode 100644 flaky-tests/get-started/ci-providers/semaphoreci.mdx create mode 100644 
flaky-tests/get-started/ci-providers/travisci.mdx create mode 100644 flaky-tests/get-started/frameworks.mdx create mode 100644 flaky-tests/get-started/frameworks/android.mdx create mode 100644 flaky-tests/get-started/frameworks/bazel.mdx create mode 100644 flaky-tests/get-started/frameworks/behave.mdx create mode 100644 flaky-tests/get-started/frameworks/cypress.mdx create mode 100644 flaky-tests/get-started/frameworks/dart-test.mdx create mode 100644 flaky-tests/get-started/frameworks/googletest.mdx create mode 100644 flaky-tests/get-started/frameworks/gotestsum.mdx create mode 100644 flaky-tests/get-started/frameworks/gradle.mdx create mode 100644 flaky-tests/get-started/frameworks/jasmine.mdx create mode 100644 flaky-tests/get-started/frameworks/jest.mdx create mode 100644 flaky-tests/get-started/frameworks/karma.mdx create mode 100644 flaky-tests/get-started/frameworks/kotest.mdx create mode 100644 flaky-tests/get-started/frameworks/maven.mdx create mode 100644 flaky-tests/get-started/frameworks/minitest.mdx create mode 100644 flaky-tests/get-started/frameworks/mocha.mdx create mode 100644 flaky-tests/get-started/frameworks/nightwatch.mdx create mode 100644 flaky-tests/get-started/frameworks/nunit.mdx create mode 100644 flaky-tests/get-started/frameworks/other-test-frameworks.mdx create mode 100644 flaky-tests/get-started/frameworks/pest.mdx create mode 100644 flaky-tests/get-started/frameworks/phpunit.mdx create mode 100644 flaky-tests/get-started/frameworks/playwright.mdx create mode 100644 flaky-tests/get-started/frameworks/pytest.mdx create mode 100644 flaky-tests/get-started/frameworks/robot-framework.mdx create mode 100644 flaky-tests/get-started/frameworks/rspec.mdx create mode 100644 flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx create mode 100644 flaky-tests/get-started/frameworks/rust.mdx create mode 100644 flaky-tests/get-started/frameworks/swift-testing.mdx create mode 100644 flaky-tests/get-started/frameworks/testplan.mdx create mode 
100644 flaky-tests/get-started/frameworks/vitest.mdx create mode 100644 flaky-tests/get-started/frameworks/xctest.mdx create mode 100644 flaky-tests/get-started/multiple-repositories.mdx create mode 100644 flaky-tests/github-pull-request-comments.mdx create mode 100644 flaky-tests/infrastructure-failure-protection.mdx create mode 100644 flaky-tests/managing-detected-flaky-tests.mdx create mode 100644 flaky-tests/overview.mdx create mode 100644 flaky-tests/quarantine-service-availability.mdx create mode 100644 flaky-tests/quarantining.mdx create mode 100644 flaky-tests/the-importance-of-pr-test-results.mdx create mode 100644 flaky-tests/ticketing-integrations.mdx create mode 100644 flaky-tests/ticketing-integrations/jira-integration.mdx create mode 100644 flaky-tests/ticketing-integrations/linear-integration.mdx create mode 100644 flaky-tests/ticketing-integrations/other-ticketing-platforms.mdx create mode 100644 flaky-tests/uploader.mdx create mode 100644 flaky-tests/use-mcp-server.mdx create mode 100644 flaky-tests/use-mcp-server/configuration.mdx create mode 100644 flaky-tests/use-mcp-server/configuration/claude-code-cli.mdx create mode 100644 flaky-tests/use-mcp-server/configuration/cursor-ide.mdx create mode 100644 flaky-tests/use-mcp-server/configuration/gemini-cli.mdx create mode 100644 flaky-tests/use-mcp-server/configuration/github-copilot-ide.mdx create mode 100644 flaky-tests/use-mcp-server/mcp-tool-reference.mdx create mode 100644 flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis.mdx create mode 100644 flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads.mdx create mode 100644 flaky-tests/webhooks.mdx create mode 100644 flaky-tests/webhooks/github-issues-integration.mdx create mode 100644 flaky-tests/webhooks/linear-integration.mdx create mode 100644 flaky-tests/webhooks/microsoft-teams-integration.mdx create mode 100644 flaky-tests/webhooks/slack-integration.mdx create mode 100644 introduction.mdx create mode 100644 
logo/dark.svg create mode 100644 logo/light.svg create mode 100644 merge-queue/administration.mdx create mode 100644 merge-queue/administration/advanced-settings.mdx create mode 100644 merge-queue/administration/metrics.mdx create mode 100644 merge-queue/getting-started.mdx create mode 100644 merge-queue/getting-started/configure-branch-protection.mdx create mode 100644 merge-queue/getting-started/configure-ci-status-checks.mdx create mode 100644 merge-queue/getting-started/install-and-create-your-queue.mdx create mode 100644 merge-queue/getting-started/test-your-setup.mdx create mode 100644 merge-queue/integration-for-slack.mdx create mode 100644 merge-queue/merge-queue.mdx create mode 100644 merge-queue/migrating-from-github-merge-queue.mdx create mode 100644 merge-queue/optimizations.mdx create mode 100644 merge-queue/optimizations/anti-flake-protection.mdx create mode 100644 merge-queue/optimizations/batching.mdx create mode 100644 merge-queue/optimizations/direct-merge-to-main.mdx create mode 100644 merge-queue/optimizations/optimistic-merging.mdx create mode 100644 merge-queue/optimizations/parallel-queues.mdx create mode 100644 merge-queue/optimizations/parallel-queues/api.mdx create mode 100644 merge-queue/optimizations/parallel-queues/bazel.mdx create mode 100644 merge-queue/optimizations/parallel-queues/nx.mdx create mode 100644 merge-queue/optimizations/pending-failure-depth.mdx create mode 100644 merge-queue/optimizations/predictive-testing.mdx create mode 100644 merge-queue/optimizations/priority-merging.mdx create mode 100644 merge-queue/reference.mdx create mode 100644 merge-queue/reference/common-problems.mdx create mode 100644 merge-queue/reference/merge-queue-cli-reference.mdx create mode 100644 merge-queue/reference/merge.mdx create mode 100644 merge-queue/reference/troubleshooting.mdx create mode 100644 merge-queue/using-the-queue.mdx create mode 100644 merge-queue/using-the-queue/emergency-pull-requests.mdx create mode 100644 
merge-queue/using-the-queue/handle-failed-pull-requests.mdx create mode 100644 merge-queue/using-the-queue/monitor-queue-status.mdx create mode 100644 merge-queue/using-the-queue/reference.mdx create mode 100644 merge-queue/using-the-queue/stacked-pull-requests.mdx create mode 100644 merge-queue/webhooks.mdx create mode 100644 setup-and-administration/apis.mdx create mode 100644 setup-and-administration/apis/webhooks.mdx create mode 100644 setup-and-administration/billing.mdx create mode 100644 setup-and-administration/connecting-to-trunk.mdx create mode 100644 setup-and-administration/github-app-permissions.mdx create mode 100644 setup-and-administration/managing-your-organization.mdx create mode 100644 setup-and-administration/security.mdx create mode 100644 setup-and-administration/support.mdx diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..8863ee4 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,34 @@ +> **Customize this file**: Tailor this template to your project by noting specific contribution types you're looking for, adding a Code of Conduct, or adjusting the writing guidelines to match your style. + +# Contribute to the documentation + +Thank you for your interest in contributing to our documentation! This guide will help you get started. + +## How to contribute + +### Option 1: Edit directly on GitHub + +1. Navigate to the page you want to edit +2. Click the "Edit this file" button (the pencil icon) +3. Make your changes and submit a pull request + +### Option 2: Local development + +1. Fork and clone this repository +2. Install the Mintlify CLI: `npm i -g mint` +3. Create a branch for your changes +4. Make changes +5. Navigate to the docs directory and run `mint dev` +6. Preview your changes at `http://localhost:3000` +7. Commit your changes and submit a pull request + +For more details on local development, see our [development guide](development.mdx). 
+ +## Writing guidelines + +- **Use active voice**: "Run the command" not "The command should be run" +- **Address the reader directly**: Use "you" instead of "the user" +- **Keep sentences concise**: Aim for one idea per sentence +- **Lead with the goal**: Start instructions with what the user wants to accomplish +- **Use consistent terminology**: Don't alternate between synonyms for the same concept +- **Include examples**: Show, don't just tell diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..5411374 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Mintlify + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..4552fbc --- /dev/null +++ b/README.md @@ -0,0 +1,55 @@ +# Mintlify Starter Kit + +Use the starter kit to get your docs deployed and ready to customize. 
+ +Click the green **Use this template** button at the top of this repo to copy the Mintlify starter kit. The starter kit contains examples with + +- Guide pages +- Navigation +- Customizations +- API reference pages +- Use of popular components + +**[Follow the full quickstart guide](https://starter.mintlify.com/quickstart)** + +## AI-assisted writing + +Set up your AI coding tool to work with Mintlify: + +```bash +npx skills add https://mintlify.com/docs +``` + +This command installs Mintlify's documentation skill for your configured AI tools like Claude Code, Cursor, Windsurf, and others. The skill includes component reference, writing standards, and workflow guidance. + +See the [AI tools guides](/ai-tools) for tool-specific setup. + +## Development + +Install the [Mintlify CLI](https://www.npmjs.com/package/mint) to preview your documentation changes locally. To install, use the following command: + +``` +npm i -g mint +``` + +Run the following command at the root of your documentation, where your `docs.json` is located: + +``` +mint dev +``` + +View your local preview at `http://localhost:3000`. + +## Publishing changes + +Install our GitHub app from your [dashboard](https://dashboard.mintlify.com/settings/organization/github-app) to propagate changes from your repo to your deployment. Changes are deployed to production automatically after pushing to the default branch. + +## Need help? + +### Troubleshooting + +- If your dev environment isn't running: Run `mint update` to ensure you have the most recent version of the CLI. +- If a page loads as a 404: Make sure you are running in a folder with a valid `docs.json`. 
+ +### Resources +- [Mintlify documentation](https://mintlify.com/docs) diff --git a/docs.json b/docs.json new file mode 100644 index 0000000..20d59dc --- /dev/null +++ b/docs.json @@ -0,0 +1,291 @@ +{ + "$schema": "https://mintlify.com/docs.json", + "name": "Trunk Platform Documentation", + "theme": "aspen", + "colors": { + "primary": "#346DDB", + "light": "#346DDB", + "dark": "#346DDB" + }, + "favicon": "favicon.svg", + "logo": { + "light": "/logo/light.svg", + "dark": "/logo/dark.svg" + }, + "fonts": { + "heading": { + "family": "Neue", + "source": "https://trunk.io/_next/static/media/neue_medium-s.p.a1f08070.woff2", + "format": "woff2" + }, + "body": { + "family": "Neue", + "source": "https://trunk.io/_next/static/media/neue_regular-s.p.b8e8b595.woff2", + "format": "woff2" + } + }, + "navbar": { + "links": [ + { + "label": "Log in", + "href": "https://app.trunk.io/login" + } + ], + "primary": { + "type": "button", + "label": "Get a demo", + "href": "https://calendly.com/trunk/demo" + } + }, + "navigation": { + "tabs": [ + { + "tab": "Home", + "groups": [ + { + "group": "Platform", + "pages": ["introduction"] + } + ] + }, + { + "tab": "Merge Queue", + "groups": [ + { + "group": "Overview", + "pages": ["merge-queue/merge-queue"] + }, + { + "group": "Getting Started", + "pages": [ + "merge-queue/getting-started", + "merge-queue/getting-started/install-and-create-your-queue", + "merge-queue/getting-started/configure-branch-protection", + "merge-queue/getting-started/configure-ci-status-checks", + "merge-queue/getting-started/test-your-setup", + "merge-queue/migrating-from-github-merge-queue" + ] + }, + { + "group": "Optimizations", + "pages": [ + "merge-queue/optimizations", + "merge-queue/optimizations/predictive-testing", + "merge-queue/optimizations/anti-flake-protection", + "merge-queue/optimizations/batching", + "merge-queue/optimizations/priority-merging", + "merge-queue/optimizations/optimistic-merging", + "merge-queue/optimizations/pending-failure-depth", + 
"merge-queue/optimizations/direct-merge-to-main" + ] + }, + { + "group": "Parallel Queues", + "pages": [ + "merge-queue/optimizations/parallel-queues", + "merge-queue/optimizations/parallel-queues/bazel", + "merge-queue/optimizations/parallel-queues/nx", + "merge-queue/optimizations/parallel-queues/api" + ] + }, + { + "group": "Using the Queue", + "pages": [ + "merge-queue/using-the-queue", + "merge-queue/using-the-queue/reference", + "merge-queue/using-the-queue/monitor-queue-status", + "merge-queue/using-the-queue/handle-failed-pull-requests", + "merge-queue/using-the-queue/stacked-pull-requests", + "merge-queue/using-the-queue/emergency-pull-requests" + ] + }, + { + "group": "Integrations", + "pages": [ + "merge-queue/integration-for-slack", + "merge-queue/webhooks" + ] + }, + { + "group": "Administration", + "pages": [ + "merge-queue/administration", + "merge-queue/administration/advanced-settings", + "merge-queue/administration/metrics" + ] + }, + { + "group": "Reference", + "pages": [ + "merge-queue/reference", + "merge-queue/reference/merge-queue-cli-reference", + "merge-queue/reference/merge", + "merge-queue/reference/common-problems", + "merge-queue/reference/troubleshooting" + ] + } + ] + }, + { + "tab": "Flaky Tests", + "groups": [ + { + "group": "Overview", + "pages": [ + "flaky-tests/overview", + "flaky-tests/get-started", + "flaky-tests/managing-detected-flaky-tests", + "flaky-tests/dashboard" + ] + }, + { + "group": "Test Frameworks", + "pages": [ + "flaky-tests/get-started/frameworks", + "flaky-tests/get-started/frameworks/android", + "flaky-tests/get-started/frameworks/bazel", + "flaky-tests/get-started/frameworks/behave", + "flaky-tests/get-started/frameworks/cypress", + "flaky-tests/get-started/frameworks/dart-test", + "flaky-tests/get-started/frameworks/googletest", + "flaky-tests/get-started/frameworks/gotestsum", + "flaky-tests/get-started/frameworks/gradle", + "flaky-tests/get-started/frameworks/jasmine", + 
"flaky-tests/get-started/frameworks/jest", + "flaky-tests/get-started/frameworks/karma", + "flaky-tests/get-started/frameworks/kotest", + "flaky-tests/get-started/frameworks/maven", + "flaky-tests/get-started/frameworks/minitest", + "flaky-tests/get-started/frameworks/mocha", + "flaky-tests/get-started/frameworks/nightwatch", + "flaky-tests/get-started/frameworks/nunit", + "flaky-tests/get-started/frameworks/other-test-frameworks", + "flaky-tests/get-started/frameworks/pest", + "flaky-tests/get-started/frameworks/phpunit", + "flaky-tests/get-started/frameworks/playwright", + "flaky-tests/get-started/frameworks/pytest", + "flaky-tests/get-started/frameworks/robot-framework", + "flaky-tests/get-started/frameworks/rspec", + "flaky-tests/get-started/frameworks/rspec/manual-uploads", + "flaky-tests/get-started/frameworks/rust", + "flaky-tests/get-started/frameworks/swift-testing", + "flaky-tests/get-started/frameworks/testplan", + "flaky-tests/get-started/frameworks/vitest", + "flaky-tests/get-started/frameworks/xctest" + ] + }, + { + "group": "CI Providers", + "pages": [ + "flaky-tests/get-started/ci-providers", + "flaky-tests/get-started/ci-providers/atlassian-bamboo", + "flaky-tests/get-started/ci-providers/azure-devops-pipelines", + "flaky-tests/get-started/ci-providers/bitbucket-pipelines", + "flaky-tests/get-started/ci-providers/buildkite", + "flaky-tests/get-started/ci-providers/circleci", + "flaky-tests/get-started/ci-providers/droneci", + "flaky-tests/get-started/ci-providers/github-actions", + "flaky-tests/get-started/ci-providers/gitlab", + "flaky-tests/get-started/ci-providers/jenkins", + "flaky-tests/get-started/ci-providers/otherci", + "flaky-tests/get-started/ci-providers/semaphoreci", + "flaky-tests/get-started/ci-providers/travisci", + "flaky-tests/get-started/multiple-repositories" + ] + }, + { + "group": "Detection & Operations", + "pages": [ + "flaky-tests/detection", + "flaky-tests/detection/pass-on-retry-monitor", + 
"flaky-tests/detection/threshold-monitor", + "flaky-tests/detection/flag-as-flaky", + "flaky-tests/infrastructure-failure-protection", + "flaky-tests/the-importance-of-pr-test-results", + "flaky-tests/quarantining", + "flaky-tests/quarantine-service-availability", + "flaky-tests/github-pull-request-comments" + ] + }, + { + "group": "Ticketing Integrations", + "pages": [ + "flaky-tests/ticketing-integrations", + "flaky-tests/ticketing-integrations/jira-integration", + "flaky-tests/ticketing-integrations/linear-integration", + "flaky-tests/ticketing-integrations/other-ticketing-platforms" + ] + }, + { + "group": "Webhooks", + "pages": [ + "flaky-tests/webhooks", + "flaky-tests/webhooks/slack-integration", + "flaky-tests/webhooks/microsoft-teams-integration", + "flaky-tests/webhooks/github-issues-integration", + "flaky-tests/webhooks/linear-integration" + ] + }, + { + "group": "APIs & CLI", + "pages": ["flaky-tests/flaky-tests", "flaky-tests/uploader"] + }, + { + "group": "MCP Server", + "pages": [ + "flaky-tests/use-mcp-server", + "flaky-tests/use-mcp-server/configuration", + "flaky-tests/use-mcp-server/configuration/cursor-ide", + "flaky-tests/use-mcp-server/configuration/github-copilot-ide", + "flaky-tests/use-mcp-server/configuration/claude-code-cli", + "flaky-tests/use-mcp-server/configuration/gemini-cli", + "flaky-tests/use-mcp-server/mcp-tool-reference", + "flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis", + "flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads" + ] + } + ] + }, + { + "tab": "Setup & Administration", + "groups": [ + { + "group": "Account Setup", + "pages": ["setup-and-administration/connecting-to-trunk"] + }, + { + "group": "Administration", + "pages": [ + "setup-and-administration/managing-your-organization", + "setup-and-administration/github-app-permissions", + "setup-and-administration/support", + "setup-and-administration/billing", + "setup-and-administration/security" + ] + }, + { + "group": "API 
Reference", + "pages": [ + "setup-and-administration/apis", + "setup-and-administration/apis/webhooks" + ] + } + ] + } + ] + }, + "contextual": { + "options": ["copy", "view", "chatgpt", "claude"] + }, + "footer": { + "socials": { + "x": "https://x.com/trunk_io", + "github": "https://github.com/trunk-io" + } + }, + "seo": { + "metatags": { + "robots": "noindex" + } + } +} diff --git a/favicon.svg b/favicon.svg new file mode 100644 index 0000000..d50ceed --- /dev/null +++ b/favicon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/flaky-tests/dashboard.mdx b/flaky-tests/dashboard.mdx new file mode 100644 index 0000000..e434a23 --- /dev/null +++ b/flaky-tests/dashboard.mdx @@ -0,0 +1,85 @@ +--- +title: "Dashboard" +description: "Learn to find flaky tests and understand their impact using the Flaky Tests dashboard" +--- +Trunk Flaky Tests detect flaky tests by analyzing test results. The health of your tests is displayed in the Flaky Tests dashboard. + +### Key repository metrics + +

Key repo metrics

+ +Trunk Flaky Test provides key repo metrics based on the detected health status of your tests. You'll find metrics for the following information at the top of the Flaky Test dashboard. + +
MetricDescription
Flaky testsNumber of flaky test cases in your repo.
PRs blocked by failed testsPRs that have been blocked by failed tests in CI.
+ +These numbers are important for understanding the overall health of your repo’s tests, how flaky tests impact your developer productivity, and the developer hours saved from quarantining tests. You can also view the trends in these numbers in the trend charts. + +The trend charts display the New Test Cases added by day, as well as Test Transitions and Quarantined Runs. Test Transitions represent the number of tests that have transitioned to a particular status on a particular day, excluding new test cases (which default to a status of Healthy). If a bar shows 5 Healthy, 10 Flaky, and 2 Broken on a single day, that indicates 5 tests transitioned to Healthy, 10 to Flaky, and 2 to Broken on that day. Quarantined Runs represents the number of runs of quarantined tests by day. + +### Tests cases overview + +
+ +You can view a table of all your test cases and their current status in Trunk Flaky Tests. + +Filters can also be set on the table to narrow test results down by test status, quarantine setting, ticket status, or by the name, file, or suite name of the test case. + +The table is sorted by default by the number of PRs impacted by the case, which is the best way to measure the impact of a flaky test. You can click on each test case to view [the test case’s details](#test-case-details). + +
ColumnDescription
TestsThe variant, file path, and name of the test case.
StatusThe health status of the test case: Healthy, Flaky, or Broken. Broken indicates consistent high-rate failures; Flaky indicates intermittent failures.
Failure RateThe percentage of CI runs failed due to this test case.
PRs ImpactedThe number of PRs that have been affected by this test case failing in CI.
Last RunThe most recent timestamp for an upload test run.
+ + +Test Deletion & History + +* Inactive tests disappear from the dashboard automatically after 30 days and are fully removed after 45 days. Tests cannot be manually deleted. +* Changing test identifiers (e.g., adding file paths) creates new test entries — merging with old history isn’t supported. + + +### Test case details + +
+ +You can *click* on any of the test cases listed on the Flaky Test dashboard to access the test case’s details. On a test's details page, you can find: + +* The test's current status (Healthy, Flaky, or Broken) +* Which monitors are currently active for the test, and which monitor triggered each status change +* Visualizations and a timeline detailing the test's health history +* A table of unique failure types for this test + +This is in addition to information like ticket status and the current codeowner. + +### **Code owners** + +If you have a codeowners file configured in your repos, you will see who owns each flaky test in the test details view. We support code owners for [GitHub](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) and [GitLab](https://docs.gitlab.com/ee/user/project/codeowners/) repos. + +

You can find the code owners of each test on the top right of the test details screen.

+ +This information will also be provided when creating a ticket with the [Jira integration](/flaky-tests/ticketing-integrations/jira-integration) or [webhooks](/flaky-tests/webhooks). + +### **Failure types** + +
+ +The Failure Types table shows the history of past test runs grouped by unique failure types. + +The Failure Type is a summary of the stack trace of the test run. You can click on the failure type to see a list of test runs labeled by branch, PR, Author, CI Job link, duration, and time. + +### Failure details + +You can click on any of these test runs to see the detailed stack trace: + +
+ +You can flip through the stack traces of similar failures across different test runs by clicking the left and right arrow buttons. You can also see other similar failures on this and other tests. + +#### Go to the CI job logs + +If you want to see full logging of the original CI job for an individual test failure, you can click **Logs** in the expanded failure details panel to go to the job's page in your CI provider. + +
+ +### **Test history** + +
+ +Tests may transition between Healthy, Flaky, and Broken states multiple times over their lifetime. You can see previous status changes in Test History, as well as an explanation for why each transition occurred — including which monitor triggered it. diff --git a/flaky-tests/detection.mdx b/flaky-tests/detection.mdx new file mode 100644 index 0000000..27839e1 --- /dev/null +++ b/flaky-tests/detection.mdx @@ -0,0 +1,83 @@ +--- +title: "Flaky test detection" +description: "Learn how Trunk detects and labels flaky and broken tests" +--- +Flake Detection automatically identifies problematic tests in your test suite by monitoring test behavior over time. Instead of a single set of built-in detection rules, Trunk uses **monitors**, independent detectors that each watch for a specific pattern. When any monitor flags a test, it's marked as flaky or broken. When all monitors agree the test has recovered, it returns to healthy. + +## How Monitors Work + +Each monitor independently observes your test runs and tracks two states per test: **active** (problematic behavior detected) or **inactive** (no problematic behavior). A test's overall status is determined by combining all of its monitors, with the most severe status winning: + +| Priority | Status | Condition | +| -------- | ----------- | --------------------------------------------------------------------- | +| Highest | **Broken** | Any enabled broken-type threshold monitor is active for this test | +| Middle | **Flaky** | Any enabled flaky-type monitor (threshold or pass-on-retry) is active | +| Lowest | **Healthy** | No active monitors | + +If a test triggers both a broken monitor and a flaky monitor simultaneously, it shows as **Broken**. When the broken monitor resolves (e.g., you fix the regression and the failure rate drops), the test transitions to **Flaky** if a flaky monitor is still active, or to **Healthy** if no monitors remain active. 
+ +A test stays in its detected state until every relevant monitor that flagged it has independently resolved. + +### Disabling or Deleting a Monitor + +When you disable or delete a monitor, it is immediately set to **resolved** for every test case in the repo. This triggers a status re-evaluation for all affected tests. If the disabled monitor was the only active monitor for a test, that test transitions to healthy. If other monitors are still active, the test remains in the most severe active state. + +For example, if you have a broken threshold monitor and a flaky pass-on-retry monitor, and you disable the broken monitor, any test that was only flagged by the broken monitor will become healthy. A test flagged by both will transition from broken to flaky (because pass-on-retry is still active). + +## Monitor Types + +| Monitor | What it detects | Detection type | Plan availability | Default state | +| -------------------------------------------------------------------------------------- | ----------------------------------------------------------------- | --------------- | ----------------- | ------------- | +| [**Pass-on-Retry**](/flaky-tests/detection/pass-on-retry-monitor) | A test fails then passes on the same commit (retry after failure) | Flaky | Team and above | Enabled | +| [**Threshold**](/flaky-tests/detection/threshold-monitor) | Failure rate exceeds a configured percentage over a time window | Flaky or Broken | Paid plans | Disabled | + +You can run multiple monitors simultaneously. For example, you might use pass-on-retry to catch classic retry-based flakiness while also running threshold monitors scoped to different branches. A common pattern is to pair a broken-type threshold monitor (catching consistently failing tests) with a flaky-type threshold monitor (catching intermittently failing tests). See [Threshold Monitor: Recommended Configurations](/flaky-tests/detection/threshold-monitor#recommended-configurations) for details. 
+ +If you need to manually flag a test that automated monitors haven't caught, use [Flag as Flaky](/flaky-tests/detection/flag-as-flaky) from the test detail page. + +## Branch-Aware Detection + +Tests often behave differently depending on where they run. Failures on `main` are usually unexpected and signal flakiness. Failures on PR branches may be expected during active development. Merge queue failures are suspicious because the code has already passed PR checks. + +Rather than applying a single set of branch rules automatically, Trunk gives you control over how detection treats different branches through **branch scoping** on threshold monitors. You can create separate monitors with different thresholds and windows for your stable branch, PR branches, and merge queue branches. See [Threshold Monitor: Recommended configurations](/flaky-tests/detection/threshold-monitor#recommended-configurations) for specific guidance. + +Pass-on-retry detection is branch-agnostic. It flags any test that fails and passes on the same commit, regardless of which branch the test ran on. + +## Muting Monitors + +You can temporarily mute a monitor for a specific test case. A muted monitor continues to run and record detections, but it won't contribute to the test's flaky status until the mute expires. + +This is useful when you know a test is flaky but want to suppress the signal temporarily, for example while a fix is in progress or during a known infrastructure issue. Unlike [Flag as Flaky](/flaky-tests/detection/flag-as-flaky), which is a persistent user override, muting preserves the detection history and automatically re-enables itself after the mute period. + +### How Muting Works + +You can mute a monitor from the test case view in the Trunk app. When muting, you choose a duration: + +| Duration | +| -------- | +| 1 hour | +| 4 hours | +| 24 hours | +| 7 days | +| 30 days | + +While muted, the monitor is excluded from the test's status calculation. 
If the muted monitor was the only active monitor, the test transitions from flaky to healthy for the duration of the mute. When the mute expires, the monitor is automatically included in the next status evaluation. If it's still active, the test will be flagged as flaky again. + +You can also unmute a monitor early from the test case view. + + +You can only mute a monitor that has already detected flaky behavior for a test. If a monitor has never been active for a test, the mute option is disabled. + + +### When to Mute vs. Other Options + +| Situation | Recommended action | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | +| Fix is in progress and you want to suppress noise temporarily | **Mute** the monitor for a few days | +| Test is flaky but no automated monitor has caught it | Use [**Flag as Flaky**](/flaky-tests/detection/flag-as-flaky) to mark it as flaky | +| You want to stop a monitor from evaluating a test permanently | Adjust the monitor's branch scope or thresholds instead | +| You want to suppress all flaky signals for a test | Mute each active monitor individually, or address the root cause | + +## Variants + +If you run the same tests across different environments or architectures, you can use [variants](/flaky-tests/uploader) to separate these runs into distinct test cases. This lets monitors detect environment-specific flakes. For example, a test might be flaky on iOS but stable on Android. Using variants, monitors isolate flakes on the iOS variant instead of marking the test as flaky across all environments. See the [Trunk Analytics CLI docs](/flaky-tests/uploader) for details on how to upload with variants. 
diff --git a/flaky-tests/detection/flag-as-flaky.mdx b/flaky-tests/detection/flag-as-flaky.mdx new file mode 100644 index 0000000..26253b1 --- /dev/null +++ b/flaky-tests/detection/flag-as-flaky.mdx @@ -0,0 +1,54 @@ +--- +title: "Flag as Flaky" +description: "Manually mark a test as flaky from the test detail page" +--- +Manually mark a test as flaky when you know it's unreliable but automated monitors haven't detected it yet — or when you want to override the system's assessment. + +## When to Use It + +* A test is intermittently failing but hasn't been flagged by threshold or pass-on-retry monitors. +* You want to immediately quarantine a test while investigating. +* You've identified a flaky test through code review or local observation. + +## How It Works + +### Flagging a Test + +1. Navigate to the test detail page for the test you want to flag. +2. Click the **Flag as Flaky** button in the header row, next to the status badge. +3. In the popover that appears, optionally add a reason (up to 256 characters) explaining why you're flagging it. +4. Click **Flag** to confirm. + +Once flagged: + +* The test is immediately marked as **flaky**, regardless of what automated monitors report. +* An amber banner appears below the header showing who flagged it, when, and the reason (if provided). +* The flag is additive — if automated monitors later detect the test as flaky too, both signals coexist. + +### Removing the Flag + +1. On the test detail page, find the amber "Manually flagged as flaky" banner. +2. Click the **Remove flag** button on the right side of the banner. +3. Confirm by clicking **Remove flag** in the popover. + +After removing: + +* The test's status reverts to whatever the automated monitors determine. +* If monitors are still detecting the test as flaky, it remains flaky. The flag removal only clears the manual override. + +## Relationship to Monitors + +The "Flag as Flaky" action is independent of automated monitors (threshold-based, pass-on-retry). 
It does not appear in the Monitors tab. + +| Scenario | Test status | +| ----------------------------------- | ------------------------ | +| No monitors active, no flag | Healthy | +| Monitors active, no flag | Flaky (detected) | +| No monitors active, flag set | Flaky (manually flagged) | +| Monitors active, flag set | Flaky (both) | +| Flag removed, monitors still active | Flaky (detected) | +| Flag removed, monitors inactive | Healthy | + +## Flag History + +All flag and unflag actions are recorded as events. You can view the history by opening the Flag History panel from the test detail page. Each entry shows who performed the action, when, and the reason (if one was provided). diff --git a/flaky-tests/detection/pass-on-retry-monitor.mdx b/flaky-tests/detection/pass-on-retry-monitor.mdx new file mode 100644 index 0000000..0549bbd --- /dev/null +++ b/flaky-tests/detection/pass-on-retry-monitor.mdx @@ -0,0 +1,51 @@ +--- +title: "Pass-on-Retry Monitor" +description: "Detect tests that fail then pass on retry within the same commit" +--- +The pass-on-retry monitor detects the most common flakiness pattern: a test fails, is retried, and passes on the same commit. This indicates the failure wasn't caused by a code change and that the test is unreliable. + +This monitor is branch-agnostic. It evaluates all test runs regardless of which branch they ran on. + +## How It Works + +The monitor continuously scans your test runs looking for commits where a test has both a failure and a success. When it finds one, the test is flagged as flaky. + +Once flagged, the test remains flaky until no pass-on-retry behavior has been observed for a configurable recovery period. This prevents tests from bouncing between flaky and healthy if they only fail intermittently. + +### Example + +Your CI retries failed tests automatically. On commit `abc123`: + +1. `test_login` fails on the first attempt +2. 
`test_login` passes on retry + +The monitor detects that `test_login` had both a failure and success on the same commit and flags it as flaky. + +Seven days later (assuming default settings), if `test_login` hasn't exhibited any more retry behavior, the monitor resolves and the test returns to healthy. + +## Configuration + +| Setting | Description | Default | +| ----------------- | ---------------------------------------------------------------------------------------------- | ------- | +| **Enabled** | Whether the monitor is active | On | +| **Recovery days** | Days without pass-on-retry behavior before a test is resolved as healthy. Range: 1 to 15 days. | 7 | + +### What Recovery Days Controls + +A shorter recovery period (e.g., 1 to 3 days) returns tests to healthy quickly, which is useful if you fix flaky tests promptly and want fast feedback. A longer recovery period (e.g., 10 to 15 days) is more conservative. It keeps tests flagged longer to account for flaky behavior that only surfaces occasionally. + +## When Detection Happens + +Pass-on-retry detection runs continuously as new test results arrive. A failure and its corresponding retry don't need to arrive at exactly the same time. + +Resolution is evaluated daily. If a test hasn't shown pass-on-retry behavior within the recovery window, it resolves on the next daily check. + +## Muting + +You can temporarily mute the pass-on-retry monitor for a specific test case. See [Muting monitors](/flaky-tests/detection#muting-monitors) for details. + +## Edge Cases + +**Failure without a retry yet:** If a test fails but hasn't been retried, no detection occurs. If the retry arrives later (even hours or days later on the same commit), the monitor will pick it up. + +**Multiple retries on one commit:** If a test fails and is retried several times on the same commit, the monitor treats it as a single detection for that commit. 
diff --git a/flaky-tests/detection/threshold-monitor.mdx b/flaky-tests/detection/threshold-monitor.mdx new file mode 100644 index 0000000..893181d --- /dev/null +++ b/flaky-tests/detection/threshold-monitor.mdx @@ -0,0 +1,232 @@ +--- +title: "Threshold Monitor" +description: "Detect flaky or broken tests based on failure rate over a configurable time window" +--- +The threshold monitor detects tests based on failure rate over a rolling time window. Unlike pass-on-retry, which looks for a specific pattern on a single commit, the threshold monitor identifies tests that fail too often over a period of time, even if no individual failure looks like a retry. + +You can create multiple threshold monitors with different configurations. This is how you tailor detection to different branches, test volumes, sensitivity levels, and detection types. + +## Detection Type + +Each threshold monitor has a **detection type** — either **flaky** or **broken** — which controls what status a test receives when the monitor flags it: + +* **Flaky monitors** catch tests that fail intermittently (e.g., 20–50% failure rate). These are typically caused by timing issues, shared state, or non-deterministic behavior. +* **Broken monitors** catch tests that fail consistently at a high rate (e.g., 80%+ failure rate). These usually indicate a real regression — something in the code or environment is genuinely broken and needs a fix. + +The detection type is set at creation and cannot be changed afterward. If you need to switch a monitor's type, create a new monitor with the desired type and disable the old one. + +This distinction matters because the two problems call for different responses. Flaky tests might be quarantined while you investigate the root cause. Broken tests represent real failures that should be fixed, not hidden. + +## How It Works + +The monitor periodically calculates the failure rate for each test within a time window you define. 
If the rate meets or exceeds your activation threshold and the test has enough runs to be statistically meaningful, the test is flagged as flaky or broken depending on the monitor's detection type. + +### Example + +You configure a threshold monitor with: + +| Setting | Value | +| -------------------- | ------- | +| Detection type | Flaky | +| Activation threshold | 30% | +| Window | 6 hours | +| Minimum sample size | 50 runs | +| Branches | `main` | + +Over the last 6 hours, here's what the monitor observes: + +| Test | Runs | Failures | Failure rate | Meets min sample? | Result | +| --------------- | ---- | -------- | ------------ | ----------------- | ------------------------------------------------- | +| `test_checkout` | 120 | 42 | 35% | Yes (120 ≥ 50) | **Flagged as flaky** — rate exceeds 30% threshold | +| `test_signup` | 8 | 3 | 37.5% | No (8 < 50) | **Not flagged** — insufficient data | + +`test_checkout` is flagged because its 35% failure rate exceeds the 30% threshold and it has enough runs to be statistically meaningful. `test_signup` has a higher failure rate but is skipped entirely — the monitor needs at least 50 runs before making a call. + +## Configuration + +### Detection Type + +Choose **Flaky** or **Broken**. This determines the status a test receives when the monitor flags it. See [Detection Type](#detection-type) above for guidance on which to use. + +### Activation Threshold + +The failure rate that triggers detection, expressed as a percentage. A test is flagged when its failure rate meets or exceeds this value within the time window. + +For flaky monitors, setting this lower (e.g., 10%) catches more intermittent failures but may produce false positives. Setting it higher (e.g., 50%) is more conservative and only flags tests that fail frequently. + +For broken monitors, a high threshold (e.g., 80–100%) is appropriate — you want to catch tests that are consistently failing, not ones with occasional failures. 
+ +### Resolution Threshold + +The failure rate a test must drop below to be resolved. If not set, it defaults to the activation threshold, meaning a test resolves as soon as its failure rate drops below the activation level. + +Setting this lower than the activation threshold creates a buffer that prevents tests from flapping between flagged and resolved. For example, if you activate at 30% and resolve at 15%, a test flagged at 30% must improve to below 15% before it's marked healthy again. A test hovering at 20% failure rate stays flagged rather than flipping back and forth. + +The gap between activation (30%) and resolution (15%) is the buffer zone. A test with a failure rate in this range keeps its current status: a healthy test won't be flagged, but a test already flagged won't be resolved either. + +### Window Duration + +The rolling time window (in hours) over which failure rate is calculated. Only test runs within this window are considered. + +A shorter window (e.g., 1 hour) reacts quickly to recent failures but may miss patterns that play out over longer periods. A longer window (e.g., 24 hours) smooths out short-term spikes and gives a more stable picture, but takes longer to detect new issues and longer to resolve. + +### Minimum Sample Size + +The minimum number of test runs required within the time window before the monitor will evaluate a test. Tests with fewer runs are skipped entirely. They won't be flagged or resolved until enough data accumulates. + +This prevents the monitor from making decisions on insufficient data. A test that ran 3 times with 2 failures is a 66% failure rate, but that's not enough data to be confident. + +The right minimum depends on how often a test actually runs on the branches you're monitoring. To get a sense of run frequency, open the test's **Test History** and filter to the branch you care about — this shows how many runs accumulate over any given period. 
If your tests run hundreds of times per day, a minimum of 50 to 100 is reasonable. If tests only run a few times per day, a lower minimum may be necessary, but lower minimums mean less statistical confidence. + +### Stale Timeout + +How long (in hours) a flagged test can go without any runs before it's automatically resolved as stale. This clears out tests that have been deleted, renamed, or are no longer part of your test suite. + +When not set, flagged tests remain in their detected state indefinitely until they run enough times to recover through the normal threshold check. Setting a stale timeout (e.g., 24 hours) ensures abandoned tests don't clutter your test list. + +A test resolved as stale is simply no longer being tracked by this monitor. If the test starts running again and exceeds the activation threshold, it will be re-flagged. + + +Skipped tests count as not being run. If you have a stale timeout configured and a test starts being skipped rather than executed, the monitor will treat it as having no runs and resolve it as stale once the timeout elapses. + + +### Branch Scope + +Which branches the monitor evaluates. You can specify up to 10 branch patterns. Only test runs on matching branches are included in the failure rate calculation. Runs across all matching patterns are pooled together — the failure rate is calculated from the combined set of runs, not evaluated per-pattern individually. This means a monitor scoped to `main` and `release/*` will look at all runs on any of those branches together when determining the failure rate. + +#### Branch Pattern Syntax + +Branch patterns use glob-style matching with two special characters: + +| Character | Meaning | Regex equivalent | +| --------- | -------------------------------------------- | ---------------- | +| `*` | Zero or more of any character, including `/` | `.*` | +| `?` | Exactly one of any character | `.` | + +All other characters are matched literally. 
Special regex characters (like `.`, `+`, `(`, `)`, `[`, `]`) are treated as literal characters in patterns, not as regex operators. You don't need to escape them. + + +Unlike some glob implementations, `*` matches across `/` separators. The pattern `feature/*` matches both `feature/login` and `feature/api/auth`. + + +#### Pattern Examples + +| Pattern | Matches | Does not match | +| --------------- | ----------------------------------- | ------------------------------------------------------ | +| `main` | `main` | `main-v2`, `maint` | +| `feature/*` | `feature/login`, `feature/api/auth` | `feature` (no trailing path), `features/x` | +| `release-?.?.?` | `release-1.2.3` | `release-10.2.3` (10 is two characters), `release-1.2` | +| `*-hotfix` | `prod-hotfix`, `release/v1-hotfix` | `hotfix`, `hotfix-1` | +| `*` | All branches | | + +A pattern with no special characters matches that exact branch name only. For example, `main` matches the branch named `main` and nothing else. + +#### Stable Branch Patterns + +For your main or stable branch, use the exact branch name: + +| Your stable branch | Pattern | +| ------------------ | --------- | +| `main` | `main` | +| `master` | `master` | +| `develop` | `develop` | + +#### Merge Queue Branch Patterns + +If you use a merge queue, your queue creates temporary branches to test changes before merging. Each merge queue product uses a different branch naming convention: + +| Merge queue | Branch pattern | Example branches matched | +| -------------------- | --------------------- | ------------------------------------------ | +| Trunk Merge Queue | `trunk-merge/*` | `trunk-merge/main/1`, `trunk-merge/main/2` | +| GitHub Merge Queue | `gh-readonly-queue/*` | `gh-readonly-queue/main/pr-123-abc` | +| Graphite Merge Queue | `graphite-merge/*` | `graphite-merge/main/1` | + +GitLab Merge Trains run on the target branch directly rather than creating separate branches. 
To monitor merge train runs, scope your monitor to the target branch (e.g., `main`). + +#### Tips for Branch Scoping + +* You can add up to **10 patterns** per monitor. A test run is included if its branch matches any of the patterns. +* Since patterns can't express "everything except a branch," a practical approach is to create **separate monitors**: one scoped to `main` with strict settings, and another scoped to your PR branch naming patterns (e.g., `feature/*`, `fix/*`) with more lenient settings. +* `**` is treated as two consecutive `*` wildcards, which is functionally identical to a single `*`. There is no special multi-segment matching behavior. + +## Resolution Behavior + +A flagged test resolves in one of two ways: + +**Healthy recovery:** The test's failure rate drops below the resolution threshold (or activation threshold, if no resolution threshold is set) and it still has enough runs to meet the minimum sample size. This means the test is actively running and has improved. + +**Stale recovery:** If a stale timeout is configured and the test has no runs on matching branches within that period, it resolves as stale. This is an automatic cleanup mechanism, not an indication that the test has improved. + +Tests that are still running but haven't accumulated enough runs to meet the minimum sample size remain in their current state. They won't be resolved until there's enough data to make a determination. + +## Muting + +You can temporarily mute a threshold monitor for a specific test case. See [Muting monitors](/flaky-tests/detection#muting-monitors) for details. 
+ +## Recommended Configurations + +A common setup is to pair two threshold monitors — one to catch broken tests quickly and one to catch flaky tests over a longer window: + +| Monitor | Detection type | Activation threshold | Window | Purpose | +| -------------- | -------------- | -------------------- | ----------- | -------------------------------------------------------------------------------------- | +| Broken on main | Broken | 80–100% | 1–6 hours | Catch tests that are reliably failing — real regressions that need immediate attention | +| Flaky on main | Flaky | 20–50% | 12–72 hours | Catch intermittently failing tests — candidates for investigation or quarantine | + +You can create as many monitors as you need. For example, you might want separate monitors for your main branch and pull request branches, or different thresholds for different levels of severity. The following sections provide starting points for common scenarios. + +**Choosing a window:** The window duration should match how often tests run on the branches you're monitoring. A window needs enough runs to reach the minimum sample size before it can flag anything. If tests run infrequently, a longer window is necessary to accumulate enough data. A narrower window reacts more quickly — spikes of failures roll off faster, and tests recover to healthy more quickly once the underlying problem is resolved. + +### Main Branch: Catch Flakiness Early + +Failures on your stable branch are a strong signal. Tests should be passing before code is merged, so failures here are unexpected and likely indicate flakiness. 
+ +| Setting | Suggested value | Why | +| -------------------- | ------------------------------------- | ---------------------------------------------------------------- | +| Activation threshold | 10 to 20% | Low threshold catches subtle flakiness early | +| Resolution threshold | 5 to 10% | Requires clear improvement before resolving | +| Window | 6 to 24 hours | Long enough to accumulate data, short enough to catch new issues | +| Min sample size | 20 to 50 | Depends on how often your tests run on main | +| Branches | `main` (or `master`, `develop`, etc.) | Use the exact name of your stable branch | + +### Pull Requests: Catch Broken Tests + +On PR branches, tests are expected to fail — that's part of active development. Analyzing failure rate for flakiness on PRs is generally not productive because a new failing test is likely caused by the code change under review, not non-deterministic behavior. Pass-on-retry already handles real flakiness on PRs: if a test fails and then passes on retry within the same commit, it will be detected regardless of branch. + +If you do want a threshold monitor on PRs, scope it to catch **broken** tests rather than flaky ones — tests that are consistently failing at a high rate across many PRs, which may indicate a persistent regression or a broken test environment. 
+ +| Setting | Suggested value | Why | +| -------------------- | ------------------------------------ | ----------------------------------------------------------------------------- | +| Detection type | Broken | Focus on consistently failing tests, not intermittent ones | +| Activation threshold | 70 to 90% | High threshold distinguishes real breakage from expected development failures | +| Resolution threshold | 40 to 50% | Wide buffer prevents flapping | +| Window | 12 to 24 hours | Longer window smooths out short-lived development failures | +| Min sample size | 30 to 100 | Higher minimum avoids flagging tests that only ran a few times on PRs | +| Branches | `feature/*`, `fix/*`, `dependabot/*` | Match your team's PR branch naming conventions | + +Since branch patterns can't express "everything except main," create one monitor scoped to `main` with strict settings and a second monitor scoped to your PR branch naming patterns with more lenient settings. + +### Merge Queue: Strict Monitoring + +Merge queue branches test code that has already passed PR checks. Failures here are suspicious. If you use a merge queue, consider a dedicated monitor with settings similar to or stricter than your main branch monitor. + +When sizing your window and minimum sample size, consider how many PRs your repo merges per day. For example, if your team merges 10 PRs per day, a 12-hour window will accumulate roughly 5 merge queue runs — setting a minimum sample size of 10 would mean the rule never has enough data to evaluate. Match your minimum sample size to a realistic run count within your chosen window. 
+ +| Setting | Suggested value | Why | +| -------------------- | ---------------------------------------- | --------------------------------------------------------------- | +| Activation threshold | 10 to 15% | Low threshold, failures here are unexpected | +| Resolution threshold | 5% | Strict recovery | +| Window | 6 to 12 hours | Shorter window for faster detection | +| Min sample size | 5 to 15 | Size to how many merge queue runs accumulate in your window | +| Branches | `trunk-merge/*` or `gh-readonly-queue/*` | Use the pattern for your merge queue provider (see table above) | + +Common branch patterns for merge queues: + +| Merge queue | Branch pattern | +| ------------------ | --------------------- | +| Trunk Merge Queue | `trunk-merge/*` | +| GitHub Merge Queue | `gh-readonly-queue/*` | + +### Other Patterns + +* **Release branches:** A monitor scoped to `release/*` with strict thresholds catches flakiness before it ships. +* **Nightly or scheduled builds:** If you run comprehensive test suites on a schedule, a monitor with a longer window and higher minimum sample size can catch slow-burn flakiness that doesn't show up in faster CI runs. diff --git a/flaky-tests/flaky-tests.mdx b/flaky-tests/flaky-tests.mdx new file mode 100644 index 0000000..1b94f5b --- /dev/null +++ b/flaky-tests/flaky-tests.mdx @@ -0,0 +1,47 @@ +--- +title: "Flaky Tests API" +description: "The Trunk Flaky Tests API provides access to check the status of Trunk services and fetch unhealthy or quarantined tests in your project. The API is an HTTP REST API, returns JSON" +--- +The Trunk Flaky Tests API provides access to check the status of Trunk services and fetch [unhealthy](/flaky-tests/detection) or [quarantined](/flaky-tests/quarantining) tests in your project. The API is an HTTP REST API, returns JSON from all requests, and uses standard HTTP response codes. + +All requests must be [authenticated](/setup-and-administration/apis#authentication) by providing the `x-api-token` header. 
+ +## POST /flaky-tests/get-test-details + +> Get the details of a test case + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/get-test-details":{"post":{"summary":"Get the details of a test case","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"test_id":{"type":"string","format":"uuid","description":"The id of a test case. 
Should be a UUID."}},"required":["repo","org_url_slug","test_id"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"test":{"type":"object","properties":{"id":{"type":"string","format":"uuid","description":"A stable unique identifier for the test"},"repository":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the repository"}},"required":["html_url"]},"html_url":{"type":"string","format":"uri","description":"The URL of the test details"},"name":{"type":"string","description":"The name of the test"},"variant":{"type":"string","description":"The name of the test variant"},"status":{"type":"object","properties":{"value":{"type":"string","enum":["healthy","flaky","broken"],"description":"The current status value in lowercase"},"reason":{"type":"string","description":"The reason for the current status"},"timestamp":{"type":"string","format":"date-time","description":"The timestamp of the current status change"}},"required":["value","reason","timestamp"]},"most_common_failures":{"type":"array","items":{"type":"object","properties":{"summary":{"type":"string","description":"The summary of the failure"},"occurrence_count":{"type":"integer","minimum":0,"description":"The number of occurrences of this failure"},"last_occurrence":{"type":"string","format":"date-time","description":"The timestamp of the last occurrence"}},"required":["summary","occurrence_count"]},"description":"Several of the most common failures of the test. This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team."},"failure_rate_last_7d":{"type":"number","description":"The failure rate over the last 7 days"},"failure_rate_last_24h":{"type":"number","description":"The failure rate over the last 24 hours"},"file_path":{"type":"string","description":"The file path of the test"},"parent":{"type":"string","description":"The parent of the test. 
This includes the test suite (depending on the test runner)"},"classname":{"type":"string","description":"The class name of the test"},"codeowners":{"type":"array","items":{"type":"string"},"description":"Code owners for the test"},"pull_requests_impacted_last_7d":{"type":"integer","minimum":0,"description":"The number of pull requests impacted in the last 7 days"},"quarantined":{"type":"boolean","description":"Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed."},"ticket":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the associated ticket"}},"required":["html_url"]}},"required":["id","repository","html_url","name","variant","status","most_common_failures","failure_rate_last_7d","failure_rate_last_24h","codeowners","pull_requests_impacted_last_7d","quarantined"],"description":"The details of a test case."}},"required":["test"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/link-ticket-to-test-case + +> Link a ticket to a test case + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/link-ticket-to-test-case":{"post":{"summary":"Link a ticket to a test case","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"test_case_id":{"type":"string","format":"uuid","description":"The id of the test case. 
Should be a UUID."},"external_ticket_id":{"type":"string","description":"The external identifier of the ticket. For Jira this is the ticket number prefixed by the Project Key. For Linear this is the ticket number prefixed by the Team Identifier"},"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."}},"required":["test_case_id","external_ticket_id","repo"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/list-failing-tests + +> Get a list of distinct tests that failed in the given time range + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/list-failing-tests":{"post":{"summary":"Get a list of distinct tests that failed in the given time 
range","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"start_time":{"type":"string","format":"date-time","description":"The start time of the failing tests (inclusive). Must be within 7 days of the end time."},"end_time":{"type":"string","format":"date-time","description":"The end time of the failing tests (exclusive). Must be within 7 days of the start time."},"page_query":{"type":"object","properties":{"page_size":{"type":"integer","minimum":1,"maximum":100,"description":"The number of tests to return per page."},"page_token":{"type":"string","description":"The page token to use for pagination. This is returned from the previous call to this endpoint. 
For the first page, this should be empty."}},"required":["page_size"],"description":"Pagination options for the list of tests."}},"required":["repo","org_url_slug","start_time","end_time","page_query"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"tests":{"type":"array","items":{"type":"object","properties":{"id":{"type":"string","format":"uuid","description":"A stable unique identifier for the test"},"repository":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the repository"}},"required":["html_url"]},"html_url":{"type":"string","format":"uri","description":"The URL of the test details"},"name":{"type":"string","description":"The name of the test"},"variant":{"type":"string","description":"The name of the test variant"},"status":{"type":"object","properties":{"value":{"type":"string","enum":["healthy","flaky","broken"],"description":"The current status value in lowercase"},"reason":{"type":"string","description":"The reason for the current status"},"timestamp":{"type":"string","format":"date-time","description":"The timestamp of the current status change"}},"required":["value","reason","timestamp"]},"most_common_failures":{"type":"array","items":{"type":"object","properties":{"summary":{"type":"string","description":"The summary of the failure"},"occurrence_count":{"type":"integer","minimum":0,"description":"The number of occurrences of this failure"},"last_occurrence":{"type":"string","format":"date-time","description":"The timestamp of the last occurrence"}},"required":["summary","occurrence_count"]},"description":"Several of the most common failures of the test. 
This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team."},"failure_rate_last_7d":{"type":"number","description":"The failure rate over the last 7 days"},"failure_rate_last_24h":{"type":"number","description":"The failure rate over the last 24 hours"},"file_path":{"type":"string","description":"The file path of the test"},"parent":{"type":"string","description":"The parent of the test. This includes the test suite (depending on the test runner)"},"classname":{"type":"string","description":"The class name of the test"},"codeowners":{"type":"array","items":{"type":"string"},"description":"Code owners for the test"},"pull_requests_impacted_last_7d":{"type":"integer","minimum":0,"description":"The number of pull requests impacted in the last 7 days"},"quarantined":{"type":"boolean","description":"Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed."},"ticket":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the associated ticket"}},"required":["html_url"]}},"required":["id","repository","html_url","name","variant","status","most_common_failures","failure_rate_last_7d","failure_rate_last_24h","codeowners","pull_requests_impacted_last_7d","quarantined"]},"description":"A page of failing test cases."},"page":{"type":"object","properties":{"total_rows":{"type":"number","minimum":0,"description":"The total number of test cases in the paginated list."},"total_pages":{"type":"number","minimum":0,"description":"The total number of pages in the paginated list of test 
cases."},"next_page_token":{"type":"string","description":"The next page token to use for pagination. See `page_token` in the request for more information."},"prev_page_token":{"type":"string","description":"The previous page token to use for pagination. See `page_token` in the request for more information."},"last_page_token":{"type":"string","description":"The last page token to use for pagination. See `page_token` in the request for more information."},"page_index":{"type":"number","minimum":0,"description":"The index of the current page in the paginated list of test cases."}},"required":["total_rows","total_pages","next_page_token","prev_page_token","last_page_token","page_index"],"description":"Pagination information for the list of test cases."}},"required":["tests","page"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/list-unhealthy-tests + +> Get a list of unhealthy tests + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/list-unhealthy-tests":{"post":{"summary":"Get a list of unhealthy tests","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. 
`owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"page_query":{"type":"object","properties":{"page_size":{"type":"integer","minimum":1,"maximum":100,"description":"The number of tests to return per page."},"page_token":{"type":"string","description":"The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty."}},"required":["page_size"],"description":"Pagination options for the list of tests."},"status":{"type":"string","enum":["FLAKY","BROKEN"],"description":"The status filter for unhealthy tests."}},"required":["repo","org_url_slug","page_query","status"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"tests":{"type":"array","items":{"type":"object","properties":{"id":{"type":"string","format":"uuid","description":"A stable unique identifier for the test"},"repository":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the repository"}},"required":["html_url"]},"html_url":{"type":"string","format":"uri","description":"The URL of the test details"},"name":{"type":"string","description":"The name of the test"},"variant":{"type":"string","description":"The name of the test variant"},"status":{"type":"object","properties":{"value":{"type":"string","enum":["healthy","flaky","broken"],"description":"The current status value in lowercase"},"reason":{"type":"string","description":"The reason for the current status"},"timestamp":{"type":"string","format":"date-time","description":"The timestamp of the 
current status change"}},"required":["value","reason","timestamp"]},"file_path":{"type":"string","description":"The file path of the test"},"parent":{"type":"string","description":"The parent of the test. This includes the test suite (depending on the test runner)"},"classname":{"type":"string","description":"The class name of the test"},"codeowners":{"type":"array","items":{"type":"string"},"description":"Code owners for the test"},"pull_requests_impacted_last_7d":{"type":"integer","minimum":0,"description":"The number of pull requests impacted in the last 7 days"},"quarantined":{"type":"boolean","description":"Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed."},"ticket":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the associated ticket"}},"required":["html_url"]}},"required":["id","repository","html_url","name","variant","status","codeowners","pull_requests_impacted_last_7d","quarantined"]},"description":"A page of unhealthy test cases."},"page":{"type":"object","properties":{"total_rows":{"type":"number","minimum":0,"description":"The total number of test cases in the paginated list."},"total_pages":{"type":"number","minimum":0,"description":"The total number of pages in the paginated list of test cases."},"next_page_token":{"type":"string","description":"The next page token to use for pagination. See `page_token` in the request for more information."},"prev_page_token":{"type":"string","description":"The previous page token to use for pagination. 
See `page_token` in the request for more information."},"last_page_token":{"type":"string","description":"The last page token to use for pagination. See `page_token` in the request for more information."},"page_index":{"type":"number","minimum":0,"description":"The index of the current page in the paginated list of test cases."}},"required":["total_rows","total_pages","next_page_token","prev_page_token","last_page_token","page_index"],"description":"Pagination information for the list of test cases."}},"required":["tests","page"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/list-quarantined-tests + +> Get a list of quarantined tests + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/list-quarantined-tests":{"post":{"summary":"Get a list of quarantined tests","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. 
Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"page_query":{"type":"object","properties":{"page_size":{"type":"integer","minimum":1,"maximum":100,"description":"The number of tests to return per page."},"page_token":{"type":"string","description":"The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty."}},"required":["page_size"],"description":"Pagination options for the list of tests."}},"required":["repo","org_url_slug","page_query"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"quarantined_tests":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string","description":"The name of the test case."},"parent":{"type":["string","null"],"description":"The parent of the test case."},"file":{"type":["string","null"],"description":"The file of the test case."},"classname":{"type":["string","null"],"description":"The class name of the test case."},"status":{"type":"string","enum":["HEALTHY","FLAKY","BROKEN"],"description":"The status of the test case."},"codeowners":{"type":"array","items":{"type":"string"},"description":"The latest codeowners of the test case."},"quarantine_setting":{"type":"string","enum":["ALWAYS_QUARANTINE","AUTO_QUARANTINE"],"description":"The quarantine setting of the test case."},"quarantined_at":{"type":"string","format":"date-time","description":"The time at which the test case was quarantined, if applicable."},"status_last_updated_at":{"type":"string","format":"date-time","description":"The last time the status of the test case was updated."},"test_case_id":{"type":"string","description":"The ID of the test case. 
This value is unstable and should not be relied upon."},"variant":{"type":"string","description":"The variant of the test case."}},"required":["name","parent","file","classname","status","codeowners","quarantine_setting","quarantined_at","status_last_updated_at","test_case_id","variant"],"description":"A quarantined test case."},"description":"A page of quarantined test cases."},"page":{"type":"object","properties":{"total_rows":{"type":"number","minimum":0,"description":"The total number of test cases in the paginated list."},"total_pages":{"type":"number","minimum":0,"description":"The total number of pages in the paginated list of test cases."},"next_page_token":{"type":"string","description":"The next page token to use for pagination. See `page_token` in the request for more information."},"prev_page_token":{"type":"string","description":"The previous page token to use for pagination. See `page_token` in the request for more information."},"last_page_token":{"type":"string","description":"The last page token to use for pagination. See `page_token` in the request for more information."},"page_index":{"type":"number","minimum":0,"description":"The index of the current page in the paginated list of test cases."}},"required":["total_rows","total_pages","next_page_token","prev_page_token","last_page_token","page_index"],"description":"Pagination information for the list of test cases."}},"required":["quarantined_tests","page"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` diff --git a/flaky-tests/get-started.mdx b/flaky-tests/get-started.mdx new file mode 100644 index 0000000..8c38aeb --- /dev/null +++ b/flaky-tests/get-started.mdx @@ -0,0 +1,59 @@ +--- +title: "Getting Started" +description: "Trunk Flaky Tests detects flaky tests by analyzing test results from your CI runs. Setup requires configuring test result output and CI upload integration." 
+--- +Trunk Flaky Tests detects flaky tests by analyzing test results from your CI runs. Setup requires configuring test result output and CI upload integration. + +### Prerequisites + +* Account at [app.trunk.io](https://app.trunk.io) +* Ability to modify repository CI configuration and add secrets +* Tests running in CI on both PRs and stable branches (e.g., main) + +#### Step 1: Ensure JUnit XML output + +Trunk ingests test results in JUnit XML format. If your CI already generates JUnit XML, note the file paths and skip to Step 2. + +If not, configure your test frameworks to output JUnit XML: + +* See [**Test Frameworks**](/flaky-tests/get-started/frameworks) for framework-specific configuration +* Supports multiple frameworks simultaneously + +#### Step 2: Configure CI uploads + +Add test result uploads to all CI jobs that run tests. + +1. See [**CI Providers**](/flaky-tests/get-started/ci-providers) for integration instructions +2. Configure uploads in jobs that run on: + * Pull request branches + * Stable branches (`main`, `master`, `develop`, etc.) + * Merge queue branches (if applicable) + +Uploads from both PRs and stable branches are required for accurate flaky test detection. + +#### Step 3: Verify integration + +1. Push your changes and trigger a CI run +2. Check CI logs for successful upload confirmation +3. Results typically appear within a few minutes. Verify uploads appear at [app.trunk.io](https://app.trunk.io) → your repo → **Flaky Tests > Uploads** + +

Uploads tab

+ +#### Step 4: Configure flake detection + +After uploads are flowing, navigate to your repo → **Flaky Tests > Monitors** to set up detection. + +**Pass-on-retry** is enabled by default and is the recommended baseline for everyone. It catches the most common flakiness pattern — a test that fails and then passes on retry within the same commit — without any configuration needed. + +**Threshold monitors** let you detect flakiness based on failure rate over a rolling time window. How you configure them depends on your CI setup: + +* **If tests must pass before merging to main**, set up a threshold monitor scoped to `main` to catch an elevated failure rate. For example, if you run tests 5 times per day on `main`, a 24-hour rolling window with a minimum of 4 runs and a failure threshold of 25% is a reasonable starting point. This ensures the monitor has enough data before flagging anything. +* **If you use a merge queue**, consider a dedicated monitor scoped to your merge queue branches (e.g., `trunk-merge/*` or `gh-readonly-queue/*`). Failures here are especially suspicious since the code has already passed PR checks, so a low threshold is appropriate. + +[How threshold monitors work →](/flaky-tests/detection/threshold-monitor) + +#### Quarantining + +Quarantining suppresses failures from known flaky tests, preventing them from forcing CI re-runs or blocking your merge queue. Flaky tests continue to run and report results — they just don't cause pipeline failures while your team works on fixes. This is especially valuable for unblocking merge queues and keeping development velocity high. 
+ +[Configure Quarantining →](/flaky-tests/quarantining) diff --git a/flaky-tests/get-started/ci-providers.mdx b/flaky-tests/get-started/ci-providers.mdx new file mode 100644 index 0000000..4d61b08 --- /dev/null +++ b/flaky-tests/get-started/ci-providers.mdx @@ -0,0 +1,15 @@ +--- +title: "CI Providers" +description: "You can easily integrate Flaky Tests from any CI Provider" +--- +Trunk Flaky Tests integrates with your CI by adding an `Upload Test Results` step in each of your testing CI jobs via the [Trunk CLI](/flaky-tests/uploader). See the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing test reports for your test runner, which Trunk can ingest. + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +### Quickstart + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/ci-providers/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx b/flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx new file mode 100644 index 0000000..2ff4f73 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx @@ -0,0 +1,205 @@ +--- +title: "Atlassian Bamboo" +description: "Configure Atlassian Bamboo to upload test results to Trunk Flaky Tests" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Bamboo Plans to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. 
Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step as [Bamboo plan variables](https://confluence.atlassian.com/bamboo/bamboo-variables-289277087.html). Name them `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively, and mark `TRUNK_TOKEN` as a **Secret** variable in the Bamboo UI to prevent it from appearing in build logs. + +### Upload to Trunk + +Add an `Upload Test Results` step after running tests in each of your Bamboo jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection/threshold-monitor#stable-branch-patterns), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection/threshold-monitor#stable-branch-patterns), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example Bamboo Plan Spec + +The following is an example of a [Bamboo Plan Spec](https://confluence.atlassian.com/bamboo/bamboo-specs-894743906.html) that uploads test results after your tests run. The upload step is placed under `final-tasks` so it runs even when tests fail. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. 
+ + + + + +```yaml +version: 2 +plan: + project-key: + key: + name: Run Tests and Upload to Trunk.io + +Run Tests and Upload to Trunk: + key: + tasks: + - checkout: + description: Checkout Source Code + + - script: + name: Run Tests + body: | + # Your test command here + + final-tasks: + - script: + name: Upload Test Results to Trunk.io + body: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload \ + --junit-paths "" \ + --org-url-slug ${bamboo.TRUNK_ORG_SLUG} \ + --token ${bamboo.TRUNK_TOKEN} + +variables: + TRUNK_ORG_SLUG: + TRUNK_TOKEN: +``` + + + + + +```yaml +version: 2 +plan: + project-key: + key: + name: Run Tests and Upload to Trunk.io + +Run Tests and Upload to Trunk: + key: + tasks: + - checkout: + description: Checkout Source Code + + - script: + name: Run Tests + body: | + # Your test command here + + final-tasks: + - script: + name: Upload Test Results to Trunk.io + body: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload \ + --bazel-bep-path \ + --org-url-slug ${bamboo.TRUNK_ORG_SLUG} \ + --token ${bamboo.TRUNK_TOKEN} + +variables: + TRUNK_ORG_SLUG: + TRUNK_TOKEN: +``` + + + + + +```yaml +version: 2 +plan: + project-key: + key: + name: Run Tests and Upload to Trunk.io + +Run Tests and Upload to Trunk: + key: + tasks: + - checkout: + description: Checkout Source Code + + - script: + name: Run Tests + body: | + # Your test command here + + final-tasks: + - script: + name: Upload Test Results to Trunk.io + body: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload \ + --xcresult-path \ + --org-url-slug ${bamboo.TRUNK_ORG_SLUG} \ + --token ${bamboo.TRUNK_TOKEN} + +variables: + TRUNK_ORG_SLUG: + TRUNK_TOKEN: +``` + + + + + +#### Uploading from Pull Request Builds + +To detect flaky tests on pull requests, configure your plan to create [plan 
branches](https://confluence.atlassian.com/bamboo/using-plan-branches-289276872.html) for pull requests. Add the following to your Plan Spec: + +```yaml +branches: + create: + for-pull-request: + accept-fork: false +``` + +Bamboo automatically sets the `bamboo_repository_pr_key` variable on PR builds, which the Trunk CLI uses to associate uploads with the correct pull request. + + +**PR number not detected?** If your Bamboo setup does not set `bamboo_repository_pr_key`, you can override it by passing the `--pr-number` flag or setting the `TRUNK_PR_NUMBER` environment variable when running the upload command. + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx b/flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx new file mode 100644 index 0000000..cad5e09 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx @@ -0,0 +1,169 @@ +--- +title: "Azure DevOps Pipelines" +description: "Trunk Flaky Tests integrates with your CI by adding a step in your Azure DevOps Pipelines to upload tests with the Trunk Uploader CLI." +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Azure DevOps Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). 
+ + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step in your Azure DevOps Pipelines as new variables named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. 
+ + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Add Uploader to Testing Pipelines + +The following is an example of a workflow step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. + + + + + +```yaml +trigger: +- main + +pool: + vmImage: ubuntu-latest + +steps: +# ... Omitted steps + +- script: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --junit-paths "" \ + --org-url-slug $(TRUNK_ORG_SLUG) \ + --token $(TRUNK_TOKEN) + condition: always() # this should always run + displayName: Upload test results to Trunk.io +``` + + + + + +```yaml +trigger: +- main + +pool: + vmImage: ubuntu-latest + +steps: +# ... Omitted steps + +- script: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --bazel-bep-path \ + --org-url-slug $(TRUNK_ORG_SLUG) \ + --token $(TRUNK_TOKEN) + condition: always() # this should always run + displayName: Upload test results to Trunk.io +``` + + + + + +```yaml +trigger: +- main + +pool: + vmImage: ubuntu-latest + +steps: +# ... 
Omitted steps + +- script: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --xcresult-path \ + --org-url-slug $(TRUNK_ORG_SLUG) \ + --token $(TRUNK_TOKEN) + condition: always() # this should always run + displayName: Upload test results to Trunk.io +``` + + + + + +```yaml +trigger: +- main + +pool: + vmImage: ubuntu-latest + +steps: +# ... Omitted steps + +- script: | + TRUNK_ORG_URL_SLUG=$(TRUNK_ORG_SLUG) \ + TRUNK_API_TOKEN=$(TRUNK_TOKEN) \ + bundle exec rspec + displayName: Run RSpec tests and upload results to Trunk.io +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + +[Learn more about cleaning up artifacts in Azure DevOps Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/repos/pipeline-options-for-git?view=azure-devops\&tabs=yaml#clean-the-local-repo-on-the-agent) + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx b/flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx new file mode 100644 index 0000000..0d225bf --- /dev/null +++ b/flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx @@ -0,0 +1,173 @@ +--- +title: "BitBucket Pipelines" +description: "Trunk Flaky Tests integrates with your CI by adding a step in your BitBucket Pipelines to upload tests with the Trunk Uploader CLI." 
+--- +Trunk Flaky Tests integrates with your CI by adding a step in your BitBucket Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step in your BitBucket as a new variable named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an `after-script` step after running tests in each of your CI jobs that run tests. 
This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Add Uploader to Testing Pipelines + +The following is an example of a workflow step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the JUnit XML files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. + + + + + + +```yaml +image: + +pipelines: + default: + - step: + # ... omitted setup and build steps + - step: + name: Run Tests and Upload Results + script: + - + after-script: + # This ensures trunk upload runs even if the test script fails + - | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --junit-paths "**/junit.xml" \ + --org-url-slug $TRUNK_ORG_SLUG \ + --token $TRUNK_TOKEN +``` + + + + + + +```yaml +image: + +pipelines: + default: + - step: + # ... 
omitted setup and build steps + - step: + name: Run Tests and Upload Results + script: + - + after-script: + # This ensures trunk upload runs even if the test script fails + - | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --bazel-bep-path \ + --org-url-slug $TRUNK_ORG_SLUG \ + --token $TRUNK_TOKEN +``` + + + + + +```yaml +image: + +pipelines: + default: + - step: + # ... omitted setup and build steps + - step: + name: Run Tests and Upload Results + script: + - + after-script: + # This ensures trunk upload runs even if the test script fails + - | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --xcresult-path \ + --org-url-slug $TRUNK_ORG_SLUG \ + --token $TRUNK_TOKEN +``` + + + + + +```yaml +image: + +pipelines: + default: + - step: + # ... omitted setup and build steps + - step: + name: Run Tests and Upload Results + script: + - | + TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG \ + TRUNK_API_TOKEN=$TRUNK_TOKEN \ + bundle exec rspec +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + +You can do this by omitting the `artifacts` definitions in the test steps of your configuration. [Learn more about artifacts in BitBucket Pipelines](https://support.atlassian.com/bitbucket-cloud/docs/use-artifacts-in-steps/). + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
+ diff --git a/flaky-tests/get-started/ci-providers/buildkite.mdx b/flaky-tests/get-started/ci-providers/buildkite.mdx new file mode 100644 index 0000000..6072276 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/buildkite.mdx @@ -0,0 +1,142 @@ +--- +title: "Buildkite" +description: "Configure Buildkite jobs to upload test results to Trunk Flaky Tests" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Buildkite Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. 
+ +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step as new [Buildkite CI secrets](https://buildkite.com/docs/pipelines/security/secrets/managing) named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example Buildkite Pipeline + +The following is an example of a Buildkite step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. + + + + + +```yaml +steps: + - label: Run Tests + command: ... + key: tests + + - label: Upload Test Results to Trunk.io + commands: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN + key: upload + depends_on: + - tests +``` + + + + + +```yaml +steps: + - label: Run Tests + command: ...
+ key: tests + + - label: Upload Test Results to Trunk.io + commands: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN + key: upload + depends_on: + - tests +``` + + + + + +```yaml +steps: + - label: Run Tests + command: ... + key: tests + + - label: Upload Test Results to Trunk.io + commands: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN + key: upload + depends_on: + - tests +``` + + + + + +```yaml +steps: + - label: Run Tests and Upload Results to Trunk.io + command: TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec + key: tests +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/ci-providers/circleci.mdx b/flaky-tests/get-started/ci-providers/circleci.mdx new file mode 100644 index 0000000..3c461cf --- /dev/null +++ b/flaky-tests/get-started/ci-providers/circleci.mdx @@ -0,0 +1,152 @@ +--- +title: "CircleCI" +description: "Configure CircleCI jobs to upload test results to Trunk Flaky Tests" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your CircleCI Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). 
+ + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store your Trunk slug and API token in your CircleCI project settings under **Environment Variables** as new variables named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. 
+ + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example CircleCI workflow + +The following is an example of a workflow step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. + + + + + +```yaml +jobs: + test-node: + # Install node dependencies and run tests + executor: node/default + steps: + - run: + name: Run Tests + command: ... + + - run: + name: Upload Test Results to Trunk.io + command: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + ./trunk flakytests upload --junit-paths "**/junit.xml" --org-url-slug --token ${TRUNK_TOKEN} +``` + + + + + +```yaml +jobs: + test-node: + # Install node dependencies and run tests + executor: node/default + steps: + - run: + name: Run Tests + command: ... + + - run: + name: Upload Test Results to Trunk.io + command: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + ./trunk flakytests upload --bazel-bep-path --org-url-slug --token ${TRUNK_TOKEN} +``` + + + + + +```yaml +jobs: + test-node: + # Install node dependencies and run tests + executor: node/default + steps: + - run: + name: Run Tests + command: ... 
+ + - run: + name: Upload Test Results to Trunk.io + command: | + curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + ./trunk flakytests upload --xcresult-path --org-url-slug --token ${TRUNK_TOKEN} +``` + + + + + +```yaml +jobs: + test-node: + # Install node dependencies and run tests + executor: node/default + steps: + - run: + name: Run Tests and Upload Results to Trunk.io + command: TRUNK_ORG_URL_SLUG=${TRUNK_ORG_SLUG} TRUNK_API_TOKEN=${TRUNK_TOKEN} bundle exec rspec +``` + + + + + +See the [Uploader CLI Reference](/flaky-tests/uploader) for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/ci-providers/droneci.mdx b/flaky-tests/get-started/ci-providers/droneci.mdx new file mode 100644 index 0000000..a4f26d2 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/droneci.mdx @@ -0,0 +1,166 @@ +--- +title: "Drone CI" +description: "Configure Flaky Tests using Drone CI" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Drone CI Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. 
+ + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store your Trunk slug and API token in your Drone CI project settings as new variables named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. 
To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Add Uploader to Testing Pipelines + +The following is an example of a workflow step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. + + + + + +```yaml +kind: pipeline +type: docker +name: test + +steps: + - name: Run Tests + commands: ... + + - name: Upload Test Results to Trunk.io + environment: + TRUNK_ORG_SLUG: + from_secret: TRUNK_ORG_SLUG + TRUNK_TOKEN: + from_secret: TRUNK_TOKEN + commands: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +kind: pipeline +type: docker +name: test + +steps: + - name: Run Tests + commands: ... + + - name: Upload Test Results to Trunk.io + environment: + TRUNK_ORG_SLUG: + from_secret: TRUNK_ORG_SLUG + TRUNK_TOKEN: + from_secret: TRUNK_TOKEN + commands: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +kind: pipeline +type: docker +name: test + +steps: + - name: Run Tests + commands: ...
+ + - name: Upload Test Results to Trunk.io + environment: + TRUNK_ORG_SLUG: + from_secret: TRUNK_ORG_SLUG + TRUNK_TOKEN: + from_secret: TRUNK_TOKEN + commands: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +kind: pipeline +type: docker +name: test + +steps: + - name: Run Tests and Upload Results to Trunk.io + environment: + TRUNK_ORG_SLUG: + from_secret: TRUNK_ORG_SLUG + TRUNK_TOKEN: + from_secret: TRUNK_TOKEN + commands: + - TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/ci-providers/github-actions.mdx b/flaky-tests/get-started/ci-providers/github-actions.mdx new file mode 100644 index 0000000..3da8e59 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/github-actions.mdx @@ -0,0 +1,371 @@ +--- +title: "GitHub Actions" +description: "Configure Flaky Tests detection using a GitHub Action" +--- +Before you start these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing Trunk-compatible reports for your test runner. + +Trunk Flaky Tests integrates with your CI by adding a step in your GitHub Action workflow to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader).
+ +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as secrets in GitHub Actions +* [ ] Configure GitHub Actions to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > General > Organization > Name**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > General > API > API Key**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Add Your Trunk Token and Organization Slug as Secrets + +Store the Trunk slug and API token obtained in the previous step in your repo as [GitHub secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions) named `TRUNK_ORG_URL_SLUG` and `TRUNK_API_TOKEN` respectively. + +### Upload to Trunk + +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should minimally include all jobs that run on pull requests, as well as jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. 
This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example GitHub Actions Workflow + +The following is an example of a GitHub Actions workflow step to upload test results after your tests using Trunk's [**Analytics Uploader Action**](https://github.com/trunk-io/analytics-uploader). + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [**Test Frameworks**](/flaky-tests/get-started/frameworks) docs. + + + + + +```yaml +jobs: + test: + name: Upload Tests + runs-on: ubuntu-latest + + steps: + - name: Run Tests + run: ... + + - name: Upload Test Results to Trunk.io + if: ${{ !cancelled() }} # Upload the results even if the tests fail + continue-on-error: true # don't fail this job if the upload fails + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: **/junit.xml + org-slug: + token: ${{ secrets.TRUNK_TOKEN }} +``` + + + + + +```yaml +jobs: + test: + name: Upload Tests + runs-on: ubuntu-latest + + steps: + - name: Run Tests + run: ... + + - name: Upload Test Results to Trunk.io + if: ${{ !cancelled() }} # Upload the results even if the tests fail + continue-on-error: true # don't fail this job if the upload fails + uses: trunk-io/analytics-uploader@v1 + with: + xcresult-path: ./test-results.xcresult + org-slug: + token: ${{ secrets.TRUNK_TOKEN }} +``` + + + + + +```yaml +jobs: + test: + name: Upload Tests + runs-on: ubuntu-latest + + steps: + - name: Run Tests + run: ... 
+ + - name: Upload Test Results to Trunk.io + if: ${{ !cancelled() }} # Upload the results even if the tests fail + continue-on-error: true # don't fail this job if the upload fails + uses: trunk-io/analytics-uploader@v1 + with: + bazel-bep-path: ./build_events.json + org-slug: + token: ${{ secrets.TRUNK_TOKEN }} +``` + + + + + +```yaml +jobs: + test: + name: Run and Upload Tests + runs-on: ubuntu-latest + + steps: + - name: Run Tests and Upload Results to Trunk.io + run: TRUNK_ORG_URL_SLUG=${{ secrets.TRUNK_ORG_SLUG }} TRUNK_API_TOKEN=${{ secrets.TRUNK_TOKEN }} bundle exec rspec + +``` + + + + + +See the [GitHub Actions Reference page](https://github.com/trunk-io/analytics-uploader) for all available CLI arguments and usage. + +#### Enable quarantining + +You can quarantine flaky tests by wrapping the test command or as a follow-up step. + + + + + + +Using the Trunk Analytics Uploader Action in your GitHub Actions Workflow files, may need modifications to your workflow files to support quarantining. + +If you upload your test results as a second step after you run your tests, **you need to add** `continue-on-error: true` **on your test step so your CI** job will continue even on failures. + + +Here's an example file. + + +```yaml +name: Run Tests And Upload Results +on: + workflow_dispatch: +jobs: + upload-test-results: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Run Tests + id: unit_tests + shell: bash + run: # command to run tests goes here + continue-on-error: true # ensure CI job continues to upload step on errors + + - name: Upload test results + if: always() + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} +``` + + +If you want to run the test command and upload in a single step, the test command must be **run via the Analytics Uploader** through the `run: ` parameter. + +This will override the response code of the test command. 
Make sure to set `continue-on-error: false` so un-quarantined tests are blocking. + + +```yaml +name: Run Tests And Upload Results +on: + workflow_dispatch: +jobs: + upload-test-results: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run tests and upload results + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: + run: # command to run tests goes here + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} +``` + + + + + + +**Using Flaky Tests as a separate step** + + +If you upload your test results as a second step after you run your tests, you need to ensure your test step **continues on errors** so the upload step that's run after can quarantine failed tests. + +When quarantining is enabled, the `flakytests upload` command will **return an error** if there are unquarantined failures and return a status code 0 if all tests are quarantined. + + + +```bash +<command to run tests> || true # doesn't fail job on failure +./trunk flakytests upload \ + --org-url-slug $TRUNK_ORG_SLUG \ + --token $TRUNK_API_TOKEN \ + --junit-paths $JUNIT_PATH +``` + + +**Using Flaky Tests as a single step** + +You can also wrap the test command with the Trunk CLI. When wrapping the command with the Trunk CLI, if there are unquarantined tests, the command will return an error. If there are no unquarantined tests, the command will return a status code `0`. + +```bash +./trunk flakytests test \ + --org-url-slug $TRUNK_ORG_SLUG \ + --token $TRUNK_API_TOKEN \ + --junit-paths $JUNIT_PATH \ + --allow-empty-test-results \ + <command to run tests> +``` + + + + + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step.
+ +### Getting Direct Links to Job Logs + + +**Direct Links to Job Logs is an optional configuration, and relies on a** [**third-party actions dependency**](https://github.com/marketplace/actions/get-action-job-id)**.** + + +By default, Trunk Flaky Tests links to your overall workflow run when you click "Logs" on a test failure. However, GitHub Actions makes it difficult to get a direct link to the specific job where the test ran. + +If you want **direct links to individual job logs** instead of the workflow run, you can manually set the `JOB_URL` environment variable using a third-party action to extract the job ID. + +#### Setup + +1. **Add the job ID extraction step** to your workflow using a community action: + + +```yaml +jobs: + run_tests: + runs-on: ubuntu-latest + name: Run Tests # This name is important - use it in the next step + steps: + - name: Checkout + uses: actions/checkout@v3 + + # Extract the job ID + - name: Get Job ID + id: get-job-id + uses: ayachensiyuan/get-action-job-id@v1.6 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + job-name: Run Tests # Must match the job 'name' above +``` + + +2. 
**Pass the job URL** when uploading test results: + + +```yaml + - name: Run Tests + id: unit_tests + run: + continue-on-error: true + + - name: Upload test results + if: always() + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} + env: + JOB_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/job/${{ steps.get-job-id.outputs.jobId }} +``` + + +#### Complete Example + +Here's a full workflow example with direct job linking: + +```yaml +name: Run Tests And Upload Results +on: + push: + pull_request: + +jobs: + test-suite: + runs-on: ubuntu-latest + name: Test Suite + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Get Job ID + id: get-job-id + uses: ayachensiyuan/get-action-job-id@v1.6 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + job-name: Test Suite + + - name: Run Tests + run: npm test + continue-on-error: true + + - name: Upload test results + if: always() + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: junit.xml + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} + env: + JOB_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/job/${{ steps.get-job-id.outputs.jobId }} +``` + +#### How It Works + +* The `ayachensiyuan/get-action-job-id` [action](https://github.com/marketplace/actions/get-action-job-id) extracts the GitHub Actions job ID +* We construct the full job URL using: `https://github.com/{repo}/actions/runs/{run_id}/job/{job_id}` +* This URL is passed to Trunk via the `JOB_URL` environment variable +* When you click "Logs" on a test failure in Trunk, you'll go directly to that job's logs instead of the workflow overview + +#### Notes + +* The `job-name` parameter must **exactly match** your job's `name` field +* The `GITHUB_TOKEN` must have appropriate permissions to read workflow job information +* If the job ID extraction fails, Trunk will fall 
back to linking to the workflow run diff --git a/flaky-tests/get-started/ci-providers/gitlab.mdx b/flaky-tests/get-started/ci-providers/gitlab.mdx new file mode 100644 index 0000000..ef99dff --- /dev/null +++ b/flaky-tests/get-started/ci-providers/gitlab.mdx @@ -0,0 +1,158 @@ +--- +title: "GitLab" +description: "Configure Flaky Tests using GitLab CI" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your GitLab CI/CD pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. 
+ +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step in your GitLab CI/CD pipelines as new [GitLab Variables](https://docs.gitlab.com/ee/ci/variables/index.html#for-a-project) named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an `upload_test_results` step after running tests in each of your CI jobs that run tests. This should minimally include all jobs that run on pull requests, as well as jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example GitLab Pipeline + +The following is an example of a GitLab pipeline step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. + + + + + +```yaml +image: node:latest + +stages: # List of stages for jobs, and their order of execution + - test + - flaky-tests + +unit_test_job: # This job runs the tests + stage: test + script: ...
+ +upload_test_results: # This job uploads tests results run in the last stage to Trunk.io + stage: flaky-tests + script: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +image: node:latest + +stages: # List of stages for jobs, and their order of execution + - test + - flaky-tests + +unit_test_job: # This job runs the tests + stage: test + script: ... + +upload_test_results: # This job uploads tests results run in the last stage to Trunk.io + stage: flaky-tests + script: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +image: node:latest + +stages: # List of stages for jobs, and their order of execution + - test + - flaky-tests + +unit_test_job: # This job runs the tests + stage: test + script: ... + +upload_test_results: # This job uploads tests results run in the last stage to Trunk.io + stage: flaky-tests + script: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +image: node:latest + +stages: # List of stages for jobs, and their order of execution + - test + +unit_test_job: # This job runs the tests and uploads the results to Trunk.io + stage: test + script: + - TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. 
If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/ci-providers/jenkins.mdx b/flaky-tests/get-started/ci-providers/jenkins.mdx new file mode 100644 index 0000000..7b7f311 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/jenkins.mdx @@ -0,0 +1,155 @@ +--- +title: "Jenkins" +description: "Configure Flaky Tests using Jenkins" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Jenkins Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. 
+ +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step in your Jenkins as new credentials named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example Jenkins Pipeline + +The following is an example of a Jenkins pipeline step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs + + + + + +```groovy +pipeline { + environment { + TRUNK_TOKEN = credentials('TRUNK_TOKEN') + } + stages { + stage('Run Tests'){ + ... 
+ } + stage('Upload Test Results'){ + sh 'curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk' + sh './trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN' + } + } +} +``` + + + + + +```yaml +pipeline { + environment { + TRUNK_TOKEN = credentials('TRUNK_TOKEN') + } + stages { + stage('Run Tests'){ + ... + } + stage('Upload Test Results'){ + sh 'curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk' + sh './trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN' + } + } +} +``` + + + + + +```yaml +pipeline { + environment { + TRUNK_TOKEN = credentials('TRUNK_TOKEN') + } + stages { + stage('Run Tests'){ + ... + } + stage('Upload Test Results'){ + sh 'curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk' + sh './trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN' + } + } +} +``` + + + + + +```groovy +pipeline { + environment { + TRUNK_ORG_SLUG = credentials('TRUNK_ORG_SLUG') + TRUNK_TOKEN = credentials('TRUNK_TOKEN') + } + stages { + stage('Run Tests and Upload Results to Trunk.io'){ + sh 'TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec' + } + } +} +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
+ diff --git a/flaky-tests/get-started/ci-providers/otherci.mdx b/flaky-tests/get-started/ci-providers/otherci.mdx new file mode 100644 index 0000000..8613989 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/otherci.mdx @@ -0,0 +1,170 @@ +--- +title: "Other CI Providers" +description: "Configure Flaky Tests using any CI Provider" +--- +Trunk Flaky Tests integrates with your CI provider by adding an upload step in each of your testing CI jobs via the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing JUnit XML output for your test runner, supported by virtually all test frameworks, which is what Trunk ingests. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. 
Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step in your CI provider as a secret, environment variable, or an equivalent concept and name them `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example,`main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example Upload Script + +The following is an example of a script to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. 
+ +You can install the Trunk CLI locally like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +Then, you can validate the results using the `validate` subcommand like this: + +```bash +./trunk-analytics-cli validate --junit-paths +``` + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Environment Variables + +Set these environment variables before running `trunk flakytests upload` on unsupported CI systems: + + +**Config Requirement:** `CUSTOM` must be set to `true` for environment variables to take effect and override the auto-detection of CI. + +All other variables are optional but recommended. + + +| Variable | Description | Example | +| ---------------- | ---------------------------------------------------------------------------------------------------------------- | ------------------------------------- | +| `CUSTOM` | Set to `true` to indicate this CI system is not one of our supported providers | `CUSTOM=true` | +| `JOB_URL` | Direct link to the CI job/build page.
This is the link users will click when viewing test failure logs in Trunk. | `https://ci.example.com/builds/12345` | +| `JOB_NAME` | Name of the CI job or test suite | `unit-tests` | +| `AUTHOR_EMAIL` | Email address of the commit author | `dev@example.com` | +| `AUTHOR_NAME` | Full name of the commit author | `Jane Developer` | +| `COMMIT_BRANCH` | Git branch being tested | `main` | +| `COMMIT_MESSAGE` | Commit message for the tested commit | `Fix authentication bug` | +| `PR_NUMBER` | Pull request number (if applicable) | `123` | +| `PR_TITLE` | Pull request title (if applicable) | `Add new feature` | + +#### About JOB\_URL + +The `JOB_URL` variable controls where the "Logs" link in Trunk Flaky Tests points to. When users click "Logs" on a test failure, they'll be taken to this URL to view the complete CI job output. + +**Best practice:** Provide the most specific link possible: + +* ✅ Direct link to the specific job/build where the test ran +* ✅ Link that shows the full logs and test output +* ❌ Link to a dashboard or workflow overview (less helpful for debugging) + + +**For GitHub Actions users:** While GitHub Actions is auto-detected, you can override the default workflow URL with a direct job URL. See [GitHub Actions - Getting Direct Links to Job Logs](/flaky-tests/get-started/ci-providers/github-actions) for instructions. + + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
+ diff --git a/flaky-tests/get-started/ci-providers/semaphoreci.mdx b/flaky-tests/get-started/ci-providers/semaphoreci.mdx new file mode 100644 index 0000000..6d166b8 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/semaphoreci.mdx @@ -0,0 +1,206 @@ +--- +title: "Semaphore CI" +description: "Configure Flaky Tests using Semaphore CI" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Semaphore CI Pipeline to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. 
+ +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step in your Semaphore CI Pipelines as new secrets named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example Semaphore CI Workflow + +The following is an example of a Semaphore CI workflow step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. + + + + + +```yaml +version: v1.0 +name: Semaphore JavaScript Example Pipeline +blocks: + - name: Tests + task: + secrets: + - name: TRUNK_TOKEN + env_vars: + - name: NODE_ENV + value: test + - name: CI + value: "true" + prologue: + commands: + - checkout + - nvm use + - node --version + - npm --version + jobs: + - name: Run Tests + commands: ... 
+ epilogue: + always: + commands: + # Upload results to trunk.io + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token ${TRUNK_TOKEN} +``` + + + + + +```yaml +version: v1.0 +name: Semaphore JavaScript Example Pipeline +blocks: + - name: Tests + task: + secrets: + - name: TRUNK_TOKEN + env_vars: + - name: NODE_ENV + value: test + - name: CI + value: "true" + prologue: + commands: + - checkout + - nvm use + - node --version + - npm --version + jobs: + - name: Run Tests + commands: ... + epilogue: + always: + commands: + # Upload results to trunk.io + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token ${TRUNK_TOKEN} +``` + + + + + +```yaml +version: v1.0 +name: Semaphore JavaScript Example Pipeline +blocks: + - name: Tests + task: + secrets: + - name: TRUNK_TOKEN + env_vars: + - name: NODE_ENV + value: test + - name: CI + value: "true" + prologue: + commands: + - checkout + - nvm use + - node --version + - npm --version + jobs: + - name: Run Tests + commands: ... + epilogue: + always: + commands: + # Upload results to trunk.io + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token ${TRUNK_TOKEN} +``` + + + + + +``` +version: v1.0 +name: Semaphore JavaScript Example Pipeline +blocks: + - name: Tests + task: + secrets: + - name: TRUNK_TOKEN + - name: TRUNK_ORG_SLUG + env_vars: + - name: NODE_ENV + value: test + - name: CI + value: "true" + prologue: + commands: + - checkout + - nvm use + - node --version + - npm --version + jobs: + - name: Run Tests + commands: + - TRUNK_ORG_URL_SLUG=${TRUNK_ORG_SLUG} TRUNK_API_TOKEN=${TRUNK_TOKEN} bundle exec rspec +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. 
+ +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/ci-providers/travisci.mdx b/flaky-tests/get-started/ci-providers/travisci.mdx new file mode 100644 index 0000000..a459103 --- /dev/null +++ b/flaky-tests/get-started/ci-providers/travisci.mdx @@ -0,0 +1,133 @@ +--- +title: "Travis CI" +description: "Configure Flaky Tests using Travis CI" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Travis CI Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Set your slug and token as a variable in CI +* [ ] Configure your CI to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. 
+ +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. + +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Add the Trunk Token as a Secret + +Store the Trunk slug and API token obtained in the previous step as new secrets named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. + +### Upload to Trunk + +Add a script step after running tests in each of your CI jobs that run tests. This should be run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. + +[Learn more about detection](/flaky-tests/detection) + + +#### Example Travis CI Workflow + +The following is an example of a Travis CI workflow step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. 
+ +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. + + +Note that TravisCI requires a recent version of Linux to use the current NodeJS runtimes. You may need to set the `dist` to `jammy` or later. See this [forum note](https://travis-ci.community/t/node-lib-x86-64-linux-gnu-libm-so-6-version-glibc-2-27-not-found-required-by-node/13655/2) for more details. + + + + + + +```yaml +language: node_js +dist: jammy +node_js: + - 20 +script: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +language: node_js +dist: jammy +node_js: + - 20 +script: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +language: node_js +dist: jammy +node_js: + - 20 +script: + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN +``` + + + + + +```yaml +language: node_js +dist: jammy +node_js: + - 20 +script: + - TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec +``` + + + + + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. 
+ + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. + diff --git a/flaky-tests/get-started/frameworks.mdx b/flaky-tests/get-started/frameworks.mdx new file mode 100644 index 0000000..d4e25d2 --- /dev/null +++ b/flaky-tests/get-started/frameworks.mdx @@ -0,0 +1,9 @@ +--- +title: "Test frameworks" +description: "Guides for generating Trunk-compatible test results from various test frameworks" +--- +Trunk Flaky Tests uses test results uploaded from your CI jobs to detect flaky tests. + +Follow one of the guides below to configure your test framework to output compatible test reports and integrate with Trunk. + +
Cover image
Androidandroidandroid.png
Bazelbazelbazel.png
Behavebehaveunittest.png
cargo-nextestrustcargo-next.png
Cypresscypresscypress.png
Dart Testdart-testdart.png
Gogotestsumgotestsum.png
GoogleTestgoogletestgoogletest.png
Gradlegradlegradle.png
Jasminejasminejasmine.png
Jestjestjest.png
Karmakarmakarma.png
Kotestkotestkotest.png
Mavenmavenmaven.png
minitestminitestminitest.png
Mochamochamocha.png
Nightwatchnightwatchnightwatch.png
NUnitnunitnunit.png
PESTpestpest.png
PHPUnitphpunitphpunit.png
Playwrightplaywrightplaywright.png
pytestpytestpytest.png
Robot Frameworkrobot-frameworkrobot.png
RSpecrspecrspec.png
Swift Testingswift-testingswift-testing.png
Testplantestplantestplan-box.png
Vitestvitestvitest.png
XCTestxctestxctest.png
diff --git a/flaky-tests/get-started/frameworks/android.mdx b/flaky-tests/get-started/frameworks/android.mdx new file mode 100644 index 0000000..e4c0c78 --- /dev/null +++ b/flaky-tests/get-started/frameworks/android.mdx @@ -0,0 +1,162 @@ +--- +title: "Android" +description: "A guide for generating Trunk-compatible test reports for Android projects" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Android projects by integrating with Trunk. This document explains how to configure Android to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating reports + +Android tests run with Gradle, typically using `./gradlew test` in CI. This will generate JUnit XML output by default, which you can further configure in your `build.gradle.kts` or `build.gradle`. + +#### Report file path + +By default, Android projects will produce a directory with JUnit XML reports under `./app/build/test-results`. + +You can customize the report output location in your `build.gradle.kts` or `build.gradle`, for example, writing the reports to `./app/junit-reports`. 
+ + + + + +```groovy +android { + testOptions { + unitTests { + all { + reports { + junitXml { + outputLocation = file("./junit-reports") + } + } + } + } + } +} +``` + + + + + +```kotlin +android { + testOptions { + unitTests { + all { + reports { + junitXml.outputLocation.set(file("./junit-reports")) + } + } + } + } +} +``` + + + + + +When you configure your CI provider to upload reports in later steps, you will be uploading the reports using a glob such as `./junit-reports/*.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk flaky tests. + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./app/junit-reports/*.xml" \ + --org-url-slug \ + --token +``` + +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/bazel.mdx b/flaky-tests/get-started/frameworks/bazel.mdx new file mode 100644 index 0000000..569a6d3 --- /dev/null +++ b/flaky-tests/get-started/frameworks/bazel.mdx @@ -0,0 +1,133 @@ +--- +title: "Bazel" +description: "A guide for generating Trunk-compatible test reports with Bazel" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Bazel projects by integrating with Trunk. This document explains how to configure Bazel to output compatible reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk can parse JSON serialized [Build Event Protocol (BEP) ](https://bazel.build/remote/bep)files to detect flaky tests. You can run tests with Bazel in CI with the `nobuild_event_json_file_path_conversion` option to produce a serialized BEP file: + +```sh +bazel test \ + --nobuild_event_json_file_path_conversion +``` + +#### Report File Path + +You can specify the path of the generated report through the `build_event_json_file` option: + +```sh +bazel test \ + --nobuild_event_json_file_path_conversion \ + --build_event_json_file=build_events.json +``` + +Trunk can parse the `build_events.json` file to locate your test reports. You will still need to **configure your test runners to output compatible reports**, and you can refer to the guides for [individual test frameworks](/flaky-tests/get-started/frameworks). 
+ +#### Build Without the Bytes + +If your CI environment is set up to [build without the bytes](https://blog.bazel.build/2023/10/06/bwob-in-bazel-7.html), you will need the following flag to pull the reports from the remote execution engine: + +```sh +--remote_download_regex='.*/test.xml' +``` + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them for more accurate detection results. + +Disable retries if you're retrying tests using the `--flaky_test_attempts` command line option or retrying in your test runner. + +### Try It Locally + +#### **The Validate Command** + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --bazel-bep-path=build_events.json +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --bazel-bep-path=build_events.json +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --bazel-bep-path=build_events.json +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --bazel-bep-path=build_events.json +``` + + + + + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --bazel-bep-path=build_events.json \ + --org-url-slug \ + --token +``` + +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/behave.mdx b/flaky-tests/get-started/frameworks/behave.mdx new file mode 100644 index 0000000..2e6c84e --- /dev/null +++ b/flaky-tests/get-started/frameworks/behave.mdx @@ -0,0 +1,130 @@ +--- +title: "Behave" +description: "A guide for generating Trunk-compatible test reports for Behave" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running Behave by integrating with Trunk. This document explains how to configure Behave to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. Behave can output JUnit XML reports which are compatible with Trunk. You can do so with the `--junit` option: + +```sh +behave --junit +``` + +#### Report File Path + +You can customize the file path of the reports using the `--junit-directory` option. + +```sh +behave --junit --junit-directory ./junit-reports +``` + +Behave outputs multiple XML reports under the JUnit directory. You can locate these when uploading the reports in CI with the `"./junit-reports/*.xml"` glob. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +You must remove the [rerun formatter](https://behave.readthedocs.io/en/latest/formatters/#formatters) from your `behave.ini` file if it is being used to automatically rerun failed tests. + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
+ +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit-reports/*.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/cypress.mdx b/flaky-tests/get-started/frameworks/cypress.mdx new file mode 100644 index 0000000..a6e9d4f --- /dev/null +++ b/flaky-tests/get-started/frameworks/cypress.mdx @@ -0,0 +1,136 @@ +--- +title: "Cypress" +description: "A guide for generating Trunk-compatible test reports for Cypress tests" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Cypress projects by integrating with Trunk. This document explains how to configure Cypress to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Cypress has a built-in XML reporter which you can use to output a Trunk-compatible report. + +Update your Cypress config, such as you `cypress.config.js` or `cypress.config.ts` file to output XML reports: + +```javascript +const { defineConfig } = require('cypress') + +module.exports = defineConfig({ + reporter: 'junit', + reporterOptions: { + mochaFile: './junit.xml', + toConsole: true, + }, +}) +``` + +#### Report File Path + +The JUnit report location is specified by the `mochaFile` property in your Cypress config. In the above example, the file will be at `./junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +You can disable retries by setting `retries: 0` in your Cypress config file. 
+ +```javascript +module.exports = defineConfig({ + retries: 0, +}) +``` + +### Try It Locally + +#### **The Validate Command** + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +## Next Step + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/dart-test.mdx b/flaky-tests/get-started/frameworks/dart-test.mdx new file mode 100644 index 0000000..3097e73 --- /dev/null +++ b/flaky-tests/get-started/frameworks/dart-test.mdx @@ -0,0 +1,124 @@ +--- +title: "Dart Test" +description: "A guide for generating Trunk-compatible test reports for Dart tests" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Dart projects by integrating with Trunk. This document explains how to configure Dart to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Before you can upload to Trunk, you need to output a Trunk-compatible report. Dart supports JUnit outputs by using the `tojunit` library. You can install the `tojunit` library using the following command: + +```sh +dart pub global activate junitreport +``` + +Then, you can convert test reports to a JUnit format by piping it to `tojunit`and piping the output to a file like this: + + +```sh +dart test --reporter json | tojunit > junit.xml +``` + + +#### Report File Path + +The JUnit report is written to the location specified by the `tojunit >` pipe. In the example above, the test results will be written to `./junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
+ +Dart provides retries through the [retry class annotations](https://pub.dev/documentation/test/latest/test/Retry-class.html). Disable retry, use Trunk to [detect](/flaky-tests/detection)[ flaky tests](/flaky-tests/detection), and use Quarantining to isolate flaky tests dynamically at run time. + +### Try It Locally + +#### **The Validate Command** + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/googletest.mdx b/flaky-tests/get-started/frameworks/googletest.mdx new file mode 100644 index 0000000..8e111da --- /dev/null +++ b/flaky-tests/get-started/frameworks/googletest.mdx @@ -0,0 +1,128 @@ +--- +title: "GoogleTest" +description: "A guide for generating Trunk-compatible test reports for GoogleTest" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your GoogleTest projects by integrating with Trunk. This document explains how to configure GoogleTest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Before you can integrate with Trunk, you need to generate a Trunk-compatible report. For GoogleTest, the built in XML reporter will work. You can use the [`--gtest_output=xml`](https://google.github.io/googletest/advanced.html#generating-an-xml-report) argument when you run your built test project: + +```shell +./build/run_test --gtest_output=xml +``` + +#### Report File Path + +By default, the JUnit report will be written to a `test_detail.xml` file. + +You can specify a custom directory and filename with: + +```bash +--gtest_output=xml: +``` + +For example, the following argument writes a JUnit report to `./junit.xml`: + +```bash +--gtest_output=xml:junit.xml +``` + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
+ +Omit the[ ](https://docs.pytest.org/en/stable/how-to/cache.html)[`--gtest_repeat`](https://google.github.io/googletest/advanced.html#repeating-the-tests) argument if you've previously configured your CI with these options to disable retries. + +### Try It Locally + +#### **The Validate Command** + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/gotestsum.mdx b/flaky-tests/get-started/frameworks/gotestsum.mdx new file mode 100644 index 0000000..797eb8f --- /dev/null +++ b/flaky-tests/get-started/frameworks/gotestsum.mdx @@ -0,0 +1,190 @@ +--- +title: "Go" +description: "A guide for generating Trunk-compatible test reports for Go tests" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Go projects by integrating with Trunk. This document explains how to configure Go to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### **Why an Extra Step for `go test`?** + +The standard Go test runner, `go test`, is excellent for executing tests and providing immediate feedback to developers. However, it does not natively produce test reports in the JUnit XML format that Trunk Flaky Tests requires for ingestion and analysis. Therefore, an additional tool is needed to convert the output of `go test` into this compatible format. This intermediate step ensures that Trunk can accurately process your test results and identify flaky tests. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report (JUnit XML). +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating JUnit XML Reports from Go Tests + +Before integrating with Trunk, you need to generate a Trunk-compatible report. For Go, `go test` does not output JUnit XML by default, so you must use a tool to format it. 
+ + + + + +Update your existing `go test` usage to generate json and use [**go-junit-report**](https://github.com/jstemmer/go-junit-report) to convert your standard Go testing output into JUnit XML. + +``` +go install github.com/jstemmer/go-junit-report/v2@latest +``` + +Then pipe `go test` into the `go-junit-report`: + +``` +go test -json 2>&1 | go-junit-report -parser gojson -out junit_report.xml +``` + + + + + +Install gotestsum into your project:\ +\ +`go install gotest.tools/gotestsum@latest`\ +\ +Call `gotestsum` to both execute your tests and generate the junit.xml file + +``` +gotestsum [path-to-tests-to-run] --junitfile ./junit.xml +``` + + + + + +Since `go test` doesn't directly output JUnit XML, you'll use a tool to convert its output. Here are two common options: + +#### Option 1: Using `gotestsum` + +* **What it is:** `gotestsum` is a Go test runner that wraps `go test`. It executes your tests (using `go test -json` for more structured input) and can format the results into JUnit XML, alongside other human-readable formats and test run summaries. +* **Why choose this approach:** You might prefer `gotestsum` if you favor using a single command that serves as a wrapper to both execute your Go tests (by calling `go test` internally) and directly generate the JUnit XML report required for flaky test analysis. +* **Installation:** Download from [releases](https://github.com/gotestyourself/gotestsum/releases) or install via `go install`: + +```bash +go install gotest.tools/gotestsum@latest +``` + +* **Usage:** + +```bash +gotestsum --junitfile ./junit-gotestsum.xml -- ./... +# The '-- ./...' passes arguments directly to 'go test'. +# Adjust './...' to target your specific packages if needed. +``` + +#### Option 2: Using `go-junit-report` + +* **What it is:** `go-junit-report` is a tool that converts the output of a standard `go test` command into JUnit XML. 
This is achieved by running `go test` and then piping its output to `go-junit-report` as a separate step. +* **Why choose this approach:** You might prefer `go-junit-report` if you want to keep your `go test` command distinct and add a separate, explicit step for converting its output to JUnit XML, often suitable for a minimal setup focused purely on this conversion. +* **Installation:** + +```bash +go install github.com/jstemmer/go-junit-report/v2@latest +``` + +* **Usage:** For reliable report generation, use `go test -json` and pipe its output. The `-parser gojson` flag tells `go-junit-report` to expect this JSON stream: + +```bash +go test -json ./... 2>&1 | go-junit-report -parser gojson -out report-go-junit.xml +# Adjust './...' to target your specific packages. +# 2>&1 ensures stderr (where build errors can appear) is also piped. +``` + +#### Report File Path + +The tools will write a JUnit test report to the file specified (e.g., `junit-gotestsum.xml` or `report-go-junit.xml`). You'll need this path when configuring uploads to Trunk. + +#### Disable Retries + +Regardless of the tool chosen, you need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests.\ +\ +If you're using a package like [**retry**](https://pkg.go.dev/github.com/hashicorp/consul/sdk/testutil/retry), disable it to get more accurate results from Trunk. 
+ +### Try It Locally + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/gradle.mdx b/flaky-tests/get-started/frameworks/gradle.mdx new file mode 100644 index 0000000..d24df02 --- /dev/null +++ b/flaky-tests/get-started/frameworks/gradle.mdx @@ -0,0 +1,144 @@ +--- +title: "Gradle" +description: "A guide for generating Trunk-compatible test reports for Gradle" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Gradle projects by integrating with Trunk. This document explains how to configure Gradle to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Tests run with Gradle will generate JUnit XML reports by default and are compatible with Trunk. You can further [configure reporting behavior](https://docs.gradle.org/8.10.2/userguide/java_testing.html#test_reporting) in your `build.gradle.kts` or `build.gradle`. + +#### Report File Path + +By default, Android projects will produce a directory with JUnit XML reports under `./app/build/test-results/test`. You can locate these files with the glob `"./app/build/test-results/test/*.xml"`. 
+ +If you wish to override the default test result path, you can do so in the `build.gradle.kts` or `build.gradle` files: + + + + + +```groovy +java.testResultsDir = layout.buildDirectory.dir("junit-reports") +``` + + + + + +```kotlin +java.testResultsDir = layout.buildDirectory.dir("junit-reports") +``` + + + + + +This example will write report files to `"./app/build/junit-reports/test/*.xml"` + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk flaky tests. + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./app/build/junit-reports/test/" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/jasmine.mdx b/flaky-tests/get-started/frameworks/jasmine.mdx new file mode 100644 index 0000000..9214a9e --- /dev/null +++ b/flaky-tests/get-started/frameworks/jasmine.mdx @@ -0,0 +1,154 @@ +--- +title: "Jasmine" +description: "A guide for generating Trunk-compatible test reports for Jasmine tests" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Jasmine projects by integrating with Trunk. This document explains how to configure Jasmine to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Before integrating with Trunk, you need to generate Trunk-compatible reports. For Jasmine, the easiest approach is to generate XML reports. + +First, install the [`jasmine-reporters`](https://www.npmjs.com/package/jasmine-reporters) package: + +```shell +npm install --save-dev jasmine-reporters +``` + +#### In-Browser tests + +When used for in-browser tests, the reporters are registered on a `jasmineReporters` object in the global scope (i.e. `window.jasmineReporters`). 
You can register it like this in your Jasmine config under `/spec/support/jasmine.mjs`: + +```javascript +import jasmineReporters from 'jasmine-reporters'; + +var junitReporter = new jasmineReporters.JUnitXmlReporter({ + savePath: "test-reports", + consolidateAll: false +}); +jasmine.getEnv().addReporter(junitReporter); +``` + +#### NodeJS + +In Node.js, `jasmine-reporters` exports an object with all the reporters. You can register it like this in your Jasmine config under `/spec/support/jasmine.mjs`: + +```javascript +var reporters = require('jasmine-reporters'); +var junitReporter = new reporters.JUnitXmlReporter({ + savePath: "test-reports", + consolidateAll: false +}); +jasmine.getEnv().addReporter(junitReporter) + +``` + +#### Report File Path + +Jasmine will generate an XML report at the location specified by the `savePath` property. In the examples above, the XML report can be located with the glob `test_reports/*.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +If you're using a package like [protractor-flake](https://www.npmjs.com/package/protractor-flake), disable it to get more accurate results from Trunk. Instead, you can mitigate flaky tests using the [Quarantining](/flaky-tests/quarantining) feature in Trunk. + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli flakytests upload --junit-paths "./junit-reports/*.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/jest.mdx b/flaky-tests/get-started/frameworks/jest.mdx new file mode 100644 index 0000000..255b211 --- /dev/null +++ b/flaky-tests/get-started/frameworks/jest.mdx @@ -0,0 +1,142 @@ +--- +title: "Jest" +description: "A guide for generating Trunk-compatible test reports for Jest tests" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Jest projects by integrating with Trunk. This document explains how to configure Jest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating XML reports from your test runs. + +To generate a Trunk-compatible XML report, install the `jest-junit` package: + +```bash +npm install --save-dev jest-junit +``` + +Update your Jest config to add `jest-junit` as a reporter: + +```json +{ + "reporters": [ + [ + "jest-junit", + { + "outputDirectory": "./", + "outputName": "junit.xml", + "addFileAttribute": "true", + "reportTestSuiteErrors": "true" + } + ] + ] +} +``` + +#### Report File Path + +The `outputDirectory` and `outputName` options specify the path of the XML report. You'll need this path later when configuring automatic uploads to Trunk. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. 
Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +If you have retries configured using the [jest.retryTimes method](https://jestjs.io/docs/jest-object#jestretrytimesnumretries-options), disable them for more accurate results. + +### Try It Locally + +#### **The Validate Command** + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
+ +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/karma.mdx b/flaky-tests/get-started/frameworks/karma.mdx new file mode 100644 index 0000000..119e6c9 --- /dev/null +++ b/flaky-tests/get-started/frameworks/karma.mdx @@ -0,0 +1,141 @@ +--- +title: "Karma" +description: "A guide for generating Trunk-compatible test reports for Karma tests" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Karma projects by integrating with Trunk. This document explains how to configure Karma to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating XML reports from your test runs. + +To generate a Trunk-compatible XML report, install the `karma-junit-reporter` package: + +```shell +npm install --save-dev karma-junit-reporter +``` + +Add the `junit` reporter to your karma config file: + +```javascript +module.exports = function(config) { + config.set( + { + reporters: ['junit'], + junitReporter: { + outputDir: 'test-reports', + } + } + ) +} +``` + +#### Report File Path + +The `outputDir` and `outputFile` specify the location of the JUnit test report. In the example above, the JUnit would be at `./test-reports/{$browserName}.xml`. You can locate the reports during uploads with the glob `./test-reports/*.xml`. 
+ +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +Karma doesn't support retries out of the box, but if you implemented retries, remember to disable them. + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit-reports/*.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/kotest.mdx b/flaky-tests/get-started/frameworks/kotest.mdx new file mode 100644 index 0000000..9b5a35c --- /dev/null +++ b/flaky-tests/get-started/frameworks/kotest.mdx @@ -0,0 +1,224 @@ +--- +title: "Kotest" +description: "A guide for generating Trunk-compatible test reports for Kotest" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Kotest projects by integrating with Trunk. This document explains how to configure Kotest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Steps for generating JUnit XML reports for Kotest depend on the build system you use for your project: + + + + + +Tests run with Gradle will generate Trunk-compatible JUnit XML reports by default. You can further [configure reporting behavior](https://docs.gradle.org/8.10.2/userguide/java_testing.html#test_reporting) in your `build.gradle.kts` or `build.gradle`. 
+ + + + + +Kotest projects using Maven require the following to be added to a project's `pom.xml` so JUnit XML reports can be generated: + +* the `maven-surefire-plugin` must be added to the `plugins` section of `pom.xml` + +```xml + + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.2.2 + + + + + + +``` + +* the `kotest-extensions-junitxml` must be added to the `dependencies` section of `pom.xml` + +```xml + + + io.kotest + kotest-extensions-junitxml-jvm + 5.9.0 + test + + + + +``` + + + + + +#### Report File Path + +You can configure the path for generated JUnit XML files: + + + + + +By default, Kotlin projects will produce a directory with JUnit XML reports under `./app/build/test-results/test`. You can locate these files with the glob `"./app/build/test-results/test/*.xml"`. + +If you wish to override the default test result path, you can do so in the `build.gradle.kts` or `build.gradle` files: + +```kotlin +java.testResultsDir = layout.buildDirectory.dir("junit-reports") +``` + + + + + +You can change the report file path by configuring the `reportsDirectory` in your `maven-surefire-plugin` in your `pom.xml` file: + +```xml + + org.apache.maven.plugins + maven-surefire-plugin + 3.2.2 + + ${project.build.directory}/junit/ + + +``` + +The example above will output JUnit XML reports that can be located with the `/target/junit/*.xml` glob. + + + + + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + + + + + +If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk flaky tests. 
+ + + + + +Maven uses the `maven-surefire-plugin` to run tests, which allows you to control the test retry behavior. You can disable retries by specifying 0 retries: + +``` +mvn -Dsurefire.rerunFailingTestsCount=0 test +``` + + + + + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +Make sure to specify the path to your JUnit XML test reports. + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
+ +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./app/junit-reports/*.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
<CardGroup cols={3}>
  <Card title="Atlassian Bamboo" href="atlassian-bamboo" />
  <Card title="Azure DevOps Pipelines" href="azure-devops-pipelines" img="azure.png" />
  <Card title="BitBucket Pipelines" href="bitbucket-pipelines" img="bitbucket.png" />
  <Card title="BuildKite" href="buildkite" img="buildkite.png" />
  <Card title="CircleCI" href="circleci" img="circle-ci.png" />
  <Card title="Drone CI" href="droneci" img="drone.png" />
  <Card title="GitHub Actions" href="github-actions" img="github.png" />
  <Card title="Gitlab" href="gitlab" img="gitlab.png" />
  <Card title="Jenkins" href="jenkins" img="jenkins.png" />
  <Card title="Semaphore" href="semaphoreci" img="semaphore.png" />
  {/* NOTE(review): this href is a GitBook "broken-reference" migration artifact — confirm the intended TeamCity destination */}
  <Card title="TeamCity" href="https://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.md" img="teamcity.png" />
  <Card title="Travis CI" href="travisci" img="travis.png" />
  <Card title="Other CI Providers" href="otherci" img="other.png" />
</CardGroup>
diff --git a/flaky-tests/get-started/frameworks/maven.mdx b/flaky-tests/get-started/frameworks/maven.mdx new file mode 100644 index 0000000..3f7126e --- /dev/null +++ b/flaky-tests/get-started/frameworks/maven.mdx @@ -0,0 +1,156 @@ +--- +title: "Maven" +description: "A guide for generating Trunk-compatible test reports for Maven" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Maven projects by integrating with Trunk. This document explains how to configure Maven to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Maven uses the `maven-surefire-plugin` by default to output JUnit XML reports, which is Trunk compatible. You can configure the plugin in your project's `pom.xml`. + +#### Report File Path + +You can change the report file path by configuring the `maven-surefire-plugin` plugin in your `pom.xml` file: + +```xml + + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.2.2 + + ${project.build.directory}/junit/ + + + + + +``` + +The example above will output JUnit XML reports that can be located with the `/target/junit/*.xml` glob. + +#### Using Kotlin and Kotest + +If you have a Kotlin project and are using the Kotest test framework, you also need to include `kotest-extensions-junitxml` in your project's `pom.xml`. This allows Kotest to generate JUnit XML reports. 
+ +```xml + + io.kotest + kotest-extensions-junitxml-jvm + 5.9.0 + test + +``` + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +Maven uses the `maven-surefire-plugin` to run tests, which allows you to control the test retry behavior. You can disable retries by specifying 0 retries: + +``` +mvn -Dsurefire.rerunFailingTestsCount=0 test +``` + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate 
--junit-paths "./app/junit-reports/*.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./app/junit-reports/*.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
<CardGroup cols={3}>
  <Card title="Atlassian Bamboo" href="atlassian-bamboo" />
  <Card title="Azure DevOps Pipelines" href="azure-devops-pipelines" img="azure.png" />
  <Card title="BitBucket Pipelines" href="bitbucket-pipelines" img="bitbucket.png" />
  <Card title="BuildKite" href="buildkite" img="buildkite.png" />
  <Card title="CircleCI" href="circleci" img="circle-ci.png" />
  <Card title="Drone CI" href="droneci" img="drone.png" />
  <Card title="GitHub Actions" href="github-actions" img="github.png" />
  <Card title="Gitlab" href="gitlab" img="gitlab.png" />
  <Card title="Jenkins" href="jenkins" img="jenkins.png" />
  <Card title="Semaphore" href="semaphoreci" img="semaphore.png" />
  {/* NOTE(review): this href is a GitBook "broken-reference" migration artifact — confirm the intended TeamCity destination */}
  <Card title="TeamCity" href="https://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.md" img="teamcity.png" />
  <Card title="Travis CI" href="travisci" img="travis.png" />
  <Card title="Other CI Providers" href="otherci" img="other.png" />
</CardGroup>
diff --git a/flaky-tests/get-started/frameworks/minitest.mdx b/flaky-tests/get-started/frameworks/minitest.mdx new file mode 100644 index 0000000..dfba7b6 --- /dev/null +++ b/flaky-tests/get-started/frameworks/minitest.mdx @@ -0,0 +1,139 @@ +--- +title: "minitest" +description: "A guide for generating Trunk-compatible test reports for minitest" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your minitest projects by integrating with Trunk. This document explains how to configure minitest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. + +To generate XML reports, install the `minitest-reporters` gem: + +```shell +gem install minitest-reporters +``` + +Configure the `JUnitReporter` reporter in your `test_helper.rb` file: + +```ruby +require "minitest/reporters" +Minitest::Reporters.use! Minitest::Reporters::JUnitReporter.new +``` + +#### Report File Path + +You can specify a file path for your minitest results with the `MINITEST_REPORTERS_REPORTS_DIR` environment variable: + +```sh +MINITEST_REPORTERS_REPORTS_DIR="./junit.xml" ruby -Ilib:test +``` + +This will automatically write all test results to JUnit XML files in the `results` directory. 
+ +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +Minitest doesn't support retries out of the box, but if you implemented retries or imported a package, remember to disable them. + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
<CardGroup cols={3}>
  <Card title="Azure DevOps Pipelines" href="azure-devops-pipelines" img="azure.png" />
  <Card title="BitBucket Pipelines" href="bitbucket-pipelines" img="bitbucket.png" />
  <Card title="BuildKite" href="buildkite" img="buildkite.png" />
  <Card title="CircleCI" href="circleci" img="circle-ci.png" />
  <Card title="Drone CI" href="droneci" img="drone.png" />
  <Card title="GitHub Actions" href="github-actions" img="github.png" />
  <Card title="Gitlab" href="gitlab" img="gitlab.png" />
  <Card title="Jenkins" href="jenkins" img="jenkins.png" />
  <Card title="Semaphore" href="semaphoreci" img="semaphore.png" />
  {/* NOTE(review): this href is a GitBook "broken-reference" migration artifact — confirm the intended TeamCity destination */}
  <Card title="TeamCity" href="https://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.md" img="teamcity.png" />
  <Card title="Travis CI" href="travisci" img="travis.png" />
  <Card title="Other CI Providers" href="otherci" img="other.png" />
</CardGroup>
diff --git a/flaky-tests/get-started/frameworks/mocha.mdx b/flaky-tests/get-started/frameworks/mocha.mdx new file mode 100644 index 0000000..0e31d2e --- /dev/null +++ b/flaky-tests/get-started/frameworks/mocha.mdx @@ -0,0 +1,143 @@ +--- +title: "Mocha" +description: "A guide for generating Trunk-compatible test reports for Mocha" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Mocha projects by integrating with Trunk. This document explains how to configure Mocha to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Before integrating with Trunk, you need to generate Trunk-compatible reports. For Mocha, the easiest approach is to generate XML reports. + +First, install the `mocha-junit-reporter` package: + +```shell +npm install --save-dev mocha-junit-reporter +``` + +You can then generate reports when you run your tests by providing the `--reporter` and `--reporter-options` options when you run your tests: + +```sh +mocha --reporter mocha-junit-reporter --reporter-options mochaFile=./junit.xml +``` + +You can configure your Mocha runner to use the reporter programmatically as well: + +```javascript +var mocha = new Mocha({ + reporter: 'mocha-junit-reporter', + reporterOptions: { + mochaFile: './junit.xml' + } +}); +``` + +#### Report File Path + +The resulting JUnit XML file will be written to the location specified by the `mochaFile` property in `reporterOptions`. 
In the examples above, the results would be at `./junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +You can disable retry by omitting the `--retries` CLI option and [removing retries for individual tests](https://mochajs.org/#retry-tests). + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
+ +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
<CardGroup cols={3}>
  <Card title="Azure DevOps Pipelines" href="azure-devops-pipelines" img="azure.png" />
  <Card title="BitBucket Pipelines" href="bitbucket-pipelines" img="bitbucket.png" />
  <Card title="BuildKite" href="buildkite" img="buildkite.png" />
  <Card title="CircleCI" href="circleci" img="circle-ci.png" />
  <Card title="Drone CI" href="droneci" img="drone.png" />
  <Card title="GitHub Actions" href="github-actions" img="github.png" />
  <Card title="Gitlab" href="gitlab" img="gitlab.png" />
  <Card title="Jenkins" href="jenkins" img="jenkins.png" />
  <Card title="Semaphore" href="semaphoreci" img="semaphore.png" />
  {/* NOTE(review): this href is a GitBook "broken-reference" migration artifact — confirm the intended TeamCity destination */}
  <Card title="TeamCity" href="https://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.md" img="teamcity.png" />
  <Card title="Travis CI" href="travisci" img="travis.png" />
  <Card title="Other CI Providers" href="otherci" img="other.png" />
</CardGroup>
diff --git a/flaky-tests/get-started/frameworks/nightwatch.mdx b/flaky-tests/get-started/frameworks/nightwatch.mdx new file mode 100644 index 0000000..7539066 --- /dev/null +++ b/flaky-tests/get-started/frameworks/nightwatch.mdx @@ -0,0 +1,141 @@ +--- +title: "Nightwatch" +description: "A guide for generating Trunk-compatible test reports for Nightwatch" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Nightwatch projects by integrating with Trunk. This document explains how to configure Nightwatch to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Nightwatch will automatically report test results in multiple formats. You can configure the output location by updating the `nightwatch.conf.cjs` config file. + +```javascript +module.exports = { + output_folder: 'test-reports', + ... +} +``` + +You can also specify output at runtime with the command line option `--output `: + +```sh +nightwatch --output ./test-reports +``` + +#### Report File Path + +Nightwatch outputs multiple reports for each test suite under the specified output folder. + +If you configured your output folder to be under `./test-reports`, the JUnit XML files will be found under `./test-reports/**`. You can upload multiple JUnit reports by using a glob like `./test-reports/**/*.xml`. + + +**Duplicate Uploads** + +When using globs, it's important to clean up old test reports between test runs. 
If your glob path contains old JUnit files, uploading old test results can cause tests to be mislabeled. + + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +Nightwatch doesn't implement any form of automatic retry for failed or flaky tests by default. If you have a custom implementation of retries, remember to disable them. + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
+ +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./test-reports/**/*.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
<CardGroup cols={3}>
  <Card title="Azure DevOps Pipelines" href="azure-devops-pipelines" img="azure.png" />
  <Card title="BitBucket Pipelines" href="bitbucket-pipelines" img="bitbucket.png" />
  <Card title="BuildKite" href="buildkite" img="buildkite.png" />
  <Card title="CircleCI" href="circleci" img="circle-ci.png" />
  <Card title="Drone CI" href="droneci" img="drone.png" />
  <Card title="GitHub Actions" href="github-actions" img="github.png" />
  <Card title="Gitlab" href="gitlab" img="gitlab.png" />
  <Card title="Jenkins" href="jenkins" img="jenkins.png" />
  <Card title="Semaphore" href="semaphoreci" img="semaphore.png" />
  {/* NOTE(review): this href is a GitBook "broken-reference" migration artifact — confirm the intended TeamCity destination */}
  <Card title="TeamCity" href="https://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.md" img="teamcity.png" />
  <Card title="Travis CI" href="travisci" img="travis.png" />
  <Card title="Other CI Providers" href="otherci" img="other.png" />
</CardGroup>
diff --git a/flaky-tests/get-started/frameworks/nunit.mdx b/flaky-tests/get-started/frameworks/nunit.mdx new file mode 100644 index 0000000..0d5a6f5 --- /dev/null +++ b/flaky-tests/get-started/frameworks/nunit.mdx @@ -0,0 +1,128 @@ +--- +title: "NUnit" +description: "A guide for generating Trunk-compatible test reports for NUnit" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your NUnit projects by integrating with Trunk. This document explains how to configure NUnit to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. + +You can do this in dotnet with the NUnit's built-in JUnit reporter: + +```sh +dotnet test -o build -- NUnit.TestOutputXml="junit" +``` + +#### Report File Path + +.NET will output each build to the path specified by `-o ` and test results under a sub-folder of `/test-reports`, specified by the `-- NUnit.TestOutputXml=""` option. + +In the example command from the [Generating Reports step](#generating-reports), the XMLs will be located under `./build/test-reports/junit/*.xml`. This is also the glob you'll use to locate the results when uploading test results. + +#### Disable Retries + +You need to disable automatic retries if you previously included them. 
Retries compromise the accurate detection of flaky tests. + +Omit `[Retry(n)]` from tests to disable retries. + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./build/test-reports/junit/*.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
<CardGroup cols={3}>
  <Card title="Azure DevOps Pipelines" href="azure-devops-pipelines" img="azure.png" />
  <Card title="BitBucket Pipelines" href="bitbucket-pipelines" img="bitbucket.png" />
  <Card title="BuildKite" href="buildkite" img="buildkite.png" />
  <Card title="CircleCI" href="circleci" img="circle-ci.png" />
  <Card title="Drone CI" href="droneci" img="drone.png" />
  <Card title="GitHub Actions" href="github-actions" img="github.png" />
  <Card title="Gitlab" href="gitlab" img="gitlab.png" />
  <Card title="Jenkins" href="jenkins" img="jenkins.png" />
  <Card title="Semaphore" href="semaphoreci" img="semaphore.png" />
  {/* NOTE(review): this href is a GitBook "broken-reference" migration artifact — confirm the intended TeamCity destination */}
  <Card title="TeamCity" href="https://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.md" img="teamcity.png" />
  <Card title="Travis CI" href="travisci" img="travis.png" />
  <Card title="Other CI Providers" href="otherci" img="other.png" />
</CardGroup>
diff --git a/flaky-tests/get-started/frameworks/other-test-frameworks.mdx b/flaky-tests/get-started/frameworks/other-test-frameworks.mdx new file mode 100644 index 0000000..0e29d67 --- /dev/null +++ b/flaky-tests/get-started/frameworks/other-test-frameworks.mdx @@ -0,0 +1,89 @@ +--- +title: "Other Test Frameworks" +description: "A guide for generating Trunk-compatible test reports with other test frameworks" +--- +Trunk Flaky Tests is designed to be test framework agnostic. If you don't see a guide for your test framework, you can still use Flaky Tests. To use Flaky Tests, you will need to report test results in a format that Trunk understands and upload them to Trunk in CI. This guide will explain how to integrate your test framework with Trunk. + +## 1. Generate JUnit + +Trunk detects flaky tests by analyzing each test case's results over time. Trunk currently supports the JUnit XML and XCResult report formats. You will need to configure your test runner to report in one of these formats using a plugin or your own test result reporter. + +Make sure your tests reports accurately report the file name, test name, and stack trace of each test result. Make sure the test names are not randomized. These details help Trunk better detect and display your test cases' health status. + +## 2. Output Location + +You'll need to validate and upload the generated JUnit files to Trunk later during the setup process. Make sure the reports are generated with a **consistent name** and aren't **cached or committed** to Git. + +## 3. Validate Your Reports + +Since you'll be generating JUnit reports using a new plugin or custom reporter, you should use the Trunk CLI to validate your results and fix any warnings or errors. 
+ +You can install the Trunk CLI locally like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +Then, you can validate the results using the `trunk flakytests validate` command like this: + +```bash +./trunk-analytics-cli validate --junit-paths +``` + +## Next Step + +You'll need to upload the JUnit reports generated by your CI jobs to Trunk so Trunk can [detect flaky tests](/flaky-tests/detection) and [report them to the dashboard](/flaky-tests/dashboard). See [CI Providers](/flaky-tests/get-started/ci-providers) for a guide on how to upload test results to Trunk. + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/pest.mdx b/flaky-tests/get-started/frameworks/pest.mdx new file mode 100644 index 0000000..18fb363 --- /dev/null +++ b/flaky-tests/get-started/frameworks/pest.mdx @@ -0,0 +1,126 @@ +--- +title: "Pest" +description: "A guide for generating Trunk-compatible test reports for Pest" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your PHP projects by integrating with Trunk. This document explains how to configure Pest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. + +To generate XML reports, append `--log-junit junit.xml` to your `pest` test command: + +```sh +pest --log-junit junit.xml +``` + +#### Report File Path + +The JUnit report is written to the location specified by `--log-junit`. In the example above, the test results will be written to `./junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +Pest doesn't support retries out of the box, but if you implemented retries, remember to disable them. + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader).
If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/phpunit.mdx b/flaky-tests/get-started/frameworks/phpunit.mdx new file mode 100644 index 0000000..7718d51 --- /dev/null +++ b/flaky-tests/get-started/frameworks/phpunit.mdx @@ -0,0 +1,126 @@ +--- +title: "PHPUnit" +description: "A guide for generating Trunk-compatible test reports for PHPUnit" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your PHP projects by integrating with Trunk. This document explains how to configure PHPUnit to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. + +To generate XML reports, append `--log-junit junit.xml` to your `phpunit` test command: + +```sh +phpunit ./tests --log-junit junit.xml +``` + +#### Report File Path + +The JUnit report is written to the location specified by `--log-junit`. In the example above, the test results will be written to `./junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +PHPUnit doesn't support retries out of the box, but if you implemented retries, remember to disable them.
+ +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/playwright.mdx b/flaky-tests/get-started/frameworks/playwright.mdx new file mode 100644 index 0000000..31bc5b4 --- /dev/null +++ b/flaky-tests/get-started/frameworks/playwright.mdx @@ -0,0 +1,150 @@ +--- +title: "Playwright" +description: "A guide for generating Trunk-compatible test reports for Playwright" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Playwright projects by integrating with Trunk. This document explains how to configure Playwright to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Playwright has multiple built-in reporters, including JUnit XML which Trunk can ingest. 
To get XML reports, add the following to your Playwright config: + +```typescript +import { defineConfig } from '@playwright/test'; + +export default defineConfig({ + reporter: [ + ['junit', { outputFile: 'junit.xml' }] + ], +}); +``` + +Alternatively, you can specify reporting behavior inline in your CI: + +```sh +npx playwright test --reporter=junit +``` + +#### Report File Path + +You can specify the report's output location with the `PLAYWRIGHT_JUNIT_OUTPUT_FILE` environment variable: + +```sh +export PLAYWRIGHT_JUNIT_OUTPUT_FILE=junit.xml +``` + +You can also specify the report's location in your `playwright.config.ts` file: + +```typescript +export default defineConfig({ + reporter: [ + ['junit', { outputFile: 'junit.xml' }] + ], +}); +``` + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +You can disable retries in Playwright by omitting the `--retries` command line option and [removing retries in your `playwright.config.ts` file](https://playwright.dev/docs/test-retries#retries). + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +## Next Step + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/pytest.mdx b/flaky-tests/get-started/frameworks/pytest.mdx new file mode 100644 index 0000000..095ef65 --- /dev/null +++ b/flaky-tests/get-started/frameworks/pytest.mdx @@ -0,0 +1,126 @@ +--- +title: "Pytest" +description: "A guide for generating Trunk-compatible test reports for Pytest" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Pytest projects by integrating with Trunk. This document explains how to configure Pytest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating JUnit XML reports from your test runs. + +In your CI job, update your `pytest` command to include the `--junit-xml` and `junit_family=xunit1` arguments to generate XML reports: + +```shell +pytest --junit-xml=junit.xml -o junit_family=xunit1 +``` + +The `junit_family=xunit1` is necessary so that the generated XML report includes file paths for each test case. File paths for test cases are used for features that use code owners like the [Jira integration](/flaky-tests/ticketing-integrations/jira-integration) and [webhooks](/flaky-tests/webhooks). + +#### Report File Path + +The `--junit-xml` argument specifies the path of the JUnit report. You'll need this path later when configuring automatic uploads to Trunk. 
+ +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +Omit the [`--lf` or `--ff` options](https://docs.pytest.org/en/stable/how-to/cache.html) if you've previously configured your CI with these options to disable retries. + +### Try It Locally + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
+ +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/robot-framework.mdx b/flaky-tests/get-started/frameworks/robot-framework.mdx new file mode 100644 index 0000000..25c1202 --- /dev/null +++ b/flaky-tests/get-started/frameworks/robot-framework.mdx @@ -0,0 +1,128 @@ +--- +title: "Robot Framework" +description: "A guide for generating Trunk-compatible test reports for Robot Framework" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running tests with Robot by integrating with Trunk. This document explains how to configure Robot to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. + +To output compatible reports, add the `--xunit` argument to your `robot` command: + +```shell +robot --xunit=junit.xml TestSuite.robot +``` + +#### Report File Path + +The JUnit report will be written to the location specified by the `--xunit` argument. In the example above, it would be at `./junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable them and prefer using the [Quarantine](/flaky-tests/quarantining) feature to mitigate the negative impact of Flaky Tests. 
+ +Omit the [`--rerunfailed`](https://docs.robotframework.org/docs/flaky_tests#re-execute-failed-tests-and-merge-results) flag and remove any [RetryFailed Listeners](https://docs.robotframework.org/docs/flaky_tests#retryfailed-listener) previously configured to run as part of your CI pipeline to disable retries. + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
+ +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
+ +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/rspec.mdx b/flaky-tests/get-started/frameworks/rspec.mdx new file mode 100644 index 0000000..364cd30 --- /dev/null +++ b/flaky-tests/get-started/frameworks/rspec.mdx @@ -0,0 +1,109 @@ +--- +title: "RSpec" +description: "A guide for generating Trunk-compatible test reports for RSpec using Trunk's RSpec plugin" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running RSpec by integrating with Trunk. This document explains how to use Trunk's RSpec plugin to upload test results to Trunk. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Set up and install Trunk's RSpec plugin +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + + +Using the plugin is the best way to accurately detect flaky RSpec tests. + +You can also [manually generate and upload](/flaky-tests/get-started/frameworks/rspec/manual-uploads) test results in RSpec, however, **manual RSpec uploads are not recommended.** + + +### Installing the plugin + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this for your RSpec tests using Trunk's RSpec plugin. + +To install the plugin in your project, add the `rspec_trunk_flaky_tests` gem to your `Gemfile`: + +```shell +gem "rspec_trunk_flaky_tests" +``` + +Install the plugin: + +```sh +bundle install +``` + +Then, load the plugin in `spec_helper.rb`: + +```shell +require "trunk_spec_helper" +``` + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +If you have a step in CI to rerun failed tests with the `--only-failures` option, or you're using a package like [rspec-retry](https://github.com/NoRedInk/rspec-retry), remember to disable them.
+ +#### Versions and Updating the Plugin + +You can find the Gem for `rspec_trunk_flaky_tests` [here](https://rubygems.org/gems/rspec_trunk_flaky_tests), along with its version history. This plugin is periodically updated for more robust support and bug fixes, and if you're encountering something unexpected, we first encourage you to: + +``` +bundle update rspec_trunk_flaky_tests +``` + +### Environment Variables + +These optional environment variables can be set in your project to change the behavior of the RSpec plugin. + +#### Repository metadata variables: + +| Argument | Description | +| ------------------------------ | -------------------------------------------------------- | +| `TRUNK_REPO_ROOT` | Path to repository root | +| `TRUNK_REPO_URL` | Repository URL (e.g., https://github.com/org/repo.git) | +| `TRUNK_REPO_HEAD_SHA` | HEAD commit SHA | +| `TRUNK_REPO_HEAD_BRANCH` | HEAD branch name | +| `TRUNK_REPO_HEAD_COMMIT_EPOCH` | HEAD commit timestamp (seconds since epoch) | +| `TRUNK_REPO_HEAD_AUTHOR_NAME` | HEAD commit author name | + +#### Configuration variables: + +| Argument | Description | +| -------------------------------- | --------------------------------------------------------- | +| `TRUNK_CODEOWNERS_PATH` | Path to CODEOWNERS file | +| `TRUNK_VARIANT` | Variant name for test results (e.g., 'linux', 'pr-123') | +| `TRUNK_DISABLE_QUARANTINING` | Set to 'true' to disable quarantining | +| `TRUNK_ALLOW_EMPTY_TEST_RESULTS` | Set to 'true' to allow empty results | +| `TRUNK_DRY_RUN` | Set to 'true' to save bundle locally instead of uploading | +| `TRUNK_USE_UNCLONED_REPO` | Set to 'true' for uncloned repo mode | +| `TRUNK_LOCAL_UPLOAD_DIR` | Directory to save test results locally (disables upload) | +| `DISABLE_RPSEC_TRUNK_FLAY_TESTS` | Set to 'true' to completely disable Trunk | + +### Try It Locally + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually.
+ +You make an upload to Trunk using the following command: + +```sh +TRUNK_ORG_URL_SLUG= \ +TRUNK_API_TOKEN= \ +bundle exec rspec +``` + +You can find your Trunk organization URL slug and token in the **Settings** or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). + +After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the test report uploaded by the plugin has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
Bitbucket Pipelinesbitbucket-pipelinesbitbucket.png
Buildkitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
GitLabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx b/flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx new file mode 100644 index 0000000..0f1bf98 --- /dev/null +++ b/flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx @@ -0,0 +1,136 @@ +--- +title: "RSpec (Manual Uploads)" +description: "A guide for generating Trunk-compatible test reports for RSpec without using Trunk's RSpec plugin" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running RSpec by integrating with Trunk. This document explains how to configure RSpec to output JUnit XML reports that can be uploaded to Trunk for analysis. + + +We highly recommend using [Trunk's RSpec plugin](/flaky-tests/get-started/frameworks/rspec) to upload test results for the best accuracy when detecting flaky tests. + + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this for your RSpec tests by generating JUnit XML reports from your test runs. + +To generate Trunk-compatible reports, install the `rspec_junit_formatter`: + +```shell +gem install rspec_junit_formatter +``` + +You can use `rspec_junit_formatter` like this: + +```shell +rspec --format RspecJunitFormatter --out junit.xml +``` + +#### Report File Path + +The JUnit report will be written to the location specified by the `--out` argument.
In the example above, the report would be at `./junit.xml`. You will need this when you update your CI config to integrate with Trunk. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +If you have a step in CI to rerun failed tests with the `--only-failures` option, or you're using a package like [rspec-retry](https://github.com/NoRedInk/rspec-retry), remember to disable them. + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. 
To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/rust.mdx b/flaky-tests/get-started/frameworks/rust.mdx new file mode 100644 index 0000000..34d14ab --- /dev/null +++ b/flaky-tests/get-started/frameworks/rust.mdx @@ -0,0 +1,127 @@ +--- +title: "cargo-nextest" +description: "A guide for generating Trunk-compatible test reports for Rust" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Rust projects by integrating with Trunk. This document explains how to configure cargo-nextest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +`cargo-nextest` has built-in reporting for JUnit XML reports, which is trunk-compatible. You can enable JUnit reporting by adding the following to your nextest config: + +```toml +[profile.ci.junit] +path = "junit.xml" +``` + +You can invoke this profile when running tests with: + +```sh +cargo nextest run --profile ci +``` + +#### Report File Path + +`cargo-nextest` outputs artifacts at `target/nextest` by default. When you provide a profile and a file name via the config example above, it produces a report at `target/nextest/ci/junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +Omit the `--retries` option. 
+ +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" +``` + + + + + +This will not upload anything to Trunk. To improve detection accuracy, you should address all errors and warnings before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./target/nextest/ci/junit.xml" \ + --org-url-slug \ + --token +``` + +### Next Steps + +Configure your CI to upload test runs to Trunk. 
Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/swift-testing.mdx b/flaky-tests/get-started/frameworks/swift-testing.mdx new file mode 100644 index 0000000..57b98e0 --- /dev/null +++ b/flaky-tests/get-started/frameworks/swift-testing.mdx @@ -0,0 +1,128 @@ +--- +title: "Swift Testing" +description: "A guide for generating Trunk-compatible test reports with Swift Testing" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Swift projects by integrating with Trunk. This document explains how to configure Swift Testing to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. + +To output a compatible report, add the `--xunit-output` argument to your Swift test command: + +```shell +swift test --xunit-output junit.xml --parallel +``` + +Due to a [known bug](https://github.com/swiftlang/swift-package-manager/issues/4752) with Swift, you must include the `--parallel` flag for the XML report to output properly. + +#### Report File Path + +The test report will be written to the location specified by the `--xunit-output` argument. In the example above, it would be at `./junit.xml`. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. 
Retries compromise the accurate detection of flaky tests. + +Swift Testing doesn't support retries out of the box, but if you implemented retries or imported a package, remember to disable them. + +### Try It Locally + +#### **The Validate Command** + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/testplan.mdx b/flaky-tests/get-started/frameworks/testplan.mdx new file mode 100644 index 0000000..4d93b1b --- /dev/null +++ b/flaky-tests/get-started/frameworks/testplan.mdx @@ -0,0 +1,170 @@ +--- +title: "Testplan" +description: "A guide for generating Trunk-compatible test reports for Testplan" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running Testplan by integrating with Trunk. This document explains how to configure Testplan to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. Testplan can output JUnit XML reports which are compatible with Trunk. You can do so with the `--xml` option: + +```sh +./test_plan.py --xml +``` + +#### Report file path + +You can specify the file path for the reports with the `--xml` option. + +```sh +./test_plan.py --xml ./junit-reports +``` + +Testplan outputs multiple XML reports under the JUnit directory. You can locate these when uploading the reports in CI with the `"./junit-reports/*.xml"` glob. + + + + + +```python +@test_plan(name='SamplePlan', xml_dir='junit-reports') +def main(plan): + ... +``` + + + + + +### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +#### Task-level Retries + +If you're using execution pools (ThreadPool, ProcessPool) and have configured tasks with the rerun parameter, you should remove this configuration: + +```python +# Remove or set rerun=0 +task = Task(target='make_multitest', module='tasks', rerun=2) +plan.schedule(task, resource='MyPool') + +# Instead, use: +task = Task(target='make_multitest', module='tasks') +plan.schedule(task, resource='MyPool') +``` + +#### Thread Pool Retries + +If you're using ThreadPools, task retries can be disabled at the pool level; you should remove this configuration: + +```python +# Set allow_task_rerun=False +pool = ThreadPool(name="MyPool", allow_task_rerun=True) + +# Instead, use: +pool = ThreadPool(name="MyPool", allow_task_rerun=False) +``` + +### Try It Locally + +#### The Validate Command + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" +``` + + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "junit-reports/*.xml" \ + --org-url-slug \ + --token +``` + + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + + +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/vitest.mdx b/flaky-tests/get-started/frameworks/vitest.mdx new file mode 100644 index 0000000..8f53ff5 --- /dev/null +++ b/flaky-tests/get-started/frameworks/vitest.mdx @@ -0,0 +1,185 @@ +--- +title: "Vitest" +description: "A guide for generating Trunk-compatible test reports with Vitest" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Vitest projects by integrating with Trunk. This document explains how to configure Vitest to output JUnit XML reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. + +You can configure Vitest to produce a Trunk-compatible JUnitXML report by updating your `vitest.config.ts`. + +```javascript +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + reporters: [ + ['junit', { outputFile: './junit.xml', addFileAttribute: true }], + ], + }, +}); +``` + + +**Important**: The `addFileAttribute: true` option is required for the JUnit report to pass `trunk-analytics-cli` validation. This option adds file path information to each test case in the XML output, which Trunk uses to associate test results with source files. 
+ + +#### Report File Path + +The `outputFile: './junit.xml'` option specifies the path of the JUnit report. You'll need this path later when configuring automatic uploads to Trunk. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +If you've enabled retries, you can disable them following the [Vitest docs](https://vitest.dev/api/) for more accurate results. + + +**Note**: Configuration errors can sometimes mask themselves as consistent test failures. If you're seeing file-level test entries instead of individual test cases, resolve configuration issues first before adjusting retry settings. A properly configured test suite should show individual test case names in the JUnit output, not file names. + + +### Troubleshooting + +**Configuration Errors and File-Level Test Failures** + +**Issue**: You might see Trunk identifying flaky tests with names that match your test file names (e.g., `auth.test.ts` instead of `should login successfully`) rather than individual test case names. + +**Root Cause**: This typically occurs when Vitest encounters configuration errors that prevent it from properly parsing or running the tests in a file. Common scenarios include: + +* TypeScript configuration errors in `tsconfig.json` +* Missing dependencies or import resolution failures +* Syntax errors in test setup files +* Invalid Vitest configuration options + +**What Happens**: When Vitest cannot execute the individual tests within a file due to configuration issues, it generates a single JUnit test case entry named after the file itself, regardless of how many actual test cases exist in that file. + +**How to Diagnose**: + +1. Run your tests locally with verbose output: `vitest --reporter=verbose` +2. 
Check for configuration warnings or errors in the test output +3. Look for test files that show as single entries in your JUnit report when they should contain multiple test cases + +**How to Fix**: + +1. **Check TypeScript Configuration**: Ensure your `tsconfig.json` is valid and includes all necessary paths +2. **Verify Dependencies**: Make sure all imported modules are properly installed and accessible +3. **Review Setup Files**: Check any test setup files referenced in your Vitest config for errors +4. **Validate Vitest Config**: Ensure your `vitest.config.ts` doesn't contain invalid options + +### Try It Locally + +#### Validate Test Execution First + +Before validating your JUnit reports with Trunk, ensure Vitest can properly execute your tests: + +```bash +# Run tests with detailed output to catch configuration issues +vitest run --reporter=verbose + +# Check that individual test cases appear in output, not just file names +vitest run --reporter=json | jq '.testResults[].assertionResults' +``` + +If you see test files listed as single entries rather than individual test cases, you likely have configuration issues that need to be resolved before proceeding. + +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + +#### **The Validate Command** + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli validate --junit-paths "./junit.xml" +``` + + + + + +**This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +./trunk-analytics-cli upload --junit-paths "./junit.xml" \ + --org-url-slug \ + --token +``` + +You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. +
+ +### Next Step + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/xctest.mdx b/flaky-tests/get-started/frameworks/xctest.mdx new file mode 100644 index 0000000..4f8c9f3 --- /dev/null +++ b/flaky-tests/get-started/frameworks/xctest.mdx @@ -0,0 +1,125 @@ +--- +title: "XCTest" +description: "A guide for generating Trunk-compatible test reports for XCode and xcodebuild" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your XCTest projects by integrating with Trunk. This document explains how to configure XCTest to output XCResult reports that can be uploaded to Trunk for analysis. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Generate a compatible test report +* [ ] Configure the report file path or glob +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). + +### Generating Reports + +Running XCTests from `xcodebuild` produces a `.xcresult` in an obscure directory by default. You can specify a `-resultBundlePath` option to generate the results locally: + +```sh +xcodebuild test -scheme \ + -resultBundlePath ./test-results.xcresult +``` + +You can upload `.xcresult` directories directly to Trunk Flaky Tests. + + +Only XCode versions 16 or higher are supported. + + +#### Report File Path + +The test reports will be written to the `./test-results.xcresult` directory when running tests with the `-resultBundlePath ./test-results.xcresult`option. You will need this path when uploading results to Trunk in CI. + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + +If you run tests in CI with [the `-retry-tests-on-failure` option](https://keith.github.io/xcode-man-pages/xcodebuild.1.html#retry-tests-on-failure), disable it for more accurate results. + +### Try It Locally + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. + +You make an upload to Trunk using the following command: + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli upload --xcresult-path "./test-results.xcresult" \ + --org-url-slug \ + --token +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli upload --xcresult-path "./test-results.xcresult" \ + --org-url-slug \ + --token +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli upload --xcresult-path "./test-results.xcresult" \ + --org-url-slug \ + --token +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +./trunk-analytics-cli upload --xcresult-path "./test-results.xcresult" \ + --org-url-slug \ + --token +``` + + + + + +You can find your Trunk organization slug and token in the settings or by following these 
[instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. + +
+ +### Next Step + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/multiple-repositories.mdx b/flaky-tests/get-started/multiple-repositories.mdx new file mode 100644 index 0000000..7ca91a5 --- /dev/null +++ b/flaky-tests/get-started/multiple-repositories.mdx @@ -0,0 +1,121 @@ +--- +title: "Multiple Repositories and Forks" +description: "Learn how Trunk identifies repositories and how to track tests across forks and multiple repositories without mixing results." +--- +Trunk Flaky Tests identifies repositories by their **git remote URL**, not by the API token. You can safely use the same organization API token across multiple repositories, including forks, without mixing test results. + +### How Repository Identification Works + +When the Trunk Analytics CLI uploads test results, it reads the git remote URL from your CI environment and parses it into three components: + +* **Host**: `github.com`, `gitlab.com`, or your self-hosted instance +* **Owner**: The organization or user (e.g., `your-company`) +* **Name**: The repository name (e.g., `your-repo`) + +These three components together uniquely identify the repository in Trunk. The API token determines which *organization* the upload belongs to, but does not affect which *repository* the results are associated with. + +### Using Trunk with Forks + +If you run tests from a fork, Trunk automatically keeps test results separate based on the fork's remote URL. + +For example, if your company forks `metabase/metabase` to `your-company/metabase-fork`: + +| Repository | Remote URL | Trunk Repo ID | +| ---------- | --------------------------------------- | ---------------------------- | +| Original | `github.com/metabase/metabase` | `metabase/metabase` | +| Your fork | `github.com/your-company/metabase-fork` | `your-company/metabase-fork` | + +You can use the same organization API token for both repositories. Trunk creates separate repo entries and keeps all test data isolated. + + +No special configuration is needed for forks. 
As long as your fork has a different remote URL (which it does by default), Trunk keeps the data separate automatically. + + +### Verifying Your Remote URL + +Before setting up uploads, verify your CI job is using the correct remote URL: + +```bash +git remote -v +# origin git@github.com:your-company/metabase-fork.git (fetch) +# origin git@github.com:your-company/metabase-fork.git (push) +``` + + +Some CI providers set environment variables like `GITHUB_REPOSITORY` that may differ from your git remote. The CLI reads the git remote URL by default. If your CI environment modifies the remote, use the `--repo-url` flag to override repository detection. + + +### Overriding Repository Detection + +In some CI environments, you may need to manually specify the repository URL when: + +* The git remote is not available or is incorrect +* You are uploading results from a build artifact without a git checkout +* A shallow clone has modified remotes + +Override the repository URL with the `--repo-url` flag: + +```bash +./trunk-analytics-cli upload \ + --junit-paths "test_output.xml" \ + --org-url-slug <TRUNK_ORG_URL_SLUG> \ + --token $TRUNK_API_TOKEN \ + --repo-url "https://github.com/your-company/your-repo.git" +``` + +You can also set the repository URL via the `TRUNK_REPO_URL` environment variable: + +```bash +export TRUNK_REPO_URL="https://github.com/your-company/your-repo.git" +./trunk-analytics-cli upload \ + --junit-paths "test_output.xml" \ + --org-url-slug <TRUNK_ORG_URL_SLUG> \ + --token $TRUNK_API_TOKEN +``` + +See the [Trunk Analytics CLI](/flaky-tests/uploader) reference for the full list of override flags. 
+ +### Monorepo with Multiple Test Suites + +To track different test suites within the same repository separately, use the `--variant` flag: + +```bash +# Frontend tests +./trunk-analytics-cli upload \ + --junit-paths "frontend/test_output.xml" \ + --variant "frontend" \ + --org-url-slug \ + --token $TRUNK_API_TOKEN + +# Backend tests +./trunk-analytics-cli upload \ + --junit-paths "backend/test_output.xml" \ + --variant "backend" \ + --org-url-slug \ + --token $TRUNK_API_TOKEN +``` + +### Troubleshooting + +#### Test results appearing in wrong repository + +1. **Check your git remote**: Run `git remote -v` in your CI job to verify the URL. +2. **Check CI environment variables**: Some CI providers override git configuration. +3. **Use explicit override**: Set `--repo-url` to force the correct repository. + +#### Duplicate repositories in dashboard + +This can happen if the same repository is uploaded with different URL formats (e.g., HTTPS vs SSH). To resolve: + +1. Standardize the remote URL format across all CI jobs. +2. Use `--repo-url` to set a consistent URL. +3. Contact support@trunk.io to merge duplicate repository entries. + +### FAQ + +| Question | Answer | +| ------------------------------------------------ | ------------------------------------------------------------------- | +| Can I use the same API token for multiple repos? | Yes. The token is org-scoped, not repo-scoped. | +| Will fork test results mix with upstream? | No. Repos are identified by remote URL, not by token. | +| Do I need separate tokens for forks? | No. The same token works for all repos in your organization. | +| Can I override the detected repository? | Yes. Use `--repo-url` or the `TRUNK_REPO_URL` environment variable. 
| diff --git a/flaky-tests/github-pull-request-comments.mdx b/flaky-tests/github-pull-request-comments.mdx new file mode 100644 index 0000000..e63c96f --- /dev/null +++ b/flaky-tests/github-pull-request-comments.mdx @@ -0,0 +1,55 @@ +--- +title: "Pull request comments" +description: "Flaky Tests provides summary analytics about tests running on Pull Requests" +--- +Flaky Tests can post comments on GitHub pull requests that summarize test results across CI jobs. These comments indicate which failures are flaky and include the test’s failure history and related context. + +**Note:** Flaky Tests will only post a comment when there are failing tests. + +
+ +Each GitHub comment includes a summary report showing all tests that passed, failed, flaked, were skipped, or were quarantined on the PR. + +
+ +Each test case includes the full stack trace when expanded, and the job run link takes you to the complete CI logs. + +## Configuration + +If you have the [Trunk GitHub App installed](/setup-and-administration/github-app-permissions) and are [uploading JUnit XML](/flaky-tests/get-started/frameworks) test results on pull requests, expect to start seeing comments on your Pull Requests soon. If you prefer not to use the Trunk GitHub App, you can still set up comments on your Pull Requests by providing Trunk with a GitHub access token. + +
+ +Without Trunk GitHub App + +It's recommended that the Trunk GitHub App be used to manage GitHub comments. If you need to generate comments without the Trunk GitHub app, you can do so with a service account and an API token. + +1. Create a dedicated GitHub SVC account (Service Account) with access to the repositories in your GitHub Organization that Flaky Tests will comment on e.g., `trunk-analytics-user`. +2. On [github.com](https://github.com/), for `trunk-analytics-user` (or whichever user you wish to use), generate a [*Personal access token*](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) by navigating to **Settings** > **Developer settings** > **Personal access token** > **Fine-grained tokens** > **Generate new token**. +3. Name the new token something memorable. ex: `trunk-flaky-tests-token`. +4. The expiry time is up to you - however long you wish to try out Flaky Tests comments/how often you are willing to rotate the token. For a longer-term solution, consider installing the Trunk GitHub App. +5. The resource owner should be the GitHub Organization or user that owns the appropriate repositories. [See note on GitHub Org Ownership settings.](#github-org-ownership) +6. Select the repositories you wish to enable comments on. +7. **Permissions** - you must enable **Issues (Read and write)** and **Pull requests (Read and write)**. Note: It is expected that metadata permissions automatically change. +8. If everything looks good, scroll down to double check that your Overview for permissions looks something like the image below. If so, create the token. + +
+9. Once the token is generated, go back to the Trunk App ([app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests)) > click on your profile > **Settings** > **Manage** (under *Organization*) > **Organization GitHub Token** and enter the copied token into the text field, then finally press **Submit**. + +You should see comments posted by your service account on your next PR. + +
+ +## Disable commenting + +Pull Request comments are enabled by default. If you wish to disable the comments, you can do so by navigating to the Trunk App > click on your profile > **Settings** > **RepoName**. In the **Flaky Tests** heading, toggle the **Summary Flaky Tests Reports** setting. + +## Troubleshooting + +#### **GitHub Organization ownership** + +If you wish to set the resource owner to be a GitHub Organization, you should double check that this is allowed by navigating to your **GitHub Organization** > **Settings** > **Personal access tokens** > **Settings**. Make sure under "*Fine-grained personal access tokens*", you have *"Allow access via fine-grained personal access tokens"* selected. + +Once the token is created, the Organization admin may need to approve the request for the token. This can be done by going to **Github Organization** > **Settings** > **Personal access tokens** > **Pending requests**. To confirm that the token was set, you should be able to see it under **Active tokens**. + +At any point, feel free to reach out to our team [through Slack](https://slack.trunk.io). diff --git a/flaky-tests/infrastructure-failure-protection.mdx b/flaky-tests/infrastructure-failure-protection.mdx new file mode 100644 index 0000000..5b3bde9 --- /dev/null +++ b/flaky-tests/infrastructure-failure-protection.mdx @@ -0,0 +1,62 @@ +--- +title: "Infrastructure Failure Protection" +description: "Prevent false flaky test detections during CI outages and infrastructure failures." +--- +When infrastructure issues like database outages, network problems, or CI runner failures cause a large number of tests to fail simultaneously, retrying those tests can trigger mass false flaky detections. Infrastructure Failure Protection identifies these scenarios and excludes them from flakiness detection. + +
+ +### How it works + +Trunk monitors the failure rate of each test upload. If the percentage of failing tests exceeds your configured threshold, that upload is flagged as an infrastructure failure and excluded from flaky test detection. + +For example, if your threshold is set to 80% and a CI run has 85% of tests failing (this could be due to a database being unavailable or a similar infrastructure issue), that entire run will be excluded from flaky test detection. This prevents tests from being incorrectly marked as flaky when they're retried and pass. + + +Uploads excluded due to infrastructure failure protection will appear in the **Uploads** tab with the status **"Upload Skipped Due to Infrastructure Error."** + + +### Configuring Infrastructure Failure Protection + +Administrators can enable this feature in repository settings: + +1. Click on your profile and open **Settings** +2. Select your repository from the left navigation +3. Locate **Infrastructure Failure Protection** under Flaky Tests +4. Toggle **Enable Protection** to on +5. Set your **Failure Threshold** percentage (default: 80%) + +The threshold determines what percentage of test failures triggers infrastructure failure detection. A threshold of 80% is a reasonable starting point for most repositories—adjust based on your test suite size and typical failure patterns. + +### Trade-offs + +When a test upload is excluded due to infrastructure failure protection: + +**Uploads are still recorded:** + +* The upload appears in the Uploads tab with "Upload Skipped Due to Infrastructure Error" status + +**Failures are excluded from analysis:** + +* Failures do not impact flakiness detection +* Failures do not contribute to failure rate metrics +* Stack traces from that run are not visible in test case history + +This is generally an acceptable trade-off since infrastructure failures don't reflect the actual behavior of individual test cases. 
+ +### When to use this + +Enable Infrastructure Failure Protection if you experience: + +* Database or service outages that cause mass test failures +* CI runner infrastructure issues +* Network failures during test runs +* Any scenario where a large percentage of tests fail for reasons unrelated to code changes + +If you're using test quarantine, this feature is especially important to prevent infrastructure issues from automatically quarantining large numbers of tests. + +### Next steps + +* Learn more about how Trunk [detects flaky tests](/flaky-tests/detection) +* View excluded uploads in the Uploads tab +* Configure [test quarantine](/flaky-tests/quarantining#enable-quarantining) to automatically skip flaky tests diff --git a/flaky-tests/managing-detected-flaky-tests.mdx b/flaky-tests/managing-detected-flaky-tests.mdx new file mode 100644 index 0000000..dc17529 --- /dev/null +++ b/flaky-tests/managing-detected-flaky-tests.mdx @@ -0,0 +1,108 @@ +--- +title: "Managing detected flaky tests" +description: "A step-by-step guide for building an automated process to manage detected flaky tests." +--- +It is important to have a follow-up process in place to manage detected flaky tests. A good process ensures that flaky tests do not slow down CI for your development team and prevents flakes from accumulating over time. + +This guide walks through Trunk's recommended best practices for building a process around detected flaky tests in your organization. + + +Flaky tests will be [automatically detected](/flaky-tests/detection) by Trunk after you: + +* [Set up your test framework](/flaky-tests/get-started/frameworks) to produce test reports +* [Integrate with your CI provider](/flaky-tests/get-started/ci-providers) to upload those reports on CI runs. + +Go through these guides first to start detecting flaky tests. 
+ + +### Step 1: Create tickets for flaky tests + +Creating Linear or Jira tickets for detected flaky tests helps to integrate flaky test fixes into your existing workflows. + +* Start by [connecting to Linear or Jira](/flaky-tests/ticketing-integrations). You can also set default labels or teams for flaky test tickets. +* Once connected, you can click **Create Ticket** on a test detail page in Trunk. Trunk will create the ticket with context, including the test ID, flake rate, and the last failure stack trace and reason. +* The ticket status and assignee will be visible on the test details page in Trunk, and these details will stay in sync with changes to the ticket. + +### Step 2: Broadcast flakes + +It is important to keep the team informed of all status changes for flaky tests. This allows for fast follow-up when a test is marked as flaky. + +* Use the [built-in Slack or Microsoft Teams webhook integrations](/flaky-tests/webhooks) to transform webhook payloads into messages. +* Trunk's built-in templates help you get started and test the connection. +* You can then customize the transformation to update the message format and content, including @-mentioning test owners so they can follow up right away. + +### Step 3: Mute monitors + +If a flaky test has a known issue or a fix in progress, you can mute the monitor that flagged it. A muted monitor continues to run and record detections, but it does not contribute to the test's flaky status until the mute expires or is manually removed. + +To mute a monitor: + +1. Navigate to the test case detail page in the Trunk app. +2. Find the monitor that flagged the test. +3. Click **Mute** and select a duration. 
+ +| Duration | Description | +| -------- | ---------------------------------------- | +| 1 hour | Quick suppression for transient issues | +| 4 hours | Short-term suppression | +| 24 hours | Suppress for a full day | +| 7 days | Suppress for a week | +| 30 days | Suppress for a month | +| Forever | Mute indefinitely until manually unmuted | + +The **Forever** option mutes the monitor with no expiration. The monitor stays muted until you explicitly unmute it from the test case detail page. This is useful when a test has a known flake that your team has accepted, or when a fix is planned but not yet scheduled. + +For timed durations, the monitor automatically unmutes when the period expires. If the monitor is still detecting flaky behavior at that point, the test will be flagged as flaky again. + +You can optionally provide a reason when muting a monitor. This helps your team understand why the monitor was muted and makes it easier to review muted monitors later. + +You can unmute a monitor at any time from the test case detail page, regardless of the selected duration. + + +Muting suppresses the monitor's contribution to the test's status. If the muted monitor was the only active monitor for a test, the test transitions from flaky to healthy for the duration of the mute. + + +### Step 4: Flag flaky tests + +If automated detection hasn't caught a test you know is flaky, you can manually [flag it as flaky](https://github.com/trunk-io/docs/blob/main/flaky-tests/detection#flag-as-flaky) from the test detail page. Flagged tests are treated as flaky regardless of automated detection state, and the flag can be removed at any time. + +### Step 5: Quarantine flaky tests + +Flaky tests slow down CI and have a high negative impact on merge queue throughput. You can minimize or eliminate this CI slowdown by [quarantining](/flaky-tests/quarantining) flaky tests at runtime. + +* Enable quarantining for your repo at **Settings > your repo > Enable Test Quarantining**. 
+* Manually quarantine flaky tests by going to the test details page, clicking **Quarantine**, and setting the status to **Always**. Leave a comment detailing why you are quarantining this test to keep your team informed. The comment and quarantine status change will appear in the timeline on the test details page. + +After quarantining a test, Trunk will ignore the test result (pass/fail) on CI runs, preventing this flaky test from failing CI. + + +**Broken tests are not quarantine candidates.** Only tests with a **Flaky** status are eligible for quarantine. If a test is marked as Broken (consistently failing at a high rate), it represents a real regression that should be investigated and fixed rather than hidden. See [detection](/flaky-tests/detection) to understand the difference between flaky and broken tests. + + +### Step 6: Automation + +Trunk has [webhooks](/flaky-tests/webhooks) and [Flaky Tests APIs](/flaky-tests/flaky-tests) that can be used to build custom workflows around ticket creation, linking existing tickets to Trunk, sending notifications, and dealing with quarantined tests. + +There is also built-in automation support that handles tasks such as assigning flaky test ownership, ticket creation, and quarantining (so that unblocking CI is not a manual process). + +* [`CODEOWNERS` files](/flaky-tests/dashboard#code-owners) can automatically assign ownership of test flakes. +* Tickets can be [auto-created using webhooks](/flaky-tests/webhooks) as triggers, similar to Slack or MS Teams notifications. +* Automatically quarantine flaky tests by enabling **Settings > your repo > Auto-Quarantine Flaky Tests**. + +You can customize how flaky and quarantined tests are handled to suit your team and organization best. + +### Step 7: Review existing flakes and broken tests + +It is important to track and triage existing flaky and broken tests over time. 
Trunk collects historical failure logs and stack traces, providing developers with as much information as possible for debugging high-impact test failures. + +* Review all new flaky and broken tests to determine their impact and the urgency of a fix. Broken tests (consistently failing at a high rate) should typically be prioritized over flaky tests as they represent real regressions. +* Review existing quarantined tests regularly to decide which tests should be fixed and which tests should be deleted from your test suite. +* Trunk can send weekly email reports with information such as your total number of flaky tests and the number of PRs blocked, and how those numbers have changed week over week. Frequently failing tests will also be highlighted in the report. Reach out on [Slack](https://slack.trunk.io/) to ask about enabling weekly reports for your organization. + +## In summary: Build a process around managing flaky tests + +Building processes for dealing with flaky tests helps decrease or eliminate their impact on CI and reduce the amount of developer time lost to debugging flakes and CI reruns. + +Trunk allows you to customize this process to fit into your existing tooling and workflows, and automates manual tasks such as notifications and ticket creation. + +Reach out to us on our [community Slack](https://slack.trunk.io/) to chat about how to structure a process for managing flaky tests across your team or organization. diff --git a/flaky-tests/overview.mdx b/flaky-tests/overview.mdx new file mode 100644 index 0000000..2df8153 --- /dev/null +++ b/flaky-tests/overview.mdx @@ -0,0 +1,71 @@ +--- +title: "Overview" +description: "Detect, quarantine, and eliminate flaky tests from your codebase" +--- +Trunk Flaky Tests lets your teams detect, track, quarantine, and fix **flaky tests** in your codebase. Trunk can also identify **broken tests** — tests failing consistently at a high rate that indicate real regressions needing immediate fixes, not just quarantining. 
Flaky Tests is language, environment, and framework-agnostic. + +Let's explore how Trunk Flaky Tests' features help you tackle Flaky Tests. If you can't wait to try Trunk, follow our [getting started guide](/flaky-tests/get-started). + +You can see an overview of Trunk Flaky Tests in this video. + + +Watch the walkthrough. + + +### Understand the impact + +Your dashboard shows a comprehensive overview of your test suite's health at a glance. It lets you see important impact metrics like the number of flaky tests, PRs impacted by flaky tests, and PRs rescued by quarantining flaky tests. + +

Key repo metrics

+ +To learn more, [see how Flaky Tests does detection](/flaky-tests/detection). + +### Track every flaky test + +You can find a list of known flaky tests complete with important information like their impact on PRs and if someone's working on a fix. For more granularity, you can also inspect individual tests for their execution history, results, and status changes. + +

List of flaky tests

+ +To learn more, [see how Flaky Tests does detection](/flaky-tests/detection). + +### Stay in sync + +
PR comment linking to PR Test Summary

PR comment linking to PR Test Summary

+ +Flaky Tests helps everyone in your team stay in sync about flaky test failures with [GitHub PR comments](/flaky-tests/github-pull-request-comments), so no time is wasted debugging failures from known flaky tests. + +To learn more, [see our docs about GitHub Comments and Test Summaries](/flaky-tests/github-pull-request-comments). + +### Investigate flaky failures + +Flaky Tests creates detailed reports for individual test failures so you can debug faster. + +

Summary of unique failure types

+ +Test details will summarize all the unique ways a flaky test fails and let you flip through the relevant stack traces in the Trunk app. + +

Full failure stack traces

+ +To learn more, [see our docs about the detection of flaky tests](/flaky-tests/detection). + +### **Quarantine flaky failures** + +Flaky Tests allows you to [quarantine](/flaky-tests/quarantining) detected flaky tests, stopping them from failing your CI jobs. This prevents failed flaky tests from impacting your CI pipelines, so you won’t have to disable tests and won’t be slowed down by flaky CI jobs. + +

flaky tests can be quarantined automatically or manually

+ +To learn more, [see our docs about quarantining tests](/flaky-tests/quarantining). + +### Manage tickets + +

Creating a Jira ticket for a flaky test

+ +Trunk enables the automation of quickly creating and assigning tickets through integrations with platforms like Jira and Linear, as well as custom workflows with webhooks. The status of tickets created will be reflected in real-time in the Trunk web app. This helps you track efforts to fix high-impact, flaky tests. + +To learn more, [learn about our ticketing integrations](/flaky-tests/ticketing-integrations/jira-integration). + +### **Next steps** + +
Getting startedget-started
Create an accounthttps://app.trunk.io/signup?intent=flaky+tests
+ +Start finding flaky tests today by [signing up for Trunk](https://app.trunk.io/signup?intent=flaky%20tests) or reading our [Getting Started guides](/flaky-tests/get-started). diff --git a/flaky-tests/quarantine-service-availability.mdx b/flaky-tests/quarantine-service-availability.mdx new file mode 100644 index 0000000..7fca6eb --- /dev/null +++ b/flaky-tests/quarantine-service-availability.mdx @@ -0,0 +1,33 @@ +--- +title: "Quarantine Service Availability" +description: "Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. Your CI pipeline's integrity is never compromised by Trunk outages." +--- +### Service Availability and Graceful Degradation + +[Trunk Analytics CLI](/flaky-tests/uploader) is designed to fail safe when our quarantine service is unavailable. Your CI pipeline's integrity is never compromised by Trunk outages. + +#### What happens if Trunk is unreachable? + +When Trunk Analytics CLI cannot fetch quarantine configuration from Trunk's API: + +1. **Your original test exit code is preserved** — if tests fail, your CI fails +2. **No tests are quarantined** — failed tests are reported as failures, not suppressed +3. **A warning is displayed** in Trunk Analytics CLI output: + +> We were unable to determine the quarantine status for tests. Any failing tests will be reported as failures. + +#### Why fail-safe? + +We prioritize avoiding false positives over convenience. If Trunk is down, we'd rather your CI fails on a flaky test than silently passes on a real regression. You can always re-run the job once connectivity is restored. 
+ +#### What this means for you + +| Scenario | CI Exit Code | Tests Quarantined | +| ------------------------------------ | --------------- | ----------------- | +| API available, tests quarantined | 0 (pass) | Yes | +| API available, tests not quarantined | Non-zero (fail) | No | +| API unavailable | Non-zero (fail) | No | + +#### Caching behavior + +Trunk Analytics CLI does not cache quarantine configuration locally. Each invocation requires a successful API call to apply quarantining. This ensures you're always operating on the freshest quarantine state rather than potentially stale data. diff --git a/flaky-tests/quarantining.mdx b/flaky-tests/quarantining.mdx new file mode 100644 index 0000000..7409337 --- /dev/null +++ b/flaky-tests/quarantining.mdx @@ -0,0 +1,241 @@ +--- +title: "Quarantining" +description: "Mitigate impact of known flaky tests by isolating them at run time" +--- +
+ +**Quarantining** isolates known flaky tests to prevent them from blocking CI jobs while continuing to run and track their results. The system identifies flaky tests at runtime and overrides their exit codes when they fail, allowing your CI pipeline to pass without requiring code changes to disable problematic tests. + +**Why use quarantining:** It acts as a crucial stopgap, minimizing the disruption from known flaky tests while your team works on fixing them. By quarantining flaky tests, you unblock critical CI pipelines—**especially your merge queue**—and regain development velocity without losing visibility, as these tests continue to run and upload results. This constant stream of data allows you to prioritize fixing the worst offenders based on their ongoing impact. + + +**Broken tests are not quarantine candidates.** Quarantining is designed for flaky tests — tests that intermittently fail and can be safely skipped to unblock CI while being investigated. A broken test represents a real failure that should not be hidden from CI results. Only tests with a **Flaky** status are eligible for auto-quarantine. + + +### What does "Quarantined" mean? + +A quarantined test continues running in CI and uploading results to Trunk Flaky Tests, but its failures won't block your pipeline. The [Trunk Analytics CLI](/flaky-tests/uploader) checks with Trunk's backend to determine if failed tests are quarantined, then overrides the exit code for those failures. When all failures in a CI job come from quarantined tests, the entire job passes. + +**Why this matters:** You maintain complete test coverage and historical data while preventing known problematic tests from disrupting your development cycle. + +### **How tests get quarantined** + +Tests can be quarantined through two methods: + +1. **Manual Quarantine** - You explicitly select specific tests using override settings +2. 
**Auto-Quarantine** (when enabled) - Tests already flagged by [Trunk's flaky detection](/flaky-tests/detection) are automatically quarantined + +Tests are auto-quarantined only if detected as **flaky** or [flagged as flaky](/flaky-tests/detection/flag-as-flaky) manually. Tests with a **Broken** status are not auto-quarantined — they represent real failures that should be investigated and fixed. For [manually quarantined tests](#overriding-individual-tests), all failures are quarantined regardless of test state. + +### Enable quarantining + + +Toggling the **Enable Test Quarantining** switch makes quarantining possible but does not quarantine any tests on its own. + +A test failure will only be ignored by CI if the test is already manually quarantined, or if the test has previously been identified as flaky and the Auto-Quarantine option is enabled. + +Actively quarantining tests will significantly change CI results, as failures from quarantined tests no longer cause builds to fail. [Learn more about the effects of quarantining](#whats-affected). + + +With quarantining enabled, the Analytics Uploader will compare failed test cases against known flaky tests. If a test is known to be flaky, it will be quarantined. If all failed tests are quarantined, the exit code of the test command will be overridden to return 0 and the CI job will pass. + +#### Quarantining settings + +To enable quarantining, navigate to **Settings** > **Repositories** > repository > **Flaky Tests** > toggle **on** **Enable Test Quarantining**. + +
+ +Here's what each of these options does when enabled: + +
SettingDescription
Enable Test QuarantiningThis primary toggle activates the quarantining feature set, unlocking both manual override options and the ability to enable auto-quarantining. For any quarantining to work, the necessary configurations must also be made in your CI pipeline.
Auto-Quarantine Flaky TestsWhen enabled, any test already identified by Trunk as "flaky" will be automatically quarantined. This saves you from having to manually quarantine each flaky test as it's discovered.
+ +### **Quarantining with Sharded or Parallelized Tests** + +There are two options for handling quarantining. + +**Option 1: Wrapping each test invocation** + +Wrap each command and specify its JUnit output path. Trunk captures the exit code and automatically uploads results. + +**Example** + +```bash +# run test 1 +trunk flakytests test --org-url-slug=[org] --token=[token] --junit-paths=test1_output/*.xml -- npm run test1 +# run test 2 +trunk flakytests test --org-url-slug=[org] --token=[token] --junit-paths=test2_output/*.xml -- npm run test2 +``` + +**Option 2: Handling quarantining during upload** + +For complex setups where Trunk can’t wrap test commands, run tests first and let the upload step be the final gate. When quarantining is enabled, the upload inspects the provided JUnit results and decides whether to return exit code `0` or `1` based on the outcomes. + +**Advanced: Handling build errors outside test runs** + +To handle build issues that occur outside test runs, use the `--test-process-exit-code` option. This provides a fallback exit code if the upload runs without detecting any JUnit results. + +**Example** + +```sh +./trunk flakytests test \ + --org-url-slug <TRUNK_ORG_URL_SLUG> \ + --token $TRUNK_API_TOKEN \ + --junit-paths="**/results/*.xml" \ + --test-process-exit-code=1 + +``` + +The CLI only recognizes tests defined in JUnit. If multiple test executions occur and one fails due to a build error, Flaky Tests won’t detect it and will assume the exit code came from test failures. If those failures are quarantined, the job may incorrectly be reported as successful. To prevent this: + +* Upload results for each test execution separately, or +* Generate a JUnit report that records build errors. + +### Updates in CI + +If you're using the provided [GitHub Actions workflow](/flaky-tests/get-started/ci-providers) to upload test results to Flaky Tests, you can quarantine flaky tests by wrapping the test command or as a follow-up step. 
+ +If you're using the Trunk CLI directly or other CI providers, check the instructions in the **Using The Trunk CLI Directly** tab. + + + + + +Using the Trunk Analytics Uploader Action in your GitHub Actions workflow files may require modifications to those files to support quarantining. + +If you upload your test results as a second step after you run your tests, **you need to add** `continue-on-error: true` **on your test step** so your CI job will continue even on failures. + +Here's an example file. + + +```yaml +name: Run Tests And Upload Results +on: + workflow_dispatch: +jobs: + upload-test-results: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Run Tests + id: unit_tests + shell: bash + run: <your test command> + continue-on-error: true + + - name: Upload test results + if: always() + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: <glob path to junit files> + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} +``` + + +If you want to run the test command and upload in a single step, the test command must be **run via the Analytics Uploader** through the `run: ` parameter. + +This will override the response code of the test command. Make sure to set `continue-on-error: false` so un-quarantined tests are blocking. + +```yaml +name: Run Tests And Upload Results +on: + workflow_dispatch: +jobs: + upload-test-results: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run tests and upload results + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: <glob path to junit files> + run: <your test command> + token: ${{ secrets.TRUNK_API_TOKEN }} + org-slug: my-trunk-org-slug +``` + + + + + +**Using Flaky Tests as a separate step** + +If you upload your test results as a second step after you run your tests, you need to ensure your test step **continues on errors** so the upload step that's run after can quarantine failed tests. 
When quarantining is enabled, the `flakytests upload` command will **return an error** if there are unquarantined failures and return a status code 0 if all tests are quarantined.
+
+```bash
+# run your tests; don't fail the job on failure so the upload step still runs
+<your test command> || true
+
+./trunk flakytests upload \
+  --org-url-slug $TRUNK_ORG_SLUG \
+  --token $TRUNK_API_TOKEN \
+  --junit-paths $JUNIT_PATH
+```
+
+**Using Flaky Tests as a single step**
+
+You can also wrap the test command with the Trunk CLI. When wrapping the command with the Trunk CLI, if there are unquarantined tests, the command will return an error. If there are no unquarantined tests, the command will return a status code 0.
+
+```bash
+./trunk flakytests test \
+  --org-url-slug $TRUNK_ORG_SLUG \
+  --token $TRUNK_API_TOKEN \
+  --junit-paths $JUNIT_PATH \
+  --allow-empty-test-results \
+  -- <your test command>
+```
+
+
+
+
+
+### Overriding individual tests
+
+If you have tests that should never be quarantined or should always be quarantined regardless of their current health status, you can do this by overriding individual tests.
+
+

overriding

+ +You can manually control a test's quarantine status from its details page. + +* To set an override: Click the **Quarantine** (or **Override**) button, then select either Always Quarantine or Never Quarantine. +* To remove an override: Click the **Remove Override** button. + +When a manual override is active, a banner shows who set it and when. + +
SettingBehavior
Always QuarantineQuarantine a test failure even if the health status is healthy.
Never QuarantineNever quarantine failures, even if the health status is flaky and auto-quarantining is enabled for the repo.
+ +
+ +To review a history of all quarantine changes on a test, use the **Quarantine Events** filter within the **Test History** section. This will show every override, setting change, and comment, along with the author and timestamp for each entry. + +### Tracking quarantined jobs in the dashboard + +Once quarantining is active, the **Quarantining** tab provides a central hub for monitoring its impact and effectiveness. This tab serves as a complete audit log of every CI job saved by the feature, allowing you to: + +* **Visualize Trends:** A 30-day chart shows the number of jobs quarantined per day. +* **Inspect Individual Jobs:** A detailed table lists every quarantined job. Click any entry to see the specific tests that were quarantined. +* **Isolate Critical Workflows:** Use the filter to see how quarantining impacts specific branches, such as preventing flaky failures in your Merge Queue. +* **Measure ROI:** Use the data to quantify the number of builds saved and developer time reclaimed for your organization. + +
+ +### Audit logs + +Trunk provides audit logs for all setting changes and overwrites for individual tests. You can access the audit log by navigating to **Settings** > **Repositories** **>** repository **>** **Flaky Tests** > **Audit logs** under the Enable Test Quarantining heading. + +
+ +### Quarantining API and webhooks + +For advanced use cases, you can interact with quarantining features programmatically. + +* API: Use the [Flaky Tests API](/flaky-tests/flaky-tests) to fetch a list of all currently quarantined tests in your project. +* Webhooks: Subscribe to the `test_case.quarantining_setting_changed` event to trigger automated workflows whenever a test's quarantine override is modified. Learn more about [Webhooks](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed). + + +#### Service Availability and Graceful Degradation + +Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. Read more at [Quarantine Service Availability](/flaky-tests/quarantine-service-availability) + diff --git a/flaky-tests/the-importance-of-pr-test-results.mdx b/flaky-tests/the-importance-of-pr-test-results.mdx new file mode 100644 index 0000000..cdf0f9e --- /dev/null +++ b/flaky-tests/the-importance-of-pr-test-results.mdx @@ -0,0 +1,66 @@ +--- +title: "The Importance of PR Test Results" +description: "Uploading test results from pull requests (PRs) is a critical step for enabling Trunk Flaky Tests. This data provides a primary signal for detecting flaky tests and is the key metr" +--- +Uploading test results from pull requests (PRs) is a critical step for enabling Trunk Flaky Tests. This data provides a primary signal for *detecting* flaky tests and is the key metric for *measuring* their impact. Without it, you lose the most significant source of information for identifying and prioritizing these disruptive tests. + +Here's a breakdown of the key features that depend on PR test results: + +#### Crucial Flakiness Detection + +The most common and critical signal for identifying a flaky test happens on PRs. Flakiness is detected when a test produces different results on the same git commit. + +This typically happens when: + +1. A developer opens a PR, and a test fails. +2. 
The developer reruns the exact same tests without changing any code. +3. The test now passes. + +This "fail then pass" sequence on the same commit is a clear indication of non-deterministic, or "flaky," behavior. Since the majority of test runs occur during the development and review cycle, PRs are the largest source of this vital signal. + +#### Measuring Test Impact + +The Flaky Tests dashboard is designed to help you prioritize which tests to fix first. The single most important metric for this is `PRs Impacted`. By default, the overview table is sorted by this metric because it's the best way to measure a flaky test's true impact on developer productivity. + +If you don't upload test results from PRs: + +* The `PRs Impacted` count for every test will be zero. +* You will have no way to determine which flaky tests are causing the most disruption. +* You lose the ability to prioritize fixes based on real-world data, potentially wasting time on less important issues. + +#### Unblocking Developers with Quarantining + +Quarantining is one of the most powerful features of Trunk Flaky Tests. Its core purpose is to prevent known flaky tests from blocking developers and breaking CI pipelines, especially merge queues. + +The entire quarantining workflow is predicated on analyzing test results from PRs. Without PR data, you cannot: + +* **Identify tests as flaky from PR test runs:** The system needs to see a test pass and fail on the same commit (a signal primarily gathered from PRs) to classify it as flaky. +* **Apply Quarantine Logic at Runtime:** Uploading a test result and checking if it should be quarantined are part of the same, single step in your CI job. When a test fails on a PR, the `Trunk Analytics CLI` uploads the failure and, in the same operation, checks with the Trunk service to see if that test is on the quarantine list. If it is, the CLI overrides the job's exit code, allowing the build to pass. 
Without running the `Trunk Analytics CLI` on your PR jobs, this real-time check cannot occur, and even known flaky tests will continue to block your PRs. + +#### Immediate CI Feedback and Error Summaries + +The `Trunk Analytics CLI` provides a detailed summary directly in the CI job's output log. This is the fastest, most immediate feedback a developer gets about their test run. + +Without uploading PR results, you lose: + +* A Clear Test Report Summary: A quick overview of `Total`, `Pass`, `Fail`, and `Quarantined` tests. +* In-Log Failure Details: A snippet of the stack trace and assertion error for any failed test, providing immediate context without digging through full CI logs. +* Actionable Exit Codes: The CLI intelligently determines the job's outcome. + * When a real test fails, it exits with a non-zero code: `⚠️ Some test failures were not quarantined, using exit code: 1` + * When *only* a known flaky test fails, it passes the job: `🎉 All test failures were quarantined, overriding exit code to be exit_success (0)` + +This immediate, in-CI feedback loop is invaluable for developers trying to quickly understand why their build failed. + +#### Enabling Developer Productivity Features + +Trunk Flaky Tests offers features directly within the developer workflow that depend entirely on PR data, most significantly the automated pull request comment. + +These comments provide a summary of all tests run on a specific PR, highlighting failures and indicating whether they are due to a known flaky test. This feature prevents developers from wasting time investigating a failure that is already identified as flaky. Without uploading PR test results, this valuable, time-saving context is completely lost. + +#### Next Steps: Enable PR Uploads + +Now that you understand why uploading test results from pull requests is essential, the next step is to configure your CI pipeline. 
This single step is the key to unlocking accurate flakiness detection, true impact measurement, and powerful features like quarantining. + +Our documentation provides step-by-step guides for all major CI providers to make this setup simple. + +[➡️ Find your CI provider and start uploading test results](/flaky-tests/get-started/ci-providers) diff --git a/flaky-tests/ticketing-integrations.mdx b/flaky-tests/ticketing-integrations.mdx new file mode 100644 index 0000000..7cacee2 --- /dev/null +++ b/flaky-tests/ticketing-integrations.mdx @@ -0,0 +1,26 @@ +--- +title: "Ticketing integrations" +description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets in your ticketing system" +--- +You can integrate directly with your ticketing systems to automatically create tickets when Trunk [detects a flaky test](/flaky-tests/detection). + +### Ticket content + +Flaky Tests automatically generates tickets complete with a title and description. If you’re connected to Linear or Jira, you can also assign default issue types, teams, or assignees. + +The ticket description contains the following information: + +* Identifier of the test +* Since when has the test been labeled flaky +* The last time this test failed +* The impact when run on PRs +* The impact when run on branches +* Quarantine status +* Most common failure reasons +* Code owners, according to the [CODEOWNERS](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) file in your repository + +### Integration setup + +Currently, Ticket Creation supports integrations with Linear and Jira. However, the automatically generated ticket content is formatted in Markdown and can be copied to other platforms like Asana or GitHub issues. + +
Linearlinear-v2.pnglinear-integration
Jirajira.pngjira-integration
diff --git a/flaky-tests/ticketing-integrations/jira-integration.mdx b/flaky-tests/ticketing-integrations/jira-integration.mdx new file mode 100644 index 0000000..7ec364c --- /dev/null +++ b/flaky-tests/ticketing-integrations/jira-integration.mdx @@ -0,0 +1,135 @@ +--- +title: "Jira integration" +description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets with the Jira integration" +--- +When Trunk Flaky Tests [detects a flaky test](/flaky-tests/detection), you can create an automatically generated Jira ticket for your team to pick up and fix the test. + +Webhook payloads will also contain ticket information when a Jira ticket is created with the integration or when [existing tickets are linked](#link-existing-tickets-to-tests). + +### Connecting to Jira + +
+ +To connect a Jira Cloud project, navigate to **Settings** -> **Repositories** -> **Ticketing Integration** and select **Jira** as your Ticketing System. + +Then complete the form and click Connect to Jira Cloud with the following information. + +
Field NameDescriptionExamples
Jira URLThe URL to your Jira Cloud project.https://trunk-io.atlassian.net
Project KeyThe project key for your Jira project.KAN
EmailThe email associated with your Jira API token.johndoe@example.com
Jira API tokenCreate your Jira API token here.ATATT*****19FNY5Q
Default label(s) for new ticketsLabels applied to new Jira tickets created through Trunk Flaky Testsflaky-test, debt
+ +After connecting to Jira, you can specify a default issue type for new tickets and a default assignee for new tickets. + +#### API Token permissions + +Your Jira user account must have the following project permissions to create a Jira API token that allows Trunk to read, create, and assign tickets automatically: + +* *Create issues* +* *Assign issues* OR *Browse users and groups* (global permission) +* *Browse projects* + * If issue-level security is configured, issue-level security permissions must be granted to read issues. + +You need to create an API token with the following scopes: + +* Required scopes (classic) + * `read:jira-work` + * `write:jira-work` + * `read:jira-user` +* Required scopes (granular): + * `read:issue:jira` + * `read:issue-meta:jira` + * `read:issue-security-level:jira` + * `read:issue.vote:jira` + * `read:issue.changelog:jira` + * `read:avatar:jira` + * `read:status:jira` + * `read:user:jira` + * `read:field-configuration:jira` + * `read:application-role:jira` + * `read:group:jira` + * `read:issue-type:jira` + * `read:project:jira` + * `read:project.property:jira` + * `read:issue-type-hierarchy:jira` + * `read:project-category:jira` + * `read:project-version:jira` + * `read:project.component:jira` + * `read:permission:jira` + * `write:issue:jira` + * `write:comment:jira` + * `write:comment.property:jira` + * `write:attachment:jira` + + +Jira tokens cannot last longer than 365 days. Once the token expires, you will need to generate a new API token. + + +### Create a new ticket + +You can create a new ticket for any test listed in Trunk Flaky Tests. + +There are 2 ways to create a new ticket in the Flaky Test dashboard: + +* Click on the options menu for any test case on the repo overview dashboard + +
+ +* Use the Create ticket button in the top left corner of the [test case details](/flaky-tests/dashboard#test-case-details) page. + +Before you create the ticket, you will have a preview of the title and description. + +
+ +#### Create with Jira + +If you are connected to Jira, you can click the **Create Jira Ticket** button at the end of the modal, which will automatically create a ticket with the configured labels and assignees. + +#### Link existing tickets to tests + +If you already have a ticket in Jira that you want to link to a test in the dashboard, you can use the [Link Ticket to Test Case API](/flaky-tests#post-flaky-tests-link-ticket-to-test-case). + +### Required Custom Fields + +Some Jira projects require additional fields beyond the standard fields (like summary, description, and issue type) to be specified when creating tickets. Common required custom fields include: + +* **Components** - Categories or modules within your project +* **Affects Version** - Which version of your product is impacted +* **Fix Version** - Target version for the fix +* **Epic Link** - Parent epic for the ticket +* **Sprint** - Sprint assignment +* **Story Points** - Estimation field +* Custom fields specific to your organization + +#### Enterprise Feature + + +**Support for required custom fields is an Enterprise feature.** + + +If your Jira project requires custom fields that aren't supported in the standard Trunk Flaky Tests integration, you'll see an error message when attempting to create a ticket: + +``` +The Jira project [PROJECT_KEY] requires a field "[field_name]". +Contact sales@trunk.io to upgrade your account for custom field support. +``` + +To enable support for your required custom fields, contact our sales team at sales@trunk.io to discuss Enterprise plan options. + +#### Alternative: Remove Field Requirements + +If you don't need Enterprise features, you can modify your Jira project settings to make custom fields optional instead of required. This allows Trunk Flaky Tests to create tickets without needing to specify those fields. + +**To make a field optional in Jira:** + +1. Navigate to **Project Settings** in your Jira project +2. 
Select **Issue Types** from the sidebar
+3. Choose the issue type you're using for flaky test tickets (e.g., Task, Bug)
+4. Click **Fields** to see all fields for that issue type
+5. Locate the required custom field (e.g., "Components")
+6. Click the field to open its configuration
+7. Uncheck **Required** or change the field requirement setting
+8. Save your changes
+
+After making the field optional, you should be able to create tickets through Trunk Flaky Tests without encountering the error.
+
+
+**Note:** You may need Jira Administrator permissions to modify project settings. If you don't have access, contact your Jira administrator to make these changes.
+
diff --git a/flaky-tests/ticketing-integrations/linear-integration.mdx b/flaky-tests/ticketing-integrations/linear-integration.mdx
new file mode 100644
index 0000000..090f03e
--- /dev/null
+++ b/flaky-tests/ticketing-integrations/linear-integration.mdx
@@ -0,0 +1,55 @@
+---
+title: "Linear integration"
+description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets with the Linear integration"
+---
+When Trunk Flaky Tests [detects a flaky test](/flaky-tests/detection), you can create an automatically generated Linear ticket for your team to pick up and fix the test.
+
+Webhook payloads will also contain ticket information when a Linear ticket is created with the integration or when [existing tickets are linked](#link-existing-tickets-to-tests).
+
+### Connecting to Linear
+
+ +To connect a Linear project: + +1. Navigate to **Settings** > **Repositories** > **Ticketing Integration.** +2. Select **Linear** as your Ticketing System. +3. Add a [Linear API key](#api-token-permissions) +4. Select a Team and **Connect to Linear**. + +After connecting to Linear, you can specify a default project and a default assignee for new tickets. + +#### API Key permissions + +The following project permissions must be granted to your Linear API key so Trunk can read, create, and assign tickets automatically: + +* *Read* +* *Create issues* + +Selecting *Full Access* will also grant the required permissions. + +### Create a new ticket + +You can create a new ticket for any test listed in Flaky Tests. + +There are 2 ways to create a new ticket in the Flaky Test dashboard: + +* Click on the options menu for any test case on the repo overview dashboard + +
+ +* Use the Create ticket button in the top left corner of the [test case details](/flaky-tests/dashboard#test-case-details) page. + +Before you create the ticket, you get a preview of the title and description. + +
+ +#### Create with Linear + +If you are connected to Linear, you can click the **Create Linear Ticket** button at the end of the modal to automatically create a ticket with the configured team and assignees. + +Note: You can use [Flaky Tests webhooks](/flaky-tests/webhooks/linear-integration) to automate ticket creation, or if you need more control over how tickets are created in Linear. This integration is not required when using webhooks. + +#### Link existing tickets to tests + +If you already have a ticket in Linear that you want to link to a test in the dashboard, you can use the [Link Ticket to Test Case API](/flaky-tests#post-flaky-tests-link-ticket-to-test-case). diff --git a/flaky-tests/ticketing-integrations/other-ticketing-platforms.mdx b/flaky-tests/ticketing-integrations/other-ticketing-platforms.mdx new file mode 100644 index 0000000..2574219 --- /dev/null +++ b/flaky-tests/ticketing-integrations/other-ticketing-platforms.mdx @@ -0,0 +1,23 @@ +--- +title: "Other ticketing platforms" +description: "Triage your flaky tests faster by manually creating tickets from generated markdown" +--- +If you have not set up an integration, Trunk Flaky Tests can still generate a ticket title and description so you can copy and paste the details into your project management software. + +### Create a new ticket + +You can create a new ticket for any test listed in Trunk Flaky Tests. + +There are 2 ways to create a new ticket in the Flaky Test dashboard: + +* Click on the options menu for any test case on the repo overview dashboard + +
+ +* Use the Create ticket button in the top left corner of the [test case details](/flaky-tests/dashboard#test-case-details) page. + +Before you create the ticket, you will have a preview of the title and description. + +
+ +Now you can copy and paste the ticket title and description into your project management or ticketing platform. diff --git a/flaky-tests/uploader.mdx b/flaky-tests/uploader.mdx new file mode 100644 index 0000000..05dd66e --- /dev/null +++ b/flaky-tests/uploader.mdx @@ -0,0 +1,285 @@ +--- +title: "Trunk Analytics CLI" +description: "Trunk detects and tracks flaky tests in your repos by receiving uploads from your test runs in CI, uploaded from the Trunk Analytics CLI. These uploads happen in the CI jobs used t" +--- +Trunk detects and tracks flaky tests in your repos by receiving uploads from your test runs in CI, uploaded from the Trunk Analytics CLI. These uploads happen in the CI jobs used to run tests in your nightly CI, post-commit jobs, and PR checks. + +### Guides + +If you're setting up Trunk Flaky Tests for the first time, you can follow the guides for your CI provider and test framework. + +
Guides by Test Frameworksframeworks
Guides by CI Providerci-providers
+ +The CLI should be **downloaded as part of your test workflow** in your CI system. The automatic launcher is platform agnostic and will download the latest version of the uploader for your platform. + +### Manual Download + +You can find the list of releases on [the GitHub release page](https://github.com/trunk-io/analytics-cli/releases). We provide executables for Linux and OS X. It’s a single file inside a tar and upon downloading the tar you will find a single binary - `trunk-analytics-cli` to use. + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +```bash +SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" +curl -fL --retry 3 \ + "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ + | tar -xz + +chmod +x trunk-analytics-cli +``` + + + + + +### Organization Slug and Token + +The CLI requires your Trunk organization slug and token passed through `--org-url-slug` and `--token` to upload results to the correct organization. They can alternatively be set via environment variables, `TRUNK_ORG_URL_SLUG` and `TRUNK_API_TOKEN`, respectively. + +You can find your organization slug and token by going to **Settings** > **Manage** > **Organization**. + + + + + +

Make sure you are getting your Organization Slug, not the Organization Name.

+ +
+ + + +

Ensure you get your Organization API Token, not your repo token.

+ +
+ +
+ +### Uploading Test Results + + +The uploaded tests are processed by Trunk periodically, not in real-time. Wait for at least an hour after the initial upload before they’re displayed in the [Uploads tab](/flaky-tests/get-started#step-3-verify-integration). Multiple uploads are required before a test can be accurately detected as flaky. + + +Trunk accepts uploads in three main report formats, [XML](https://github.com/testmoapp/junitxml), [Bazel Event Protocol JSONs](https://bazel.build/remote/bep#consuming-bep-text-json), and XCode XCResult paths. You can upload each of these test report formats using the `./trunk flakytest upload` command like this: + + + + + +Trunk can accept JUnit XMLs through the `--junit-paths` argument: + +``` +./trunk-analytics-cli upload --junit-paths "test_output.xml" \ + --org-url-slug \ + --token $TRUNK_API_TOKEN +``` + + + + + +Trunk can accept Bazel through the `--bazel-bep-path` argument: + +``` +./trunk flakytests upload --bazel-bep-path \ + --org-url-slug \ + --token $TRUNK_API_TOKEN +``` + + + + + +Trunk can accept XCode through the `--xcresult-path` argument: + +``` +./trunk flakytests upload --xcresult-path \ + --org-url-slug \ + --token $TRUNK_API_TOKEN +``` + + + + + +### Variants + +If you run the same tests across different environments or architectures, you can use variants to separate these runs into distinct test cases. This allows Trunk to detect environment-specific flakes. + +For example, a test for a mobile app might be flaky on iOS but stable on Android. Using variants, Trunk can isolate flakes on the iOS variant instead of marking the test as flaky across all environments. + +You can specify a variant during upload using the `--variant` option: + +``` +./trunk flakytests upload --junit-paths "test_output.xml" \ + --org-url-slug \ + --token $TRUNK_API_TOKEN \ + --variant ios +``` + +Variant names are displayed in brackets next to test names in your dashboard: + +

The same test, but the first is a macOS variant.

+ +### Running and Quarantining Tests + +You can also execute tests and upload results to Trunk in a single step using the `test` command to **wrap** your test command. + +This is especially useful for [Quarantining](/flaky-tests/quarantining), where the Trunk CLI will **override the exit code** of the test command if all failures can be quarantined, **preventing** flaky tests from failing your builds in CI. + + + + + +Trunk can accept JUnit XMLs through the `--junit-paths` argument: + +``` +./trunk-analytics-cli test --junit-paths "test_output.xml" \ + --org-url-slug \ + --token $TRUNK_API_TOKEN \ + +``` + + + + + +Trunk can accept Bazel through the `--bazel-bep-path` argument: + +``` +./trunk-analytics-cli test --bazel-bep-path \ + --org-url-slug \ + --token $TRUNK_API_TOKEN \ + +``` + + + + + +Trunk can accept XCode through the `--xcresult-path` argument: + +``` +./trunk-analytics-cli test --xcresult-path \ + --org-url-slug \ + --token $TRUNK_API_TOKEN \ + +``` + + + + + + +#### Service Availability and Graceful Degradation + +Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. Read more at [Quarantine Service Availability](/flaky-tests/quarantine-service-availability) + + +#### Upload failure vs test failure + +We use the `SOFTWARE` exit code (70) if the upload fails. + +If you use the `test` command and tests fail without the failures being quarantined, we return the provided exit code from the wrapped execution. + +If you use the `upload` command, we return exit code `FAILURE` or the exit code provided with the `--test_process_exit_code` argument. + +### Validating reports locally + +You can validate the test reports produced by your test frameworks before you set up Trunk in your CI jobs. This is currently **only available for XML reports**. 
+ +You can run the validate command like this: + +``` +./trunk-analytics-cli validate --junit-paths "test_output.xml" +``` + +The `validate` command will output any problems with your reports so you can address them before setting up Trunk in CI. + +```sh +Validating the following 1 files: + File set matching junit.xml: + junit.xml + +junit.xml - 1 test suites, 0 test cases, 0 validation errors + +All 1 files are valid! ✅ +Navigate to https://app.trunk.io/onboarding?intent=flaky+tests to continue using Trunk Flaky Tests! 🚀🧪 +``` + +### Using custom CI systems + +The CLI is preconfigured to work with a set [ci-providers](/flaky-tests/get-started/ci-providers "mention") but can be used with any CI system by passing [#environment-variables](/flaky-tests/get-started/ci-providers/otherci#environment-variables "mention") to the uploader. + +> More information on using [otherci](/flaky-tests/get-started/ci-providers/otherci "mention") is documented here. + +### Full command reference + +The `trunk` command-line tool can upload and analyze test results. The `trunk-analytics-cli` command accepts the following subcommands: + +| Command | Description | +| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `trunk-analytics-cli upload` | Upload data to Trunk Flaky Tests. | +| `trunk-analytics-cli validate` | Validates if the provided JUnit XML files and prints any errors. | +| `trunk-analytics-cli test ` | Runs tests using the provided command, uploads results, checks whether the failures are [quarantined](/flaky-tests/quarantining) tests, and correct the exit code based on that. | + +The `upload` and `test` commands accept the following options: + +
ArgumentDescription
--junit-paths <JUNIT_PATHS>Path to the test output files. File globs are supported. Remember to wrap globs in "" quotes
--bazel-bep-path <BEP_JSON_PATH>Path to a JSON serialized Bazel Build Event Protocol. Trunk will use the BEP file to locate test reports. Your test frameworks must still output compatible report formats.
--xcresult-path <XCRESULT_PATH>Path to a .xcresult directory, which contains test reports from xcodebuild.
--org-url-slug <ORG_URL_SLUG>Trunk Organization slug, from the Settings page.
--token <TOKEN>Trunk Organization (not repo) token, from the Settings page. Defaults to the TRUNK_API_TOKEN variable.
-h, --helpPrints detailed help for the command.
--repo-rootPath to the repository root. Defaults to the current directory.
--repo-url <REPO_URL>Value to override URL of repository. Optional.
--repo-head-sha <REPO_HEAD_SHA>Value to override SHA of repository head. Optional.
--repo-head-branch <REPO_HEAD_BRANCH>Value to override branch of repository head. Optional.
--repo-head-commit-epoch <REPO_HEAD_COMMIT_EPOCH>Value to override commit epoch of repository head. Optional.
--codeowners-path <CODEOWNERS_PATH>Value to override CODEOWNERS file or directory path. Optional.
--allow-empty-test-resultsDon't fail commands if test results are empty or missing. Use it when you sometimes skip all tests for certain CI jobs. Defaults to true.
--variant <VARIANT_NAME>Upload tests to a specific variant group. Optional.
--test-process-exit-code <EXIT_CODE>Specify the exit code of the test previously run. This is used by the upload command to identify errors that happen outside of the context of the test execution (such as build errors).
+ + +**Memory Overhead** + +Running tests via `trunk-analytics-cli test` adds negligible memory overhead. + +This subcommand is a thin wrapper around your existing test command and doesn't modify or parallelize test execution. + +During execution, it simply: + +* Runs your provided test command directly. +* Records start and end times. +* Captures the exit code for quarantine decisions. + +\ +You can safely run the CLI even with large or memory-intensive suites, without risking additional OOMs in your CI agents. + diff --git a/flaky-tests/use-mcp-server.mdx b/flaky-tests/use-mcp-server.mdx new file mode 100644 index 0000000..312c9dc --- /dev/null +++ b/flaky-tests/use-mcp-server.mdx @@ -0,0 +1,32 @@ +--- +title: "Use MCP Server" +description: "Leverage the power of CI Autopilot from your IDE, or the AI application of your choosing" +--- +CI Autopilot comes with a [Model Context Protocol (MCP)](https://modelcontextprotocol.io/docs/getting-started/intro) server. AI applications like Claude Code or Cursor can use MCP servers to connect to data sources, tools, and workflows - enabling them to access key information and perform tasks. + +### Supported AI applications + +The following applications are currently supported: Cursor, Claude Code, Gemini CLI, and GitHub Copilot. + + +Gemini Code Assist and Windsurf are not supported due to their limited support for MCP servers + + +### API + +Our MCP server is available at `https://mcp.trunk.io/mcp` and exposes the following tools: + +
| Tool | Capability |
| --- | --- |
| `fix-flaky-test` | Experimental: Retrieve insights around a failing/flaky test |
| `setup-trunk-uploads` | Experimental: Create a setup plan to upload test results |
+ +### Authorization + +The Trunk MCP server supports the OAuth 2.0 + OpenID Connect standard for MCP authorization. + +### Get started + +**To get started, configure your AI application to communicate with Trunk's MCP server:** + +* [Cursor](/flaky-tests/use-mcp-server/configuration/cursor-ide) +* [GitHub Copilot](/flaky-tests/use-mcp-server/configuration/github-copilot-ide) +* [Claude Code CLI](/flaky-tests/use-mcp-server/configuration/claude-code-cli) +* [Gemini CLI](/flaky-tests/use-mcp-server/configuration/gemini-cli) diff --git a/flaky-tests/use-mcp-server/configuration.mdx b/flaky-tests/use-mcp-server/configuration.mdx new file mode 100644 index 0000000..96ca2df --- /dev/null +++ b/flaky-tests/use-mcp-server/configuration.mdx @@ -0,0 +1,5 @@ +--- +title: "Configuration" +description: "Cover imageClaude Codeclaude.pngclaude-code-cliCursorcursor.pngcursor-ideGitHub Copilotgithub copilot.pnggithub-copilot-ideGeminigemini.pnggemini-cli" +--- +
Cover image
* [Claude Code](claude-code-cli) — cover image: `claude.png`
* [Cursor](cursor-ide) — cover image: `cursor.png`
* [GitHub Copilot](github-copilot-ide) — cover image: `github copilot.png`
* [Gemini](gemini-cli) — cover image: `gemini.png`
diff --git a/flaky-tests/use-mcp-server/configuration/claude-code-cli.mdx b/flaky-tests/use-mcp-server/configuration/claude-code-cli.mdx new file mode 100644 index 0000000..74fa8d1 --- /dev/null +++ b/flaky-tests/use-mcp-server/configuration/claude-code-cli.mdx @@ -0,0 +1,56 @@ +--- +title: "Claude Code (CLI)" +description: "Add Trunk's MCP Server to Claude Code" +--- +### CLI setup + +Run the following command to add the MCP server configuration. If you want to only enable it for yourself, use `--scope user` instead. + +```bash +claude mcp add --transport http trunk https://mcp.trunk.io/mcp --scope project +``` + +Once completed, reopen Claude Code. + +### Alternative: Update MCP configuration + +Add the following [configuration](https://docs.anthropic.com/en/docs/claude-code/mcp) to your project's `.mcp.json` file. + +```json +{ + "mcpServers": { + "trunk": { + "url": "https://mcp.trunk.io/mcp", + "type": "http" + } + } +} +``` + +### Authentication + +After the MCP server was added to Claude Code, users need to authorize to communicate with the server. Follow these steps to complete auth. + +**Step 1: Start Claude Code CLI** + +In your terminal, run `claude` . + +**Step 2: Run the mcp command** + +Claude Code should recognize that auth is required. Run `/mcp` to authenticate, select trunk, and hit Enter: + +
+ +**Step 3: Login & authorize** + +A new webpage will be opened. Log in with your Trunk account and follow the instructions to authorize Claude Code to communicate with the MCP server. + +**Step 4: Confirm** + +Follow instructions to get back to Claude Code. A confirmation should be shown: + +``` +Authentication successful. Connected to trunk. +``` + +**With auth completed, Claude Code will be able to fetch the tools exposed by Trunk's MCP server.** diff --git a/flaky-tests/use-mcp-server/configuration/cursor-ide.mdx b/flaky-tests/use-mcp-server/configuration/cursor-ide.mdx new file mode 100644 index 0000000..fbfbfc1 --- /dev/null +++ b/flaky-tests/use-mcp-server/configuration/cursor-ide.mdx @@ -0,0 +1,51 @@ +--- +title: "Cursor (IDE)" +description: "Add Trunk's MCP Server to Cursor" +--- +### One-click setup + +Use the "Add to Cursor" action to add the Trunk MCP server: + +

Add trunk MCP server to Cursor

+ +Once clicked, follow instructions to open the MCP configuration in Cursor. A new settings window to confirm the installation of the MCP server will be shown. Click on "Install" to proceed. + +
+ +### Alternative: Update MCP configuration + +Add the following [configuration](https://docs.cursor.com/en/context/mcp#model-context-protocol-mcp) to your project's `.cursor/mcp.json` file. If you want to enable it only for yourself, add it to `~/.cursor/mcp.json` instead. + +```json +{ + "mcpServers": { + "trunk": { + "url": "https://mcp.trunk.io/mcp" + } + } +} +``` + +### Authentication + +After the MCP server was added to Cursor, users need to authorize Cursor to communicate with the server. Follow these steps to complete auth. + +**Step 1: Open MCP Settings** + +Run `CMD+Shift+P` to open the command palette and choose `View: Open MCP Settings` + +**Step 2: Enable the Trunk MCP server** + +A "Needs authentication" status will be shown: + +
+ +**Step 3: Login & authorize** + +A new webpage will be opened. Login with your Trunk account and follow insturctions to authorize Cursor to communicate with the MCP server. + +**Step 4: Confirm** + +Follow instructions to get back to Cursor. With auth completed, Cursor will be able to fetch the tools exposed by Trunk's MCP server: + +
diff --git a/flaky-tests/use-mcp-server/configuration/gemini-cli.mdx b/flaky-tests/use-mcp-server/configuration/gemini-cli.mdx new file mode 100644 index 0000000..0e5a4c3 --- /dev/null +++ b/flaky-tests/use-mcp-server/configuration/gemini-cli.mdx @@ -0,0 +1,59 @@ +--- +title: "Gemini (CLI)" +description: "Add Trunk's MCP Server to Gemini" +--- +### CLI setup + +Run the following command to add the MCP server configuration. If you want to only enable it for yourself, use `--scope user` instead. + +```bash +gemini mcp add --transport http trunk https://mcp.trunk.io/mcp --scope project +``` + +Once completed, reopen Gemini. + +### Alternative: Update MCP configuration + +Add the following [configuration](https://github.com/google-gemini/gemini-cli/blob/v0.1.19/docs/tools/mcp-server#oauth-support-for-remote-mcp-servers) to your project's `.gemini/settings.json` file. + +```json +{ + "mcpServers": { + "trunk": { + "httpUrl": "https://mcp.trunk.io/mcp" + } + } +} +``` + +### Authentication + +After the MCP server was added to Gemini, users need to authorize to communicate with the server. Follow these steps to complete auth. + +**Step 1: Start Gemini CLI** + +In your terminal, run `gemini` . + +**Step 2: Run the mcp auth command** + +Run `/mcp auth trunk` to initiate the authentication and authorization flow. + +**Step 3: Login & authorize** + +A new webpage will be opened. Log in with your Trunk account and follow the instructions to authorize Gemini to communicate with the MCP server. + +**Step 4: Confirm** + +Follow instructions to get back to Gemini. A confirmation should be shown: + +``` +ℹ✅ Successfully authenticated with MCP server 'trunk'! + + +ℹRe-discovering tools from 'trunk'... + + +ℹSuccessfully authenticated and refreshed tools for 'trunk'. 
+``` + +**With auth completed, Gemini will be able to fetch the tools exposed by Trunk's MCP server.** diff --git a/flaky-tests/use-mcp-server/configuration/github-copilot-ide.mdx b/flaky-tests/use-mcp-server/configuration/github-copilot-ide.mdx new file mode 100644 index 0000000..0aa44a0 --- /dev/null +++ b/flaky-tests/use-mcp-server/configuration/github-copilot-ide.mdx @@ -0,0 +1,62 @@ +--- +title: "GitHub Copilot (IDE)" +description: "Add Trunk's MCP Server to Github Copilot" +--- +## One-click setup + +Use the "Add to VS Code" action to add the Trunk MCP server + +

Add trunk MCP server to VS Code

+ +### Command Palette setup + +Run `CMD+Shift+P` to open the Command Palette and choose `MCP: Add Server`. Choose `HTTP` and input `https://mcp.trunk.io/mcp`. Set the name to `trunk`. + +A new window will open to confirm the MCP configuration. It should show: + +```json +{ + "servers": { + "trunk": { + "url": "https://mcp.trunk.io/mcp", + "type": "http" + } + }, + "inputs": [] +} +``` + +### Alternative: Update MCP configuration + +Add the following [configuration](https://code.visualstudio.com/docs/copilot/chat/mcp-servers) to your project's `.vscode/mcp.json` file. + +```json +{ + "mcpServers": { + "trunk": { + "url": "https://mcp.trunk.io/mcp", + "type": "http" + } + } +} +``` + +### Authentication + +After the MCP server was added to Cursor, users need to authorize Cursor to communicate with the server. Follow these steps to complete auth. + +**Step 1: Start MCP server** + +Run `CMD+Shift+P` to open the Command Palette and choose `MCP: List Servers`. Choose `trunk` and select `Start Server` to authenticate. + +**Step 2: Login & authorize** + +A new webpage will be opened. Login with your Trunk account and follow insturctions to authorize GitHub Copilot to communicate with the MCP server. + +**Step 3: Confirm** + +Follow instructions to get back to GitHub Copilot. With auth completed, GitHub Copilot will be able to fetch the tools exposed by Trunk's MCP server. 
+ +``` +2025-09-10 12:49:16.975 [info] Discovered 2 tools +``` diff --git a/flaky-tests/use-mcp-server/mcp-tool-reference.mdx b/flaky-tests/use-mcp-server/mcp-tool-reference.mdx new file mode 100644 index 0000000..139cb89 --- /dev/null +++ b/flaky-tests/use-mcp-server/mcp-tool-reference.mdx @@ -0,0 +1,6 @@ +--- +title: "MCP Tool Reference" +description: "- Get root cause analysis: MCP tool reference: fix-flaky-test - Set up test uploads: MCP tool reference: setup-trunk-uploads" +--- +- [Get root cause analysis](/flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis): MCP tool reference: fix-flaky-test +- [Set up test uploads](/flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads): MCP tool reference: setup-trunk-uploads diff --git a/flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis.mdx b/flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis.mdx new file mode 100644 index 0000000..6d05822 --- /dev/null +++ b/flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis.mdx @@ -0,0 +1,70 @@ +--- +title: "Get root cause analysis" +description: "MCP tool reference: fix-flaky-test" +--- +### Overview + +The `fix-flaky-test` tool retrieves insights and historical failure analysis about a flaky test. This tool allows AI assistants to access investigation results and apply fixes directly in your development environment. + +\ +**Return Type:** Structured analysis data with fix recommendations. 
Structure: issue, root cause, proposed fix + +### Parameters + +#### Required Parameters + +| Parameter | Type | Description | +| ---------- | ------ | --------------------------------------------------------------- | +| `repoName` | string | Repository name in `owner/repo` format (e.g., `trunk-io/trunk`) | + +#### Optional Parameters + +| Parameter | Type | Description | +| --------- | ------ | ---------------------------------------------------------------------- | +| `fixId` | string | Specific fix identifier from CI Autopilot comment (e.g., `FIX-abc123`) | +| `orgSlug` | string | The name of your organization in the Trunk app | + +### Getting Parameter Values + +If your AI assistant doesn't have direct access to Git information, use these commands: + +**Get repository name:** + +```bash +git remote -v +``` + +Look for the repository name in the output (e.g., `trunk-io/trunk` from `git@github.com:trunk-io/trunk.git`) + +### Usage Examples + +#### With Fix ID + +``` +Fix the flaky test with ID +``` + +### Sample Response + +``` +Fix Flaky Tests Insight for + +Issue: The CI failure occurred during the "Run Mysql Migrations" step due to a ValidationException from AWS Secrets Manager. + +Root Cause: The SECRET_NAME being used to retrieve the secret value is malformed. The grep -oP "adminsecret.*" command is extracting the secret name along with surrounding JSON formatting (like quotes), which creates an invalid secret ID when passed to aws secretsmanager get-secret-value. + +Proposed Fix: Replace the problematic grep command with a proper JSON parser: + +- SECRET_NAME=$(aws secretsmanager list-secrets --filters Key=name,Values=adminsecret | grep Name | grep -oP "adminsecret.*") ++ SECRET_NAME=$(aws secretsmanager list-secrets --filters Key=name,Values=adminsecret | jq -r '.SecretList[0].Name') + +This fix is located in .github/actions/setup-k8s-and-migrate/action.yml at line 11. 
+``` + +### Error Handling + +| Error | Cause | Resolution | +| ------------------------------ | --------------------------------------------- | --------------------------------------------------------- | +| `Fix {fixId} not found` | Invalid or non-existent fix ID | Verify the fix ID from the original CI Autopilot comment | +| `fixId must be provided` | Missing required query parameter | Fix ID is required | +| Repository authorization error | Insufficient permissions or invalid repo name | Verify repository name format and your access permissions | diff --git a/flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads.mdx b/flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads.mdx new file mode 100644 index 0000000..7dc8c31 --- /dev/null +++ b/flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads.mdx @@ -0,0 +1,123 @@ +--- +title: "Set up test uploads" +description: "MCP tool reference: setup-trunk-uploads" +--- +### Overview + +The `setup-trunk-uploads` tool helps configure test result uploads to Trunk for flaky test detection and enhanced CI Autopilot analysis. This tool provides step-by-step instructions tailored to your specific test framework and CI provider combination. + +The tool guides you through a 4-step process: + +* [ ] **Configure Test Framework** - Modify your test configuration to output JUnit XML reports +* [ ] **Run Tests** - Execute at least one test to generate reports +* [ ] **Test Upload** - Manually upload a test report to verify connectivity +* [ ] **Configure CI** - Set up automated uploads in your CI pipeline + +\ +**Return Type:** Structured setup plan to generate test reports and upload to Trunk. Structure: project analysis and setup plan + +### Parameters + + +This agent needs to be called **once per test framework** used in your repository. If your repository uses multiple test frameworks (e.g., Jest for frontend, pytest for backend), call this tool once for each framework with the same `ci_provider`. 
+ + +#### Required Parameters + +| Parameter | Type | Description | +| --------------- | ------ | -------------------------------------------------------------------------------------------------------------------- | +| `testFramework` | string | The test framework used in your repository (e.g., `jest`, `pytest`, `mocha`) | +| `ciProvider` | string | Your CI provider (e.g., `github`, `circleci`) | +| `orgSlug` | string | Your organization slug. If not provided and you belong to multiple organizations, you'll be prompted to specify one. | + +### Supported values + +#### Test Frameworks + +* `android` - Android testing framework +* `bazel` - Bazel test runner +* `cypress` - Cypress end-to-end testing +* `gotestsum` - Go testing with gotestsum +* `jasmine` - Jasmine testing framework +* `jest` - Jest testing framework +* `karma` - Karma test runner +* `maven` - Maven Surefire/Failsafe testing +* `minitest` - Ruby minitest framework +* `mocha` - Mocha testing framework +* `phpunit` - PHPUnit testing framework +* `playwright` - Playwright testing framework +* `pytest` - Python pytest framework +* `rspec` - Ruby RSpec testing framework +* `rust` - Rust testing with cargo test +* `swift-testing` - Swift Testing framework +* `vitest` - Vitest testing framework +* `xctest` - Xcode XCTest framework + +#### CI Providers + +* `buildkite` - Buildkite pipelines +* `circleci` - CircleCI pipelines +* `drone` - Drone CI +* `github` - GitHub Actions +* `gitlab` - GitLab CI/CD +* `semaphore` - Semaphore CI +* `travis` - Travis CI +* `other` - Other CI providers (manual configuration) + +### Usage examples + +#### Basic setup + +``` +Use the setup-trunk-uploads tool with testFramework="jest" and ciProvider="github" +``` + +#### With Organization Slug + +``` +Use the setup-trunk-uploads tool with testFramework="pytest", ciProvider="circleci", and orgSlug="my-company" +``` + +#### Multiple Test Frameworks + +``` +Use the setup-trunk-uploads tool with testFramework="jest" and 
ciProvider="github" +Use the setup-trunk-uploads tool with testFramework="playwright" and ciProvider="github" +``` + +### Sample response + +The tool returns detailed setup instructions as plain text: + +``` +Project Analysis +- Test Framework: Vitest (detected from package.json and vitest.config.mts) +- CI Provider: GitHub Actions (detected from repository URL) +- Repository: agraebe/ci-autopilot-sample + +Setup Plan +To enable flaky test uploads to Trunk, you'll need to complete these 4 steps: + +1. Configure Vitest to output JUnit reports +Update your vitest.config.mts to include the JUnit reporter that will generate XML test reports. + +2. Run tests with the new configuration +Execute your tests to generate the JUnit XML report. + +3. Send a test upload to Trunk +Run a command to upload your first test results to Trunk using your API token. + +4. Configure GitHub Actions +Add a step to your GitHub Actions workflow to automatically upload test results on every CI run. +``` + +### Error handling + +| Error | Cause | Resolution | +| ------------------------------------------ | --------------------------------------------- | ------------------------------------------------------ | +| `Test framework is required` | `testFramework` parameter missing | Provide a supported test framework from the list above | +| `CI provider is required` | `ciProvider` parameter missing | Provide a supported CI provider from the list above | +| `User is not authenticated` | Missing or invalid authentication | Ensure you're properly authenticated with Trunk | +| `User is not a member of any organization` | No organization access | Create or join a Trunk organization | +| `No organizations found` | No accessible organizations | Create an organization in the Trunk app | +| Multiple organizations note | User belongs to multiple orgs, none specified | Provide explicit `orgSlug` parameter | diff --git a/flaky-tests/webhooks.mdx b/flaky-tests/webhooks.mdx new file mode 100644 index 
0000000..d496550 --- /dev/null +++ b/flaky-tests/webhooks.mdx @@ -0,0 +1,19 @@ +--- +title: "Webhooks" +description: "Use webhooks to automate custom flaky test workflows" +--- +Trunk provides webhooks for you to build custom integrations to automate workflows, like notifying your team when a test becomes flaky or automatically creating tickets to investigate flaky tests. Trunk already provides a Jira integration, and more are planned. Webhooks lets you build custom integrations for use cases that are not supported out of the box. + +[Svix](https://docs.svix.com/) powers webhooks for Trunk. You'll be using Svix to configure webhooks and you should familiarize yourself with the [Svix App Portal docs](https://docs.svix.com/app-portal) to learn more. + +### Supported Events + +Trunk lets you create custom workflows with **event-triggered webhooks**. Flaky Test events are named with a `test_case` prefix. You can find all the events that Trunk supports in the event catalog: + + +Open the referenced resource in a new tab. + + +You can also find guides for specific examples here: + +
* [Send a Slack Message](slack-integration)
* [Create a GitHub Issue](github-issues-integration)
* [Send a Microsoft Teams Message](microsoft-teams-integration)
* [Create a Linear Issue](linear-integration)
diff --git a/flaky-tests/webhooks/github-issues-integration.mdx b/flaky-tests/webhooks/github-issues-integration.mdx new file mode 100644 index 0000000..369a510 --- /dev/null +++ b/flaky-tests/webhooks/github-issues-integration.mdx @@ -0,0 +1,209 @@ +--- +title: "GitHub Issues integration" +description: "Learn how to automatically create GitHub Issues with Flaky Test webhooks" +--- +Trunk allows you to automate GitHub Issue creation through webhooks. This will allow you to create GitHub issues and auto-assign them to [CODEOWNERS](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) using Webhooks. + +

GitHub Issue created automatically with webhooks.

+ +This guide will walk you through integrating Trunk Flaky Tests with GitHub Issues through webhooks. You will be able to automatically generate GitHub issues for new flaky tests. This guide should take 15 minutes to complete. + +### 1. Create a GitHub Token + +Before you can create a webhook to automate GitHub Issue creation, you need to create an API token to authorize your requests. + +1. Navigate to GitHub Developer Settings under **Settings** > **Developer settings** +2. Under **Personal access token** > **Fine-grained tokens** > Click **Generate new token** +3. Name the token something like `Trunk Flaky Tests` so you can recognize your token and set it never to expire. +4. Select the repositories you need to create issues to under **Repository access** +5. Under **Permissions** > **Repository Permissions**, select **Read and Write** access for **Issues.** + +
+6. Click **Generate Token** and copy your API token. + +### 2. Add a new webhook + +Trunk uses Svix to integrate with other services, such as GitHub Issues through webhooks. + +You can create a new endpoint by: + +1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) +2. From your profile on the top right, navigate to **Settings** +3. Under **Organization > Webhooks**, click **Automate GitHub Issue Creation** + +
+4. Paste your GitHub repo's Issues endpoint into **Endpoint URL.** Your **Endpoint URL** should be formatted as: `https://api.github.com/repos/{OWNER}/{REPO}/issues`. You can verify the URL by visiting it in your browser, such as https://api.github.com/repos/trunk-io/docs/issues. +5. Review the transformation code automatically generated for GitHub issues. You can customize this transformation at any time. Learn more about [customizing transformations](#id-5.-customize-your-transformation). +6. Create the new endpoint. You will be redirected to the endpoint configuration view. + +If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints). + +### **3. Add custom headers** + +The GitHub Issues API requires some custom headers. You can configure custom headers in the endpoint configuration: + +1. You can add custom headers under **Webhooks > Advanced > Custom Headers.** +2. Fill in the **Key** and **Value** referencing the table below, and click the **+** button to add each header. + +You'll need to configure the following headers. + +| Key | Value | +| ---------------------- | ----------------------------- | +| `Accept` | `application/vnd.github+json` | +| `Authorization` | `Bearer ` | +| `X-GitHub-Api-Version` | `2022-11-28` | + +### 4. Customize your transformation + +Transformations are custom code snippets you can write to customize the GitHub issues created by the webhook. A working template transformation will be added automatically for your webhook, but you can further customize the behavior of this webhook. + +1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch. +2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation. +3. 
You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 5](#id-5.-test-your-webhook). + +The generated webhook template contains several configurable constants out of the box: + +
| Constant | Description |
| --- | --- |
| `GITHUB_ISSUE_LABEL_IDS` | (Optional) GitHub labels that will be assigned to issues created by Trunk. |
| `PRS_IMPACTED_THRESHOLD` | Issues will be created only for flaky tests that have impacted more PRs than the `PRS_IMPACTED_THRESHOLD`. You can adjust this value if you see many issues about low-impact flaky tests. |
+ +Here is the provided transformation for context. You can customize your GitHub Issues integration by following the [GitHub](https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#create-an-issue) and [Svix transformations](https://docs.svix.com/transformations#using-transformations) documentation. + + +The default transformation only creates issues when `newStatus === "flaky"`. If you also want to create issues for tests marked as **Broken** (consistently failing at a high rate), update the filter condition. For example, change `newStatus !== "flaky"` to `newStatus !== "flaky" && newStatus !== "broken"` to handle both statuses. + + +```javascript +/** + * @param webhook the webhook object + * @param webhook.method destination method. Allowed values: "POST", "PUT" + * @param webhook.url current destination address + * @param webhook.eventType current webhook Event Type + * @param webhook.payload JSON payload + * @param webhook.cancel whether to cancel dispatch of the given webhook + */ +// IDs of any labels you want added to the GitHub issue. +const GITHUB_ISSUE_LABEL_IDS = []; + +// Below are various configs to fine-tune when an issue is created. + +// At least this many PRs need to be impacted for an issue to be created. 
+const PRS_IMPACTED_THRESHOLD = 2; + +function handler(webhook) { + const impacted_prs = webhook.payload.test_case.pull_requests_impacted_last_7d; + const newStatus = webhook.payload.status_change.current_status.value; + + // Filter for only flaky tests that impact more than the provided threshold + if (newStatus !== "flaky" || impacted_prs < PRS_IMPACTED_THRESHOLD) { + webhook.payload = "canceled"; + webhook.cancel = true; + return webhook; + } + webhook.payload = { + "title":`Flaky Test: ${webhook.payload.test_case.name.substring(0, 25)} transitioned to ${webhook.payload.status_change.current_status.value}`, + "body": summarizeTestCase(webhook.payload), + "labels": GITHUB_ISSUE_LABEL_IDS, + // Uncomment this function for auto asignment + // "assignees": webhook.payload.test_case.codeowners.map((assignee)=>{ + // // Strip the `@` symbol from codeowners + // return assignee.slice(1) + // }) + } + return webhook +} + +function summarizeTestCase(payload) { + const { + status_change: { + previous_status + }, + test_case: { + name, + file_path, + status, + quarantine, + repository, + codeowners, + failure_rate_last_7d, + most_common_failures, + pull_requests_impacted_last_7d, + ticket, + html_url + } + } = payload; + // Construct a comprehensive issue body with key details + const issueBody = `See all details on the [Trunk Test Detail page](${html_url}) + +Transition time: ${status.timestamp} + +Latest failure: Dec 9, 2024 + +Severity (last 7 days): ${(failure_rate_last_7d * 100).toFixed(2)}% failure rate; impacting ${pull_requests_impacted_last_7d} PRs + +Ownership: this test is owned by ${(codeowners || ['@unassigned']).join(', ')} + +___ +__The most common failure reason (out of ${most_common_failures.length} identified failure reason) are:__ + +${ + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? 
Ask in slack.trunk.io + most_common_failures.map((failure, index) => { + return `**Reason #${index + 1}**: "${failure.summary}" \n` + }) +} + ` + return issueBody +} +``` + +#### Automatically Assign Issues with CODEOWNERS + +If you have [CODEOWNERS](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) configured for your GitHub repo, you can create issues with assignees using CODEOWNERS.\ +\ +You can uncomment the code block on lines 31-35 or use a snippet similar to: + +```javascript +"assignees": webhook.payload.test_case.codeowners.map((assignee)=>{ + // Strip the `@` symbol from codeowners + return assignee.slice(1) + }) +``` + + +**Limitations of CODEOWNERS** + +1. CODEOWNERS supports assigning files to teams, but GitHub doesn't support assigning issues to teams. **If you have team owners in your CODEOWNERS file, the requests will fail**. +2. If your code owners do not map 1:1 with GitHub users, you will need to provide your own mapping, or webhooks will fail. +3. The example payload provided for testing has the CODEOWNERS assigned to `@backend`. If you're testing following the instructions in [step 5](#id-5.-test-your-webhook), the delivery attempt can fail. + + +### 5. Test your webhook + +You can create test issues by delivering a mock webhook. You can do this by: + +1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send. +3. Click **Send Example** to test your webhook + +### 6. Monitoring webhooks + +You can monitor the events and the webhook's delivery logs in the **Overview** tab of an endpoint configuration view. + +You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. + +
+ +You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. + +
+ +### Congratulations! + +A GitHub Issue will now be created when a test's health status changes. You can further modify your transformation script to customize your issues. + +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) + +[Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) + +[Learn more about the GitHub Issues API](https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#create-an-issue) diff --git a/flaky-tests/webhooks/linear-integration.mdx b/flaky-tests/webhooks/linear-integration.mdx new file mode 100644 index 0000000..49637c4 --- /dev/null +++ b/flaky-tests/webhooks/linear-integration.mdx @@ -0,0 +1,347 @@ +--- +title: "Linear integration" +description: "Learn how to automatically create Linear issues with Flaky Test webhooks" +--- +Trunk allows you to automate Linear Issue creation through webhooks. This will allow you to create Linear issues and auto-assign according to [CODEOWNERS](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners). + +
+ +This guide will walk you through integrating Trunk Flaky Tests with Linear Issues through webhooks. You will be able to automatically generate Linear issues for **new flaky tests** found in your repo that **impact more than 2 PRs**. This guide should take 15 minutes to complete. + +Trunk also has a [built-in Linear integration](/flaky-tests/webhooks/linear-integration) for ticket creation. You only need to use webhooks if you want to automate ticket creation or need additional customization. + +### 1. Create a Linear Personal Access Token + +Before you can create a webhook to automate GitHub Issue creation, you need to create an API token to authorize your requests. + +1. In the Linear app, navigate to settings by holding `G` and pressing `S`, or by clicking on your profile on the top left and clicking **Settings**. +2. Under **Account** > **Security & Access** > **Personal API Keys**, Click **New API Key** to create a new access token. +3. Copy the new API key and save it in a secure location. You'll need to use this later. + +### 2. Add a new webhook in Trunk + +Trunk uses Svix to integrate with other services, such as creating Linear Issues through webhooks. + +You can create a new endpoint by: + +1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) +2. From your profile on the top right, navigate to **Settings** +3. Under **Organization > Webhooks**, click **Automate Linear Issues Creation.** + +
+4. Paste the Linear GraphQL API endpoint into **Endpoint URL**, which is: `https://api.linear.app/graphql`.
+5. Review the transformation code automatically generated for Linear issues. You can customize this transformation at any time. Learn more about [customizing transformations](#id-5.-customize-your-transformation).
+6. Create the new endpoint. You will be redirected to the endpoint configuration view.
+
+If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints).
+
+### **3. Add custom headers**
+
+The Linear GraphQL API requires some custom headers. You can configure custom headers in the endpoint configuration:
+
+1. You can add custom headers under **Webhooks > Advanced > Custom Headers.**
+2. Fill in the **Key** and **Value** referencing the table below, and click the **+** button to add each header.
+
+You'll need to configure the following headers.
+
+| Key | Value |
+| --------------- | ---------------------- |
+| `Authorization` | `YOUR_LINEAR_API_KEY` |
+
+### 4. Find your Linear Team, Project, and Label IDs
+
+You need to find your Linear team, project, and label IDs to create issues with the appropriate labeling. You can do this by querying your Linear project using cURL.
+
+#### Team ID
+
+First, you'll need to find your team ID so you can create Linear issues under the correct team. You can make a request in your terminal using cURL, or a similar tool.
+
+You'll need your Linear API key from [step 1](#id-1.-create-a-linear-personal-access-token).
+
+```bash
+curl \
+  -X POST \
+  -H "Content-Type: application/json" \
+  -H "Authorization: YOUR_LINEAR_API_KEY" \
+  --data '{
+    "query": "query Teams { teams { nodes { id name } }}"
+  }' \
+  https://api.linear.app/graphql
+```
+
+You will receive a response that contains your team UID, for example:
+
+```json
+{
+  "data": {
+    "teams": {
+      "nodes": [
+        {
+          "id": "9bd0672b-7766-4a7c-3233-8ce37fdbb790",
+          "name": "Your Linear Team"
+        }
+      ]
+    }
+  }
+}
+```
+
+#### Project ID
+
+If you want to create issues under a specific project, you'll need to find its project ID. You can use a query like this:
+
+```bash
+curl \
+  -X POST \
+  -H "Content-Type: application/json" \
+  -H "Authorization: YOUR_LINEAR_API_KEY" \
+  --data '{
+    "query": "query Projects { projects { nodes { id name } }}"
+  }' \
+  https://api.linear.app/graphql
+```
+
+You'll receive a response that contains your projects and their IDs, for example:
+
+```json
+{
+  "data": {
+    "projects": {
+      "nodes": [
+        {
+          "id": "ef19b35e-ce4f-4132-9705-811d4d6c8c08",
+          "name": "Flaky Tests"
+        }
+      ]
+    }
+  }
+}
+```
+
+#### Label ID
+
+If you want to create issues with a specific label, you'll need to find its label ID. You can use a query like this:
+
+```bash
+curl \
+  -X POST \
+  -H "Content-Type: application/json" \
+  -H "Authorization: YOUR_LINEAR_API_KEY" \
+  --data '{
+    "query": "query OrgLabels { organization { labels { nodes { id name } } }}"
+  }' \
+  https://api.linear.app/graphql
+```
+
+You'll receive a response that contains your labels and their IDs, for example:
+
+```json
+{
+  "data": {
+    "organization": {
+      "labels": {
+        "nodes": [
+          {
+            "id": "e0e9f98e-c90c-40cd-939e-06ff7bd57b45",
+            "name": "Feature"
+          },
+          {
+            "id": "ce07d3bd-dee8-4bf6-979e-778dd94f15af",
+            "name": "Bug"
+          },
+          {
+            "id": "536dd774-dc33-4e70-aecc-8b00d1f04a9d",
+            "name": "Improvement"
+          },
+          ...
+        ]
+      }
+    }
+  }
+}
+```
+
+### 5. Customize your transformation
+
+Transformations are custom code snippets you can write to customize the Linear issues created by the webhook. A working template transformation will be added automatically for your webhook, but you can further customize the behavior of this webhook.
+
+1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch.
+2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation.
+3. You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message [in step 6](#id-6.-test-your-webhook).
+
+The generated webhook template contains several configurable constants out of the box:
+
+| Constant | Description |
+| --- | --- |
+| `LINEAR_TEAM_ID` | (Required) Your Linear team ID. Learn about finding your team ID. |
+| `LINEAR_PROJECT_ID` | (Optional) The Linear project ID assigned to new issues. Learn more about finding your project ID. |
+| `LINEAR_LABEL_IDS` | (Optional) Array of label IDs assigned to new issues. Learn about finding your label IDs. |
+| `PRS_IMPACTED_THRESHOLD` | Issues will be created only for flaky tests that have impacted more PRs than the `PRS_IMPACTED_THRESHOLD`. You can adjust this value if you see many issues about low-impact flaky tests. |
+ +Here is the provided transformation for context. You can customize your Linear Issues integration by following the[ Linear API](https://studio.apollographql.com/public/Linear-API/variant/current/schema/reference) and [Svix transformations](https://docs.svix.com/transformations#using-transformations) documentation. + + +The default transformation only creates issues when `newStatus === "flaky"`. If you also want to create issues for tests marked as **Broken** (consistently failing at a high rate), update the filter condition. For example, change `newStatus !== "flaky"` to `newStatus !== "flaky" && newStatus !== "broken"` to handle both statuses. + + +```javascript +/** + * @param webhook the webhook object + * @param webhook.method destination method. Allowed values: "POST", "PUT" + * @param webhook.url current destination address + * @param webhook.eventType current webhook Event Type + * @param webhook.payload JSON payload + * @param webhook.cancel whether to cancel dispatch of the given webhook + */ + +// Your Linear Team ID from step 3 above. This is required! +const LINEAR_TEAM_ID = ""; +// The Linear project ID you want issues assigned to from step 3 above. Optional. +const LINEAR_PROJECT_ID = ""; +// IDs of any labels you want added to the linear issue. Optional. +const LINEAR_LABEL_IDS = []; + +// Below are various configs to fine-tune when an issue is created. + +// At least many PRs need to be impacted for an issue to be created. +const PRS_IMPACTED_THRESHOLD = 2; + +function handler(webhook) { + const impacted_prs = webhook.payload.test_case.pull_requests_impacted_last_7d; + const newStatus = webhook.payload.status_change.current_status.value; + + const resolvedProjectId = LINEAR_PROJECT_ID ? 
`"${LINEAR_PROJECT_ID}"` : undefined; + const resolvedLinearLabels = LINEAR_LABEL_IDS.map((id) => `"${id}"`).join(","); + + // Filter for only flaky tests that impact more than the provided threshold + if (newStatus !== "flaky" || impacted_prs < PRS_IMPACTED_THRESHOLD) { + webhook.payload = "canceled"; + webhook.cancel = true; + return webhook; + } + + const description = summarizeTestCase(webhook.payload); + + // modify the webhook object... + webhook.payload = {query: `mutation IssueCreate { + issueCreate( + input: { + title: "Flaky Test: ${webhook.payload.test_case.name}" + description: """${description}""" + teamId: "${LINEAR_TEAM_ID}" + projectId: ${resolvedProjectId} + labelIds: [${resolvedLinearLabels}] + } + ) { + success + issue { + id + title + } + } + } ` }; + return webhook; +} + +function summarizeTestCase(payload) { + const { + status_change: { + previous_status + }, + test_case: { + name, + file_path, + status, + quarantine, + repository, + codeowners, + failure_rate_last_7d, + most_common_failures, + pull_requests_impacted_last_7d, + ticket, + html_url + } + } = payload; + // Construct a comprehensive issue body with key details + const issueBody = `See all details on the [Trunk Test Detail page](${html_url}) + +Transition time: ${status.timestamp} + +Latest failure: Dec 9, 2024 + +Severity (last 7 days): ${(failure_rate_last_7d * 100).toFixed(2)}% failure rate; impacting ${pull_requests_impacted_last_7d} PRs + +Ownership: this test is owned by ${(codeowners || ['@unassigned']).join(', ')} + +___ +__The most common failure reason (out of ${most_common_failures.length} identified failure reason) are:__ + +${ + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? 
Ask in slack.trunk.io + most_common_failures.map((failure, index) => { + return `**Reason #${index + 1}**: "${failure.summary}" \n` + }) +} + +View the full stack trace on the [Test Detail page](${html_url}) + ` + return issueBody +} +``` + +#### (Optional) Automatic issue assignment + +If you have CODEOWNERS configured in your repo, it will be reported by Trunk in the webhook payload. You can use this to map different CODEOWNERS to Linear assignees. You can access CODEOWNERS in the payload like this: `webhook.payload.test_case.codeowners`. + +Since the way your owners map to your Linear user is unique to your team, you'll need to provide your own mapping to convert code owners to their **Linear ID**. + +You can modify your issue create payload like this to include an assignee: + +```javascript +webhook.payload = {query: `mutation IssueCreate { + issueCreate( + input: { + title: "Flaky Test: ${webhook.payload.test_case.name}" + description: """${description}""" + teamId: "" + projectId: "" + labelIds: [""] + // Add you assignee here: + assigneeId: "" + } + ) { + success + issue { + id + title + } + } + } ` }; +``` + +### 6. Test your webhook + +You can create test issues by delivering a mock webhook. You can do this by: + +1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send +3. Click **Send Example** to test your webhook + +### 7. Monitoring webhooks + +You can monitor the events and the webhook's delivery logs in the **Overview** tab of an endpoint configuration view. + +You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. + +
+ +You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. + +
+ +### Congratulations! + +A Linear Issue will now be created when a test's health status changes to **flaky** and **impacts more than 2 PRs**. You can further modify your transformation script to customize your issues. + +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) + +[Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) + +[Learn more about Linear's API](https://developers.linear.app/docs/graphql/working-with-the-graphql-api) diff --git a/flaky-tests/webhooks/microsoft-teams-integration.mdx b/flaky-tests/webhooks/microsoft-teams-integration.mdx new file mode 100644 index 0000000..b2ec6ec --- /dev/null +++ b/flaky-tests/webhooks/microsoft-teams-integration.mdx @@ -0,0 +1,228 @@ +--- +title: "Microsoft Teams integration" +description: "Trunk allows you to create custom workflows to send customized messages to Microsoft Teams channels through webhooks." +--- +Trunk allows you to create custom workflows to send customized messages to Microsoft Teams channels through webhooks. + +
+ +This guide will walk you through sending Microsoft Teams messages using event-triggered webhooks. By the end of this tutorial, you'll receive Microsoft Teams messages for test status changes. This guide should take 10 minutes to complete. + +### 1. Configure incoming webhooks for your team + +Microsoft has two different concepts for accepting incoming webhooks. **Connectors** that are being deprecated and **Workflows** that are for newly created teams. This guide is for the newer **Workflows**. The workflow for configuring webhooks is similar, but you may see small differences. You can find more about the soon to be deprecated connectors in [Microsoft's documentation](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook?tabs=newteams%2Cdotnet). + +1. Open the team in which you want to add the webhook and select the kebab menu (•••) from the upper-right corner. +2. Select **Workflows** from the dropdown menu. +3. Search for `Post to a channel when a webhook request is received`, select the workflow, then click **Next**. +4. Configure the workflow's **Microsoft Teams Team** and **Microsoft Teams Channel**, then click **Add workflow**. +5. When the workflow is added correctly, you can copy the URL displayed, then click **Done.** + +### 2. Add a new webhook + +Trunk uses Svix to integrate with other services, such as Microsoft Teams messages through webhooks. + +You can create a new endpoint by: + +1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) +2. From your profile on the top right, navigate to **Settings** +3. Under **Organization > Webhooks**, click **Teams** + +
+4. Paste your Microsoft Teams Workflow URL from [the previous step ](#id-1.-configure-incoming-webhooks-for-your-team)into **Endpoint URL**. +5. Review the transformation code automatically generated for Teams messages. You can customize this transformation at any time. Learn more about [customizing transformations](#id-3.-customize-your-transformation). +6. Create the new endpoint. You will be redirected to the endpoint configuration view. + +### 3. Customize your transformation + +Transformations are custom code snippets you can write to customize the Microsoft Teams messages created by the webhook. A working template transformation will be added automatically for your webhook, but you can further customize the behavior of this webhook. + +1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch. +2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation. +3. You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 4](#id-4.-test-your-webhook). + +Below is an example of a webhook transformation to format the messages as [Actionable Messages](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using?tabs=cURL%2Ctext1). If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints). + +```javascript +/** + * @param webhook the webhook object + * @param webhook.method destination method. 
Allowed values: "POST", "PUT" + * @param webhook.url current destination address + * @param webhook.eventType current webhook Event Type + * @param webhook.payload JSON payload + * @param webhook.cancel whether to cancel dispatch of the given webhook + */ +function handler(webhook) { + // See https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using#send-adaptive-cards-using-an-incoming-webhook + webhook.payload = summarizeTestCase(webhook.payload); + + return webhook; +} + +function summarizeTestCase(payload) { + if (!payload || typeof payload !== 'object' || !payload.test_case) { + return { + type: "message", + attachments: [{ + contentType: "application/vnd.microsoft.card.adaptive", + contentUrl: null, + content: { + $schema: "http://adaptivecards.io/schemas/adaptive-card.json", + type: "AdaptiveCard", + version: "1.2", + body: [{ + type: "TextBlock", + text: "Error: Invalid or missing payload received by Trunk Flaky Test Webhook Transformation.", + color: "attention" + }] + } + }] + }; + } + + const { + test_case: { + name = "N/A", + file_path = "N/A", + status = {}, + quarantine = false, + repository = {}, + codeowners = [], + failure_rate_last_7d = 0, + most_common_failures = [], + pull_requests_impacted_last_7d = 0, + ticket = {}, + html_url = "N/A" + } + } = payload; + + const statusTimestamp = status.timestamp + ? new Date(status.timestamp).toLocaleString() + : "Unknown"; + + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? 
Ask in slack.trunk.io + const failureBlocks = most_common_failures.map(failure => ({ + type: "TextBlock", + text: `• ${failure.summary}`, + wrap: true, + spacing: "small" + })); + + return { + type: "message", + attachments: [{ + contentType: "application/vnd.microsoft.card.adaptive", + contentUrl: null, + content: { + $schema: "http://adaptivecards.io/schemas/adaptive-card.json", + type: "AdaptiveCard", + version: "1.2", + body: [ + { + type: "TextBlock", + text: name, + size: "large", + weight: "bolder" + }, + { + type: "TextBlock", + text: file_path, + isSubtle: true, + spacing: "none" + }, + { + type: "FactSet", + facts: [ + { + title: "Status", + value: `${status.value || "Unknown"} (${status.reason?.trim() || "N/A"})` + }, + { + title: "Last Updated", + value: statusTimestamp + }, + { + title: "Quarantine Status", + value: quarantine ? "Quarantined" : "Not Quarantined" + }, + { + title: "Failure Rate (7d)", + value: `${(failure_rate_last_7d * 100).toFixed(1)}%` + }, + { + title: "PRs Impacted (7d)", + value: pull_requests_impacted_last_7d.toString() + }, + { + title: "Codeowners", + value: codeowners.join(", ") || "None" + } + ] + }, + { + type: "TextBlock", + text: "Most Common Failures", + weight: "bolder", + spacing: "medium" + }, + ...failureBlocks, + { + type: "ActionSet", + actions: [ + { + type: "Action.OpenUrl", + title: "View Repository", + url: repository.html_url || "#" + }, + { + type: "Action.OpenUrl", + title: "View Test Details", + url: html_url || "#" + }, + { + type: "Action.OpenUrl", + title: "View Related Ticket", + url: ticket.html_url || "#" + } + ] + } + ] + } + }] + }; +} +``` + +### 4. Test your webhook + +You can send test messages to your Microsoft Teams channels as you make updates. You can do this by: + +1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send. +3. 
Click **Send Example** to test your webhook + +### 5. Monitoring webhooks + +You can monitor the events and the webhook's delivery logs in the **Overview** tab of an endpoint configuration view. + +You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. + +
+ +You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. + +
+ +### Congratulations! + +You should now receive notifications in your Teams channel when a test's status changes. You can further modify your transformation script to customize your messages. + +
+ +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) + +[Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) + +[Learn more about Microsoft Teams Workflow Webhooks](https://support.microsoft.com/en-us/office/create-incoming-webhooks-with-workflows-for-microsoft-teams-8ae491c7-0394-4861-ba59-055e33f75498) diff --git a/flaky-tests/webhooks/slack-integration.mdx b/flaky-tests/webhooks/slack-integration.mdx new file mode 100644 index 0000000..4499bdf --- /dev/null +++ b/flaky-tests/webhooks/slack-integration.mdx @@ -0,0 +1,162 @@ +--- +title: "Slack integration" +description: "Learn how to use flaky test webhooks to power Slack notifications" +--- +Trunk allows you to create custom workflows to send customized messages to Slack through webhooks. + +
+ +This guide will walk you through sending Slack messages using event-triggered webhooks. By the end of this tutorial, you'll receive Slack messages for test status changes. This guide should take 10 minutes to complete. + +### 1. Configure Slack webhooks + +Trunk uses Svix to integrate with other services, such as Slack, through webhooks. + +You can add the new Slack Webhook URL to Svix by following these steps: + +1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) +2. From your profile on the top right, navigate to **Settings** +3. Under **Organization > Webhooks**, click **Slack** + +
+4. Click **Connect to Slack** and select the server and channel to connect to. + +
+5. Review the transformation code automatically generated for Slack messages. You can customize this transformation at any time. Learn more about [customizing transformations](#id-2.-customize-your-transformation).
+6. By default, this connection will send messages about Trunk Merge and Flaky Tests events. If you only want Flaky Test events, unselect all events other than `test_case.status_changed`.
+7. Create the new endpoint. You will be redirected to the endpoint configuration view.
+
+If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints).
+
+### 2. Customize your transformation
+
+Transformations are custom code snippets you can write to customize the Slack messages sent by the webhook. A working template transformation will be added automatically for your webhook, but you can further customize the behavior of this webhook.
+
+1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch.
+2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation.
+3. You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 3](#id-3.-test-your-webhook).
+
+An example transformation script is provided below and you can customize your Slack integration by following the [Slack](https://api.slack.com/messaging/webhooks) and [Svix transformations](https://docs.svix.com/transformations#using-transformations) documentation.
+
+```javascript
+/**
+ * @param webhook the webhook object
+ * @param webhook.method destination method. 
Allowed values: "POST", "PUT" + * @param webhook.url current destination address + * @param webhook.eventType current webhook Event Type + * @param webhook.payload JSON payload + * @param webhook.cancel whether to cancel dispatch of the given webhook + */ +function handler(webhook) { + const payload = summarizeTestCase(webhook.payload) + + webhook.payload = payload + return webhook +} + +function summarizeTestCase(payload) { + if (!payload || typeof payload !== 'object' || !payload.test_case) { + return "Error: Invalid or missing payload."; + } + + const { + test_case: { + name = "N/A", + file_path = "N/A", + status = {}, + quarantine = false, + repository = {}, + codeowners = [], + failure_rate_last_7d = 0, + most_common_failures = [], + pull_requests_impacted_last_7d = 0, + ticket = {}, + html_url = "N/A" + } + } = payload; + + const statusSummary = `Status: ${status.value || "Unknown"} ` + + `(Reason: ${status.reason?.trim() || "N/A"}, ` + + `Updated: ${status.timestamp ? new Date(status.timestamp).toLocaleString() : "Unknown"})`; + + const quarantineStatus = quarantine + ? "This test is currently quarantined." + : "This test is not quarantined."; + + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? Ask in slack.trunk.io + const failureSummary = most_common_failures.map(failure => + `- ${failure.summary} (${failure.occurrence_count || 0} occurrences, ` + + `last seen: ${failure.last_occurrence ? 
new Date(failure.last_occurrence).toLocaleString() : "Unknown"})` + ).join("\n"); + + const repoLink = `Repository: ${repository.html_url || "N/A"}`; + const testLink = `Test Details: ${html_url}`; + const ticketLink = `Related Ticket: ${ticket.html_url || "N/A"}`; + const ownerSummary = `Codeowners: \`${codeowners.join(", ") || "None"}\``; + const statsSummary = `Failure rate (last 7 days): ${(failure_rate_last_7d * 100).toFixed(1)}% ` + + `| PRs Impacted: ${pull_requests_impacted_last_7d}`; + + return { + blocks: [ + { + type: "header", + text: { + type: "plain_text", + text: `Test Name: ${name}` + } + }, + { + type: "section", + text: { + type: "mrkdwn", + text: [ + `File Path: \`${file_path}\``, + statusSummary, + quarantineStatus, + `Most Common Failures:\n${failureSummary}`, + ownerSummary, + statsSummary, + repoLink, + testLink, + ticketLink + ].join("\n"), + }, + }, + ], + }; +}; +``` + +### 3. Test your webhook + +You can send test messages to your Slack channels as you make updates. You can do this by: + +1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send. +3. Click **Send Example** to test your webhook + +### 4. Monitoring webhooks + +You can monitor the events and the webhook's delivery logs in the **Overview** tab of an endpoint configuration view. + +You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. + +
+ +You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. + +
+ +### Congratulations! + +
+ +You should now receive notifications in your Slack workspace when a test's status changes. You can further modify your transformation script to customize your messages. + +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) + +[Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) + +[Learn more about the Slack API](https://api.slack.com/messaging/webhooks) diff --git a/introduction.mdx b/introduction.mdx new file mode 100644 index 0000000..5705aef --- /dev/null +++ b/introduction.mdx @@ -0,0 +1,263 @@ +--- +title: "Home" +description: "Ship software as fast as AI writes it" +mode: "custom" +--- + +
+
+
+

+ Ship software as fast as AI writes it +

+

+ AI generates code at machine speed, but code review, CI, and delivery + still move at human pace. Trunk closes that gap with flaky test + management and a merge queue built for high-throughput teams. +

+ +
+
+
+ +
+

+ Quick Start +

+

+ Most teams schedule a 30-minute call before integrating. You can also + explore the platform on your own in four steps. +

+ + + Sign up in the [Trunk app](https://app.trunk.io/signup) and create an + organization for your team. + + + Follow [Account + Setup](/setup-and-administration/connecting-to-trunk) to create your + workspace and install the Trunk GitHub App with the right repository + access. + + + Start with the [Merge Queue getting + started guide](/merge-queue/getting-started) or the [Flaky Tests getting + started guide](/flaky-tests/get-started), depending on whether queue + depth or CI noise hurts more today. + + + Use your API token to query Trunk programmatically. + + ```bash + curl -X POST https://api.trunk.io/v1/getQueue \ + -H "Content-Type: application/json" \ + -H "x-api-token: $TRUNK_API_TOKEN" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "my-org", + "name": "my-repo" + }, + "targetBranch": "main" + }' + ``` + + + +
+ +
+

+ Product Guides +

+

+ Trunk ships two products that work independently or together to keep + delivery fast and reliable. +

+ + + Detect, quarantine, and fix flaky tests with branch-aware analysis, + failure-type tracking, PR comments, and ticketing integrations. + + + Run a parallel merge queue with impacted targets, batching, bisection, + priority handling, and anti-flake protection. + + +
+ +
+

+ Platform Highlights +

+

+ The platform is built to work with the tools you already use while removing + the bottlenecks that slow delivery down at scale. +

+ + + Route independent pull requests through separate lanes instead of forcing + every change through one serialized line. + + + Keep flaky tests visible while preventing known failure modes from + blocking CI and backing up your queue. + + + Integrate Trunk with your own automation through REST APIs, webhooks, CLI + workflows, and the MCP server. + + +
+ +
+

+ Resources +

+

+ Dive deeper into setup, administration, integrations, and reference material + once your first workflow is live. +

+ + + Create your organization, manage access, review GitHub App permissions, + and understand billing, security, and support. + + + Explore Trunk REST APIs, webhook payloads, and auth patterns for custom + integrations. + + + Generate compatible test output across frameworks like Jest, Pytest, + XCTest, Cypress, Playwright, RSpec, JUnit, and more. + + + Connect Trunk's MCP server to Cursor, Claude Code, GitHub Copilot, or + Gemini for CI-assisted root cause analysis and automation. + + +
+ +
+

+ Support +

+

+ Get help with onboarding, evaluation, and ongoing rollout through the + channels your team already uses. +

+ + + Talk through your CI architecture, security review, and rollout plan with + the Trunk team. + + + Ask questions, share feedback, and get help from other engineers using + Trunk. + + + Review support availability, response expectations, and enterprise support + details. + + +
+ +
+
+
+
+

+ Ready to ship faster? +

+

+ Start with the guides, explore the APIs, or schedule time with the team + if you want help planning your first rollout. +

+
+ +
+
+
diff --git a/logo/dark.svg b/logo/dark.svg new file mode 100644 index 0000000..a299b17 --- /dev/null +++ b/logo/dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/logo/light.svg b/logo/light.svg new file mode 100644 index 0000000..530585c --- /dev/null +++ b/logo/light.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/merge-queue/administration.mdx b/merge-queue/administration.mdx new file mode 100644 index 0000000..95432aa --- /dev/null +++ b/merge-queue/administration.mdx @@ -0,0 +1,23 @@ +--- +title: "Administration" +description: "Configuration, integrations, and analytics for queue administrators." +--- +These pages are for repository administrators and platform engineers who configure and maintain the merge queue. Use this section to set up integrations, adjust queue behavior, and track performance metrics across your team. + +### Configuration + +[**Settings and configurations**](/merge-queue/administration/advanced-settings)\ +Manage queue behavior, GitHub integration, CI/CD configuration, and user preferences. + +### Integrations + +[**Slack integration**](/merge-queue/integration-for-slack)\ +Send real-time queue notifications to Slack channels. + +[**Webhooks**](/merge-queue/webhooks)\ +Integrate with external tools via HTTP webhooks for custom automation. + +### Analytics + +[**Metrics and monitoring**](/merge-queue/administration/metrics)\ +Track queue performance, identify bottlenecks, and measure optimization impact. diff --git a/merge-queue/administration/advanced-settings.mdx b/merge-queue/administration/advanced-settings.mdx new file mode 100644 index 0000000..899249b --- /dev/null +++ b/merge-queue/administration/advanced-settings.mdx @@ -0,0 +1,286 @@ +--- +title: "Settings and configurations" +description: "Explanation of settings for states, timeouts, concurrency, and branch protection." 
+--- +All of the following settings are specific to individual Merge Queues and can be accessed in two ways: + +* From the **Settings** menu: Navigate to `Settings > Repositories > Repo-Name > Merge Queue` +* From the **Merge Queue** tab: Select your repository, then click the **Settings** tab + + +Note that you must be an Organization admin to adjust any of these settings. + + +--- + +## Merge Queue state + +You can change the state of your Merge Queue to control whether new PRs can enter the queue and whether tested PRs will merge. PRs already testing will always complete their tests regardless of state. Below are the possible states: + +| State | Will PRs Enter the Queue? | Will PRs Merge After Testing? | Example use case | +| --- | --- | --- | --- | +| `Running` | Yes | Yes | Everyday merging: protects your mainline and merges successful PRs. | +| `Paused` | No | No | CI failure recovery: stop merges and testing in the queue until failure is resolved. | +| `Draining` | No | Yes | Code freeze: merge PRs currently in the queue but don't start testing additional PRs. | + +**Note:** The Merge Queue may automatically enter a `Switching Modes` state, which functions exactly like `Draining`. This occurs when you switch the queue mode while PRs are still being tested. + +### When to change merge queue state? + +The `Running` state is the default state of your merge queue, and will be the normal, day-to-day state of your queue. + +`Paused` is useful for CI incident response and failure recovery. For example, if there is a test infrastructure outage, a queue can be `Paused` until recovery is complete. The ordering of PRs in the queue is preserved, but no PRs are tested or merged. + +`Draining` is useful for managing events like code freezes. PRs currently in the queue will be tested and merged, but no new PRs will start testing. 
+ +--- + +## Merge Queue mode + +> Merge Queues operate in one of two modes, **Single** (default) or [**Parallel**](/merge-queue/optimizations/parallel-queues)**.** + +**Single Queue** processes all pull requests in one line, testing each PR predictively against all changes ahead of it. Multiple PRs can be tested and merged simultaneously based on your [Testing Concurrency](#testing-concurrency) and [Batching](#batching) settings. + +**Parallel Queues** dynamically creates multiple independent testing lanes based on each PR's impacted targets (the parts of the codebase it changes). PRs affecting different parts of the code can be tested in separate lanes, reducing wait times for repositories with distinct, independently-testable components. + +**Requirements for Parallel mode:** + +* Requires configuring a workflow to calculate and upload impacted targets for each PR +* The queue will wait for impacted targets before processing PRs + +Read more about [Trunk's implementation of Parallel merge queues](/merge-queue/optimizations/parallel-queues), supported build systems ([Bazel](/flaky-tests/get-started/frameworks/bazel), [Nx](/merge-queue/optimizations/parallel-queues/nx), or [custom API](/merge-queue/optimizations/parallel-queues/api)), and [what impacted targets are](/merge-queue/optimizations/parallel-queues#what-are-impacted-targets). + +--- + +## Merge Method + +Choose how your PRs get merged into the target branch. Options are Squash (default), Merge Commit, or Rebase. + +
+ +### Available Methods + +**Squash** (default) + +* Combines all commits from the PR into a single commit on the target branch +* Creates a clean, linear history with one commit per feature +* The commit message is generated from the PR's title and description +* Best for: Teams that prefer a clean history with one commit per logical change + +**Merge Commit** + +* Preserves all individual commits from the PR +* Creates an additional merge commit to mark the integration +* Maintains complete commit history from feature branches +* Best for: Teams that want to preserve detailed development history and commit attribution + +**Rebase** + +* Replays all commits from the PR on top of the target branch +* Creates a linear history without merge commits +* Each commit from the PR appears individually in the target branch's history +* Best for: Teams that want a linear history while preserving individual commits + +### Changing the Merge Method + +You can change your merge method at any time: + +1. Navigate to **Merge Queue** tab **>** repository **> Settings** +2. Find the **Merge Method** dropdown +3. Select your preferred method: Squash, Merge Commit, or Rebase +4. The new method will apply to all PRs merged after the change + + +**Note:** Changing the merge method only affects future merges. PRs already merged will retain their original merge method. + + +### Considerations + +* **Commit History Style**: Choose the method that matches your team's Git workflow preferences +* **Traceability**: Merge commits and rebase preserve more commit-level detail than squash +* **Repository Size**: Squash merging can help keep repository history more concise +* **Existing Workflows**: Match your existing GitHub merge button preferences for consistency across your team + +The merge method is configured per repository, so different repositories in your organization can use different methods based on their needs. 
+ +--- + +## Testing concurrency + +> Testing concurrency can be set to any value, options are **5 (average)**, **25 (high)**, **50 (very high),** and **Custom**. + +Configure how many PRs may be tested in parallel. A larger number may increase throughput since more PRs are tested in parallel, but at the expense of CI since more jobs are running in parallel. When the queue is at capacity, PRs will still be submitted to it, but they will not begin testing until a PR leaves the queue. + + +If your testing workload contains some flaky tests, a deeper queue (i.e., a higher concurrency) may struggle. Running Merge in Parallel mode can help with this, as it will reduce the average depth of your merge queue since all PRs won't be queued directly behind each other. + + +For example, assuming a concurrency of 3: + +* At 12:00, Alice submits PR 1000 to the Merge Queue, and it starts testing. +* At 12:05, Bob submits PR 888 to the Merge Queue, and it starts testing. +* At 12:10, Charlie submits PR 777 to the Merge Queue, and it starts testing. +* At 12:15, Alice submits PR 1001 to the Merge Queue. Tests do not start because the Merge Queue is at its concurrency limit. + +--- + +## Timeout for tests to complete + +> Select the number of hours from the dropdown, default is **5 hours**. + +Configure how long a PR's test can run before auto-cancelling while testing in the Merge Queue. If a long-running test is detected, Merge will automatically cancel the test. + +For example, assuming a timeout of 4 hours: + +* At 3:00, Bob submits PR 456 to the Merge Queue. +* At 3:05, PR 456 starts testing using Bob's CI system. +* At 7:05, Trunk cancels PR 456 since PR 456 is still testing. + +--- + +## Required Status Checks + +> Configure which CI status checks must pass before a PR can merge through the queue. + +By default, Trunk infers required status checks from your GitHub branch protection rules. 
You can override this by configuring required statuses directly in the Trunk UI, giving you independent control over which checks gate the merge queue. + +**When to configure in Trunk:** + +* **Different checks for the queue** - Your branch protection requires checks that shouldn't gate the merge queue (e.g., code coverage reports, deployment previews) +* **Stricter queue requirements** - You want the merge queue to require additional checks beyond what branch protection enforces +* **Multiple queues** - Each queue can have its own set of required statuses + +### How to configure + +1. Navigate to **Settings** > **Repositories** > your repository > **Merge Queue** +2. Find the **Required Status Checks** section +3. Use the CI job selector to choose which status checks must pass. The selector shows CI jobs that have been seen on recent PRs. +4. Selected statuses override the GitHub branch protection defaults for the merge queue + + +When required statuses are configured in Trunk, only those statuses are required for the merge queue. When not configured, Trunk falls back to your GitHub branch protection required checks. + + +--- + +## Optimistic Merge Queue + +> Toggle this feature **Enabled** or **Disabled**. Default is **Disabled**. + +[**Optimistic Merging**](/merge-queue/optimizations/optimistic-merging) allows multiple PRs to merge together at once when testing completes out of order. When [Testing Concurrency](#testing-concurrency) allows multiple PRs to test simultaneously, a PR later in the queue may finish before PRs ahead of it. Since that PR's tests include all the changes ahead of it, the system can safely merge all verified PRs together instead of waiting for each one individually, reducing merge time. + +--- + +## Direct Merge to Main + +Merge PRs immediately when they're already based on the tip of main and the queue is empty, skipping redundant testing. 
+ +* **Default:** Enabled +* **Trigger conditions:** PR is up-to-date with main + queue is empty + tests passed +* **Benefit:** Eliminates 5-30 minutes of wait time for up-to-date PRs +* **Best for:** Teams that keep PRs current with main before merging + +Toggle this setting in **Settings** > **Repositories** > your repository > **Merge Queue**. Learn more in [Direct Merge to Main](/merge-queue/optimizations/direct-merge-to-main). + +--- + +## Pending Failure Depth + +> Pending Failure Depth can be set to any value, options are **0** (default), **1**, **2**, **3**, and **Custom**. + +[**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth) allows a failed PR to remain in the queue temporarily while a configurable number of PRs behind it complete testing. Since predictive testing means the failed PR's code is retested as part of later PRs, this gives flaky tests multiple chances to pass before the PR is evicted from the queue. + +When set to **0** (default), failed PRs are immediately evicted from the queue. Any PRs behind the failed PR that were already testing will be restarted, since they were testing against a predicted future state of the branch that is no longer accurate. + +--- + +## Draft pull request creation + +> Toggle this feature **Enabled** or **Disabled**. Default is **Enabled**. + +[**Draft PR Creation**](/merge-queue/getting-started/configure-branch-protection#draft-pr-mode-recommended---default) determines whether Trunk Merge Queue creates draft PRs or push-triggered branches when testing changes. When enabled (default), the queue creates draft PRs to trigger your existing PR-based CI checks. When disabled, the queue creates `trunk-merge/` branches instead, requiring you to configure push-triggered workflows to run your required status checks. + +--- + +## GitHub comments + +> Toggle this feature **Enabled** or **Disabled**. Default is **Enabled**. 
+ +When enabled, Trunk posts comments on pull requests with merge queue status updates and instructions (e.g., "To merge this pull request, check the box to the left or comment `/trunk merge`"). + +**When to disable:** + +* **Testing and evaluation** - Validate the merge queue works with your CI setup without notifying your development team. Once configured and ready, re-enable comments to roll out to developers. +* **Custom tooling** - You're building your own bot or integration that will provide merge queue instructions to developers, making Trunk's default comments redundant. + +
+ +--- + +## GitHub commands + +> Toggle this feature **Enabled** or **Disabled**. Default is **Enabled**. + +Whether or not GitHub slash commands like `/trunk merge` are enabled for this merge queue. + +**When to disable:** + +* **API-only workflows** - You want all queue submissions to go through the public API (e.g., via a bot or custom automation) rather than individual developer commands. +* **Holding pattern** - You're temporarily restricting queue submissions while investigating issues, performing maintenance, or coordinating with your team. (Note: Consider using the Paused or Draining queue state if you want to stop all new PRs from entering the queue.) + +--- + +## Connect with Slack + +[Connect Trunk Merge Queue to Slack](/merge-queue/integration-for-slack) to receive real-time notifications about queue activity in a designated channel. After connecting, you can choose which events trigger notifications. + +**Available notifications include:** + +* Pull requests submitted to or removed from the queue +* Testing status updates (ready, in progress, passed, failed) +* Successful merges +* Queue configuration changes (pausing, mode changes, concurrency adjustments) +* Pull request cancellations + +--- + +## Batching + +> Toggle this feature **Enabled** or **Disabled**. Default is **Disabled**. + +[**Batching**](/merge-queue/optimizations/batching) tests multiple pull requests as a single unit instead of individually, dramatically reducing CI costs. + +### Bisection Testing Concurrency + +Configure how many PRs can be tested simultaneously during batch failure isolation (bisection). This setting is independent from the main Testing Concurrency and only applies when batches fail and need to be split to identify the failing PR. + +**Default:** Same as Testing Concurrency (automatically mirrors your main concurrency setting) + +**Recommended:** Set 2-5x higher than your main Testing Concurrency for faster failure isolation + +#### How to Configure + +1. 
Navigate to **Settings** > **Repositories** > your repository > **Merge Queue** > **Batching** +2. Ensure **Batching** is enabled +3. Set **Bisection Testing Concurrency** to your desired value +4. Monitor CI resource usage and adjust as needed + +For detailed guidance on using this setting effectively, see [Bisection Testing Concurrency in the Batching](/merge-queue/optimizations/batching#bisection-testing-concurrency) documentation. + +--- + +## Delete Merge Integration + + +CAUTION: Any queued merge requests will not be merged and all data will be lost. + +**Before deleting:** Ensure all important PRs in the queue are either merged manually or that you're prepared to resubmit them to a new queue. + + +This setting will delete the Merge Queue configuration and any queued merge requests will not be merged and all data will be lost. + +**When to use this:** + +* **Switching target branches** - If you need to change which branch the queue merges into (e.g., switching from a test branch during POC to `main` for production use), you must delete the current queue and create a new one pointing to your desired branch. +* **Removing Merge Queue** - You're decommissioning Merge Queue for this repository entirely. +* **Starting fresh** - You want to reset all configuration. diff --git a/merge-queue/administration/metrics.mdx b/merge-queue/administration/metrics.mdx new file mode 100644 index 0000000..f6aa1da --- /dev/null +++ b/merge-queue/administration/metrics.mdx @@ -0,0 +1,301 @@ +--- +title: "Metrics and monitoring" +description: "The Metrics and Monitoring dashboard provides deep analytics on your merge queue's performance, helping you identify bottlenecks, measure improvements, and optimize your workflow." +--- +The Metrics and Monitoring dashboard provides deep analytics on your merge queue's performance, helping you identify bottlenecks, measure improvements, and optimize your workflow. 
+ +Your merge experience directly impacts the velocity and productivity of your development team. Merge Queue Metrics provides observability for the **health** of your Trunk Merge Queue, so you can discover issues early and make informed optimizations. + +

The Health tab showing metrics in the Trunk Web App.

+ +### Access metrics + +You can access the metrics in your Trunk Merge Queue by navigating to the **Merge Queue** > **Health** tab. + + +CI Time and CI Jobs Triggered charts are only available for **GitHub Actions**. + + +### Filter Metrics by Impacted Targets + +When running in Parallel Mode, you can filter your merge queue health metrics by impacted targets to analyze performance for specific parts of your codebase. + +
+ +#### Why Filter by Impacted Targets? + +In repositories with multiple teams or distinct components (like a TypeScript/Python monorepo), different parts of your codebase may have different merge characteristics. Filtering by impacted targets helps you: + +* **Analyze team-specific performance** - See how PRs from different teams move through the queue +* **Identify bottlenecks by component** - Determine if certain targets have slower merge times +* **Optimize strategically** - Focus queue configuration improvements on your highest-priority code paths +* **Demonstrate value** - Show engineering leadership how parallel mode benefits specific teams or projects +* **Ensure fairness** - Verify that all teams experience similar queue performance + +#### How to Use the Filter + +1. Navigate to **Merge Queue** > your repository > **Health** tab in the Trunk web app +2. Locate the **Impacted Targets** filter dropdown at the top of the metrics dashboard +3. Select one or more targets to filter by: + * **All Targets** (default) - Shows aggregate metrics across all PRs + * **Specific target names** - Shows metrics only for PRs affecting that target (e.g., `frontend`, `backend`, `//services/api`) +4. All charts and metrics on the page will update to reflect only PRs impacting the selected targets + +#### Understanding the Data + +**Impacted targets are set when a PR enters the queue** + +Each PR's impacted targets are calculated based on which files changed and which parts of your codebase are affected. For details on how impacted targets are computed, see [Parallel Queues - Impacted Targets](/merge-queue/optimizations/parallel-queues#posting-impacted-targets-from-your-pull-requests). + +**PRs can affect multiple targets** + +A PR that changes both frontend and backend code will be counted in metrics when filtering by either `frontend` OR `backend`. This means the numbers may not sum to 100% when viewing multiple target filters separately. 
+ +**"All Targets" shows aggregate performance** + +Selecting "All Targets" displays metrics for every PR, regardless of which targets it impacts. This is the default view and shows overall queue health. + +#### Requirements + +**Parallel Mode must be enabled** + +Impacted target filtering is only available when your merge queue is running in Parallel Mode. Repositories in Single Mode do not track impacted targets. + +**Impacted targets must be uploaded** + +Your CI workflow must calculate and upload impacted targets for each PR. See the Parallel Queues documentation for setup instructions using: + +* Bazel +* Nx +* Custom build systems + +### Time buckets + +The date ranges selector at the top left of the dashboard allows you to filter the data displayed by date and time. You can display time buckets by the day or hour in the day/hour dropdown. + +The metrics displayed only include data that have **completed within the time range**, jobs started but not completed during the selected time **will not be displayed**. + + +When working across multiple time zones, enable **Time in UTC** to ensure everyone sees the same data. + + +### Conclusion count + +Conclusion count displays the number of pull requests that exited the merge queue during each time bucket. This includes passes, failures, and cancellations. Passes and failures signal a PR that was tested in the queue to completion, while canceled signals that the request to merge terminated before testing finished or before testing began. + +Conclusion counts are an important signal to potential bottlenecks or underlying issues with your merging process, as a failure or cancellation in the merge queue can force other PRs to **restart their testing**. A spike in the number of failures or passes can indicate a potential problem to investigate. + +Conclusions are tagged with a reason to give further insights into how merges pass or fail in the queue. 
You can show or hide conclusions of a particular reason by using the **+ Add** button. + +
+| Category | Reason | Description |
+| --- | --- | --- |
+| ✅ Pass | Merged by Trunk | Passed all tests in Merge Queue and merged by Trunk |
+| ✅ Pass | Merged manually | User manually merged the PR in Git |
+| ❌ Failure | Test run timeout | User-defined timeout for tests exceeded |
+| ❌ Failure | Failed Tests | Required test failed while testing the PR in the merge queue |
+| ❌ Failure | Merge conflict | A (git) merge conflict encountered |
+| ❌ Failure | Config parsing failure | Malformed trunk.yaml that couldn't be parsed |
+| ❌ Failure | Config bad version | Invalid version field in trunk.yaml |
+| ❌ Failure | Config bad required statuses | Failed to parse required statuses in trunk.yaml |
+| ❌ Failure | No required statuses | No source for required tests was found in trunk.yaml or branch protection settings |
+| ❌ Failure | GitHub API Failed | GitHub returned an error to us that could not be resolved while processing the PR |
+| ❌ Failure | PR updated at merge time | PR updated as Trunk was attempting to merge it |
+| 🚫 Cancel | Canceled by user | PR explicitly canceled by user |
+| 🚫 Cancel | PR closed | PR closed (not merged) |
+| 🚫 Cancel | PR pushed to | New commits pushed to the PR branch while in the merge queue |
+| 🚫 Cancel | PR draft | PR was converted to a draft, which cannot be merged |
+| 🚫 Cancel | PR base branch changed | Base branch of PR in the merge queue changed |
+| 🚫 Cancel | Admin requested | Trunk employee canceled PR during a support session (extreme cases) |
+| 🚫 Cancel | A PR in the stack had its base branch changed | A member of the PR stack had its base branch changed while in the queue (stacked PRs only) |
+| 🚫 Cancel | A PR in a PR stack was closed | A member of the PR stack was closed while in the queue (stacked PRs only) |
+| 🚫 Cancel | PR was merged as part of a different stack | The PR was already merged through a different stack (stacked PRs only) |
+| 🚫 Cancel | Part of this PR's stack was pushed to | New commits were pushed to a PR in the stack while in the queue (stacked PRs only) |
+ +### Time in queue + +Time in queue shows how long each PR spends in the Merge Queue from the moment the PR enters the queue to the moment when it exits the queue, either from merging, failing, or being canceled. + +Understanding the amount of time a pull request spends in the queue is important for ensuring your merge process continues to ship code quickly. A spike in the time to merge indicates a slowdown somewhere that's impacting all developers. For example, it's taking longer to run tests on PRs, PRs are waiting too long to start testing, or constant failures in the queue are causing PRs to take longer to merge + +The time in queue can be displayed as different statistical measures. You can show or hide them by using the **+ Add** button. + +| Measure | Explanation | +| ------- | --------------------------------------------------- | +| Average | Average of all time in queue during the time bucket | +| Minimum | The shortest time in queue in the time bucket. | +| Maximum | The longest time in queue in the time bucket. | +| Sum | The total of all time in queue added together. | +| P50 | The value below 50% of the time in queue falls. | +| P95 | The value below 95% of the time in queue falls. | +| P99 | The value below 99% of the time in queue falls. | + +--- + +### Prometheus metrics endpoint + +Trunk exposes merge queue metrics in [Prometheus text exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/) via a scrapable API endpoint. Use this to build custom Grafana dashboards, set up alerts, or integrate merge queue health into your existing observability stack. + + +The Prometheus metrics endpoint is available to all Merge Queue users. + + +#### Endpoint + +``` +GET https://api.trunk.io/v1/getMergeQueueMetrics +``` + +Authenticate with your [Trunk API token](/setup-and-administration/apis#authentication) using the `x-api-token` header. 
+ +**Query parameters:** + +| Parameter | Required | Description | +| ---------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `repo` | No | Repository in `owner/name` format (e.g., `my-org/my-repo`). If omitted, returns metrics for all repositories in the organization. Must be provided together with `repoHost`. | +| `repoHost` | Conditional | Repository host (e.g., `github.com`). Required if `repo` is specified. | + +The response uses content type `text/plain; version=0.0.4; charset=utf-8` (standard Prometheus format). + +#### Available metrics + +All metrics include these labels: + +| Label | Description | Example values | +| ------------ | ---------------- | --------------------- | +| `repo` | Repository name | `my-org/my-repo` | +| `branch` | Base branch name | `main`, `develop` | +| `queue_type` | Queue type | `main` or `bisection` | + +**Point-in-time gauges** + +These metrics reflect the current state of your merge queue. + +| Metric | Type | Description | +| -------------------------- | ----- | ----------------------------------------------------------------------------------------------------------------------- | +| `mq_depth_current` | Gauge | Number of PRs currently in the queue (excludes PRs that are waiting to be mergeable before being admitted to the queue) | +| `mq_awaiting_mergeability` | Gauge | Number of PRs waiting for prerequisites like required reviews or status checks | +| `mq_testing_slots_active` | Gauge | Number of PRs currently in TESTING state (active CI slots in use) | + +**Rolling 1-hour window metrics** + +These metrics summarize activity over a sliding 1-hour window. They update continuously as the window advances. 
+ +| Metric | Type | Extra labels | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ---------------------------------------- | ------------------------------------------------------------- | +| `mq_pr_conclusions_1h_total` | Gauge | `conclusion` (merged, failed, cancelled) | PRs that exited the queue in the last hour | +| `mq_pr_restarts_1h_total` | Gauge | — | PR restarts (TESTING to PENDING transitions) in the last hour | +| `mq_pr_wait_duration_1h_seconds` | Histogram | `le` (bucket boundary) | Distribution of time PRs spent waiting before testing starts | +| `mq_pr_test_duration_1h_seconds` | Histogram | `le` (bucket boundary) | Distribution of time PRs spent in the testing phase | +| Each histogram emits `_bucket{le="..."}`, `_sum`, and `_count` series. Bucket boundaries (in seconds): 60, 300, 600, 900, 1800, 3600, 5400, 7200, +Inf. | | | | + + +Rolling window metrics use **gauge semantics**, not true Prometheus counters. They represent a snapshot of the last hour, not cumulative totals. PromQL functions like `rate()` and `increase()` are **not meaningful** on these metrics. Use the values directly instead. + + +#### Scrape configuration + +Configure your Prometheus instance to scrape the Trunk metrics endpoint: + +```yaml +scrape_configs: + - job_name: trunk-merge-queue + scrape_interval: 60s + scheme: https + static_configs: + - targets: ['api.trunk.io'] + metrics_path: /v1/getMergeQueueMetrics + params: + repo: ['my-org/my-repo'] + repoHost: ['github.com'] + http_headers: + x-api-token: + values: [''] +``` + +To scrape metrics for all repositories in your organization, omit both the `repo` and `repoHost` parameters. + +#### Datadog Agent configuration + +You can ingest Trunk merge queue metrics into Datadog using the Datadog Agent's built-in [OpenMetrics integration](https://docs.datadoghq.com/integrations/openmetrics/). 
This lets Datadog scrape the Prometheus endpoint directly without requiring a separate Prometheus server. + +**1. Enable the OpenMetrics integration** + +Create or edit `/etc/datadog-agent/conf.d/openmetrics.d/conf.yaml`: + +```yaml +instances: + - openmetrics_endpoint: https://api.trunk.io/v1/getMergeQueueMetrics?repo=my-org/my-repo&repoHost=github.com + namespace: trunk_merge_queue + metrics: + - mq_.* + headers: + x-api-token: + min_collection_interval: 60 + send_distribution_buckets: true +``` + +To collect metrics for all repositories in your organization, omit the query parameters: + +```yaml + openmetrics_endpoint: https://api.trunk.io/v1/getMergeQueueMetrics +``` + +**2. Restart the Datadog Agent** + +```bash +sudo systemctl restart datadog-agent +``` + +**3. Validate** + +```bash +sudo -u dd-agent -- datadog-agent check openmetrics +``` + + +All metrics are prefixed with your configured `namespace` value. For example, `mq_depth_current` becomes `trunk_merge_queue.mq_depth_current` in Datadog. 
+ + +#### Example queries + +**Queue health alerts:** + +```promql +# Alert when queue depth exceeds threshold +mq_depth_current{branch="main"} > 10 + +# Max queue depth over the last hour +max_over_time(mq_depth_current{branch="main"}[1h]) + +# CI utilization (if you have 8 concurrency slots) +mq_testing_slots_active{branch="main",queue_type="main"} / 8 +``` + +**Failure analysis:** + +```promql +# Failure rate over the last hour +mq_pr_conclusions_1h_total{conclusion="failed"} + / +ignoring(conclusion) sum(mq_pr_conclusions_1h_total) + +# Alert on high failure count +mq_pr_conclusions_1h_total{conclusion="failed"} > 5 +``` + +**Duration analysis:** + +```promql +# P90 wait time (time before testing starts) +histogram_quantile(0.90, sum(mq_pr_wait_duration_1h_seconds_bucket) by (le)) + +# Average wait time +mq_pr_wait_duration_1h_seconds_sum / mq_pr_wait_duration_1h_seconds_count + +# Restart ratio (restarts per merge) +mq_pr_restarts_1h_total / mq_pr_conclusions_1h_total{conclusion="merged"} +``` + +#### Sample output + +``` +# HELP mq_depth_current PRs currently in the queue +# TYPE mq_depth_current gauge +mq_depth_current{repo="my-org/my-repo",branch="main",queue_type="main"} 4 + +# HELP mq_awaiting_mergeability Number of PRs currently awaiting mergeability +# TYPE mq_awaiting_mergeability gauge +mq_awaiting_mergeability{repo="my-org/my-repo",branch="main",queue_type="main"} 1 + +# HELP mq_testing_slots_active PRs currently in TESTING state +# TYPE mq_testing_slots_active gauge +mq_testing_slots_active{repo="my-org/my-repo",branch="main",queue_type="main"} 3 + +# HELP mq_pr_conclusions_1h_total PRs exiting the queue in last hour +# TYPE mq_pr_conclusions_1h_total gauge +mq_pr_conclusions_1h_total{repo="my-org/my-repo",branch="main",queue_type="main",conclusion="merged"} 12 +mq_pr_conclusions_1h_total{repo="my-org/my-repo",branch="main",queue_type="main",conclusion="failed"} 1 
+mq_pr_conclusions_1h_total{repo="my-org/my-repo",branch="main",queue_type="main",conclusion="cancelled"} 0 + +# HELP mq_pr_restarts_1h_total PR restarts in last hour +# TYPE mq_pr_restarts_1h_total gauge +mq_pr_restarts_1h_total{repo="my-org/my-repo",branch="main",queue_type="main"} 2 +``` diff --git a/merge-queue/getting-started.mdx b/merge-queue/getting-started.mdx new file mode 100644 index 0000000..1047bd7 --- /dev/null +++ b/merge-queue/getting-started.mdx @@ -0,0 +1,95 @@ +--- +title: "Getting Started" +description: "This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protec" +--- +This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protection rules to allow the merge queue to function properly. + +### Step 1: Install the GitHub App and create a Queue + + +**The Trunk GitHub App is required for Merge Queue to function.** It grants Trunk Merge Queue the necessary permissions to create test branches, read CI results, and merge PRs in your repository. View [detailed permissions and what Trunk uses them for](/setup-and-administration/github-app-permissions). + +The Trunk GitHub app can be added and removed from repositories within your org as needed. + + +1. [Sign in to app.trunk.io](https://app.trunk.io/login) and navigate to the **Merge Queue** tab. (First-time users will [create an organization](/setup-and-administration/connecting-to-trunk) before accessing Merge Queue.) +2. Click the **Create New Queue** button. + + +If the GitHub App is already installed, step 3 will be skipped automatically. + + +3. If the Trunk GitHub App is not already installed, you'll be prompted to install it. 
+ + +**You must be a GitHub admin to complete the following steps.** If you are not a GitHub admin in your organization, go to `Settings` → `Team Members` to invite a GitHub admin to your Trunk organization so they can complete the following. + +The GitHub App installation must be initiated from the Trunk web app to properly associate your Trunk organization with the GitHub App. If you have previously installed the Trunk GitHub App directly through GitHub, you'll need to uninstall it first and then reinstall it by starting the installation process from the Trunk web app as described below. + + +* Click **Install GitHub App** and follow the installation flow: + * Select whether to install on all repositories or only specific ones + * Review and approve the required permissions + * Complete the installation + * After the GitHub App installation is complete, you'll be returned to the Trunk dashboard. + * In the Merge Queue tab click the "New Queue" button. + +4. Select a repository from the dropdown and enter the target branch to merge into. Click **Create Queue.** + +
+ +### Step 2: Configure Branch Protection + +The merge queue needs specific GitHub permissions to function. Follow the [Branch Protection & Required Status Checks](/merge-queue/getting-started/configure-branch-protection) guide to: + +1. **Configure push restrictions** - Allow the `trunk-io` bot to push to your protected branch +2. **Disable “Require branches to be up to date before merging.” -** This setting is one of the most common sources of confusion. Many teams enable it to keep their branch green, but it conflicts with how merge queues work. If this is on, PRs will often sit in the “Queued” state forever because GitHub blocks Trunk from updating them. +3. **Exclude Trunk's temporary branches** - Ensure `trunk-temp/*` and `trunk-merge/*` branches are not protected. They are created and cleaned up automatically by the queue. + + +**Without proper branch protection configuration, the merge queue will not work.** You may see errors like "Permission denied on `trunk-merge/*` branch" or PRs will remain stuck in "Queued" state. + + +#### Optional: Enforce Merge Queue-Only Merges + +If you want your organization to merge *exclusively* through the merge queue: + +* Restrict who can push to your protected branch (e.g., main). +* Then allow the Trunk GitHub App as the only actor permitted to push to that branch. + +This setup ensures all merges flow through the queue and prevents developers from bypassing it accidentally. + +### Step 3: Test your setup + +Now that branch protection is configured, test that the merge queue works correctly: + +1. Create a test pull request in your repository +2. Submit it to the merge queue using one of these methods: + * **Checking the box** in the Trunk bot comment on your PR, or + * **Commenting** `/trunk merge` on the pull request + +
+ + +You can submit a PR to the merge queue at any time, even before CI checks pass or code review is complete. The PR will remain in "**Queued**" state until all required conditions are met, then automatically begin testing. + + +3. You can check the PR in the [Trunk Dashboard](https://app.trunk.io/) - once your PR passes all required checks, it will move from 'Queued' to 'Testing'. The merge queue will then test it again with changes ahead of it in the queue. When those tests pass, it will automatically merge. + +#### Troubleshooting common issues + + +Visit [Trunk Support](/setup-and-administration/support) for additional assistance or to contact the support team. + + +If your test PR doesn't merge automatically: + +* **Check the status comments for the PR in** the [Trunk Dashboard](https://app.trunk.io/) to see what it's waiting for +* **Stuck in "Queued"**: Usually means branch protection rules haven't passed (missing required status checks or code review) or there are merge conflicts. If the status looks correct but the PR still won't enter the queue, try [removing](/merge-queue/using-the-queue/reference#submitting-and-cancelling-pull-requests) and re-adding by commenting `/trunk merge` again on the PR. +* **Fails when attempting to merge**: Check that squash merges are enabled for your repository in GitHub settings (`Settings > General > Allow squash merging`). Trunk Merge Queue requires squash merges to be enabled. +* **"Permission denied" errors**: Review the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide to ensure `trunk-temp/*` and `trunk-merge/*` branches aren't protected by wildcard rules like `*/*`. +* **Status checks not running**: Verify your CI is configured to run on draft PRs (or `trunk-merge/**` branches if using push-triggered mode). See the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide for details. 
+ +### Step 4: Configure advanced features + +Once the basic merge queue is working, you can enable optimizations to improve performance, such as [batching](/merge-queue/optimizations/batching) PRs together or [allowing failed pull requests to merge](/merge-queue/using-the-queue/handle-failed-pull-requests) if others are passing. diff --git a/merge-queue/getting-started/configure-branch-protection.mdx b/merge-queue/getting-started/configure-branch-protection.mdx new file mode 100644 index 0000000..00b8142 --- /dev/null +++ b/merge-queue/getting-started/configure-branch-protection.mdx @@ -0,0 +1,119 @@ +--- +title: "Configure branch protection" +description: "Before configuring branch protection:" +--- +### Prerequisites + +Before configuring branch protection: + +* [ ] Trunk GitHub App installed and queue created (previous step) +* [ ] Repository has CI/CD configured (GitHub Actions, CircleCI, etc.) +* [ ] CI runs on pull requests and reports status checks to GitHub +* [ ] You have admin access to repository settings + +### How Trunk Merge Queue works + +Trunk Merge Queue respects GitHub's branch protection rules and works with both Classic branch protection rules and Rulesets. Since Merge Queue ultimately merges pull requests through GitHub, any protection rules on your target branch (like required code reviews or status checks) will still apply. + +### Choose your testing approach + +Trunk Merge Queue can test pull requests in two ways. Choose the approach that fits your CI setup: + +#### Draft PR mode (Recommended - Default) + + +**Best for:** Most teams who want the simplest setup with no additional configuration. + + +When a pull request enters the queue, Trunk creates a draft pull request to test the changes. This automatically triggers your existing pull request-based CI workflows, the same checks that run when you open a regular pull request. 
+ +**Advantages:** + +* No additional CI configuration required +* Works immediately with your existing workflows +* Simple to set up and maintain + +**Things to look out for:** + +* This mode also creates a `trunk-merge/` branch +* Trunk automatically closes the draft PRs and merges the original PRs + +**When to use a different approach:** If you have expensive preview deployments, review-only workflows, or security scans that you don't want running during merge queue testing, consider Push-triggered mode instead. + +#### Push-Triggered mode (Advanced) + + +**Best for:** Teams who need different CI behavior for merge queue testing versus pull request review. + + +When a pull request enters the queue, Trunk creates a `trunk-merge/*` branch and pushes to it. You configure specific CI jobs to run on these branches. + +**Advantages:** + +* Complete control over which jobs run during queue testing +* Avoid triggering expensive preview environments or review-only workflows +* Can optimize for faster merge queue throughput + +**Requirements:** + +* Configure push-triggered workflows in your CI provider for `trunk-merge/**` branches +* Define required status checks in your `.trunk/trunk.yaml` [configuration file](/merge-queue/getting-started/configure-ci-status-checks#if-using-draft-pr-mode-default) + +**To enable:** Go to **Settings** > **Repositories** > repository > **Merge Queue** > toggle **off** **Trunk Draft PR Creation**. + +### Configure Branch Protection Rules + +#### Using Rulesets vs. Classic Rules + +You can use GitHub's Rulesets feature alongside Classic branch protection rules—both systems work together. However, **push permission restrictions must be configured using Classic branch protection rules only** because GitHub's API does not expose push restriction settings from Rulesets. + +All other branch protection settings (required reviews, status checks, signed commits, etc.) can be configured using either Classic rules or Rulesets. 
+ +#### Configure Push Restrictions (Required) + +Trunk Merge Queue needs permission to push to your protected branch. Configure these settings using Classic branch protection rules: + +
+ +1. Go to **Settings > Branches** in your repository on GitHub. +2. Edit or create a Classic branch protection rule for your target branch (e.g., `main`) +3. Under "Rules applied to everyone including administrators," select: + * **Restrict who can push to matching branches** + * **Restrict pushes that create matching branches** +4. Add the `trunk-io` bot to the list of allowed actors +5. Optionally, add Organization admins and repository admins who need emergency merge access +6. Save your changes + + +**Important:** Regular users should use [pull request prioritization](/merge-queue/pr-prioritization) with `--priority=urgent` or `--priority=high` to fast-track pull requests through the queue while maintaining validation. Direct push access is only needed for rare emergencies where the queue itself must be bypassed. + + +#### Exclude Trunk's temporary branches (Critical) + +Trunk Merge Queue creates temporary branches to test pull requests before merging them: + +* `trunk-temp/*` - Temporary testing branches +* `trunk-merge/*` - Merge testing branches + + +**Trunk needs unrestricted access** to create, push to, and delete these branches. If your branch protection rules apply to these branches, Merge Queue cannot function. + + +**To verify and fix:** + +1. Go to **Settings > Branches** in your repository +2. Review all Classic branch protection rules +3. Check for wildcard patterns like `*/*`, `**/*`, or similar that would match `trunk-temp/*` or `trunk-merge/*` +4. If you find matching rules, either: + * **Option A:** Remove the wildcard rules and create more specific rules for your actual branches + * **Option B:** Add the `trunk-io` bot to the bypass list for those rules + +**Example of a problematic rule:** A branch protection rule with pattern `*/*` would protect all branches including `trunk-temp/*` and `trunk-merge/*`. 
+ +**What happens if these branches are protected:** Merge Queue will encounter GitHub permission errors and display messages like "Permission denied on trunk-merge/\* branch." + +### Next Steps + +→ [**Configure CI status checks**](/merge-queue/getting-started/configure-ci-status-checks) **-** Configure CI status checks for your branch. + +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. diff --git a/merge-queue/getting-started/configure-ci-status-checks.mdx b/merge-queue/getting-started/configure-ci-status-checks.mdx new file mode 100644 index 0000000..f05543e --- /dev/null +++ b/merge-queue/getting-started/configure-ci-status-checks.mdx @@ -0,0 +1,84 @@ +--- +title: "Configure CI status checks" +description: "Your existing pull request-triggered CI workflows will automatically run when Trunk creates draft pull requests to test changes. No additional configuration is required." +--- +### If using Draft PR mode (Default) + +Your existing pull request-triggered CI workflows will automatically run when Trunk creates draft pull requests to test changes. **No additional configuration is required.** + +Trunk will wait for the same required status checks configured in your branch protection rules (either via Classic rules or Rulesets) before merging. + + +You can also configure required status checks directly in the Trunk UI instead of relying on GitHub branch protection. See [Required Status Checks](/merge-queue/administration/advanced-settings#required-status-checks) in the settings documentation. 
+ + +See GitHub's documentation for configuring required status checks: + +* [Classic branch protection rules](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches#require-status-checks-before-merging) +* [Rulesets](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/about-rulesets) + +**You're done!** Skip to the Verification section below. + +### If using Push-Triggered mode + +You need to complete two additional steps: + +**Step 1: Configure Push-Triggered CI Workflows** + +Set up your CI provider to run status checks whenever Trunk pushes to `trunk-merge/*` branches. + +**Example for GitHub Actions:** + +```yaml +name: Merge Queue Tests +run-name: Merge Queue Checks for ${{ github.ref_name }} + +# Trigger when Trunk Merge Queue tests a pull request +on: + push: + branches: + - trunk-merge/** + +jobs: + unit_tests: + runs-on: ubuntu-latest + name: Unit Tests + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run tests + run: npm test # Your actual test commands + + integration_tests: + runs-on: ubuntu-latest + name: Integration Tests + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run integration tests + run: npm run test:integration # Your actual test commands +``` + +**For other CI providers:** Configure workflows triggered by pushes to branches matching `trunk-merge/**`. + +**Step 2: Define Required Status Checks in .trunk/trunk.yaml** + +Create or edit your `trunk.yaml` file in a directory named `.trunk` at the root of your repository (so, `.trunk/trunk.yaml`) to specify which status checks Trunk should wait for before merging: + +```yaml +version: 0.1 +merge: + required_statuses: + - Unit Tests + - Integration Tests +``` + +**Important:** The status check names in `.trunk/trunk.yaml` must exactly match the job names from your CI workflows. 
+ +### Next Steps + +→ [**Test your setup**](/merge-queue/getting-started/test-your-setup) - Verify everything is configured correctly before using Merge Queue in production. + +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. diff --git a/merge-queue/getting-started/install-and-create-your-queue.mdx b/merge-queue/getting-started/install-and-create-your-queue.mdx new file mode 100644 index 0000000..4905e21 --- /dev/null +++ b/merge-queue/getting-started/install-and-create-your-queue.mdx @@ -0,0 +1,53 @@ +--- +title: "Install and create your queue" +description: "This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protec" +--- +This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protection rules to allow the merge queue to function properly. + +### Prerequisites + +Before you begin, make sure you have: + +* [ ] Admin access to your GitHub organization +* [ ] A repository you want to protect with Merge Queue + + +**You must be a GitHub admin to complete the following steps.** If you are not a GitHub admin in your organization, go to `Settings` → `Team Members` to invite a GitHub admin to your Trunk organization so they can complete the following. + +The GitHub App installation must be initiated from the Trunk web app to properly associate your Trunk organization with the GitHub App. If you have previously installed the Trunk GitHub App directly through GitHub, you'll need to uninstall it first and then reinstall it by starting the installation process from the Trunk web app as described below. + + +### Install the Trunk GitHub App + +1. [Sign in to app.trunk.io](https://app.trunk.io/login) and navigate to the **Merge Queue** tab. 
(First-time users will [create an organization](/setup-and-administration/connecting-to-trunk) before accessing Merge Queue.) +2. Click the **Create New Queue** button at the top right corner of the window. + + +**The Trunk GitHub App is required for Merge Queue to function.** It grants Trunk Merge Queue the necessary permissions to create test branches, read CI results, and merge PRs in your repository. View [detailed permissions and what Trunk uses them for](/setup-and-administration/github-app-permissions). + +If the GitHub App is already installed, step 3 will be skipped automatically. + + +3. If the Trunk GitHub App is not already installed, you'll be prompted to install it. + 1. Click **Install GitHub App** and follow the installation flow: + 1. Select whether to install on all repositories or only specific ones + 2. Review and approve the required permissions + 3. Complete the installation + 4. After the GitHub App installation is complete, you'll be returned to the Trunk dashboard. + +### Create your first queue + +4. In the **Merge Queue** tab, click the **Create New Queue** button at the top right corner of the window. +5. Select a repository from the dropdown and enter the target branch to merge into. Click **Create Queue.** + +
+ +### What you just did + +You've installed the Trunk GitHub App on your organization and created your first merge queue for the specified branch (`main` in the example above). Trunk is now connected to your repository and ready to be configured. Your queue won't start processing pull requests until you complete the branch protection setup in the next step. + +### Next steps + +→ [**Configure branch protection**](/merge-queue/getting-started/configure-branch-protection) - Set up GitHub rules so Trunk can safely manage your merges + +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. diff --git a/merge-queue/getting-started/test-your-setup.mdx b/merge-queue/getting-started/test-your-setup.mdx new file mode 100644 index 0000000..2a870c5 --- /dev/null +++ b/merge-queue/getting-started/test-your-setup.mdx @@ -0,0 +1,53 @@ +--- +title: "Test your setup" +description: "After completing configuration, verify your setup:" +--- +### Prerequisites + +After completing configuration, verify your setup: + +* [ ] `trunk-io` bot is added to push restrictions for your protected branch +* [ ] No branch protection rules apply to `trunk-temp/*` or `trunk-merge/*` branches +* [ ] If using Draft PR mode: Required status checks are configured in GitHub branch protection +* [ ] If using Push-triggered mode: + * [ ] CI workflows trigger on `trunk-merge/**` branches + * [ ] `merge.required_statuses` is defined in `trunk.yaml` + +#### **Test your configuration** + +1. Create a test pull request +2. Comment `/trunk merge` on the pull request +3. Check the [Trunk Dashboard](https://app.trunk.io/) to monitor your pull request status +4. The pull request should appear in the queue as "Queued" until all checks complete +5. Click on the pull request in the dashboard to see detailed status of what it's waiting for +6. 
You'll also see status updates in the comments on your pull request + +**Expected behavior:** Your pull request should progress through testing and merge automatically once all required checks pass. + +### Next Steps + +🎉 **Congratulations!** Your Merge Queue is working. You're ready to use it with your team. + +#### Start using Merge Queue + +→ [**Submit and cancel pull requests**](/merge-queue/using-the-queue/reference) - Learn how to use the queue day-to-day + +#### Optimize your queue + +Ready to make it even better? Explore these optimizations + +→ [**Predictive Testing**](/merge-queue/optimizations/predictive-testing) - Prevent queue collapse and increase throughput + +→ [**Batching**](/merge-queue/optimizations/batching) - Merge multiple PRs together for faster processing + +→ [**Priority merging**](/merge-queue/optimizations/priority-merging) - Fast-track urgent PRs + +→ [**Anti-flake protection**](/merge-queue/optimizations/anti-flake-protection) - Handle flaky tests automatically + +#### Configure integrations + +→ [**Slack integration**](/merge-queue/integration-for-slack) - Get notifications in Slack + +→ [**Metrics and monitoring**](/merge-queue/administration/metrics) - Track your queue's performance + +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. diff --git a/merge-queue/integration-for-slack.mdx b/merge-queue/integration-for-slack.mdx new file mode 100644 index 0000000..6853ae8 --- /dev/null +++ b/merge-queue/integration-for-slack.mdx @@ -0,0 +1,160 @@ +--- +title: "Slack Integration" +description: "Push updates about your queue status to Slack to keep your team informed." +--- +Trunk Merge Queue integrates with Slack to send real-time notifications about queue activity and pull request state changes to a designated channel or directly to you as personal notifications. + +
+ +After you have Merge Queue set up and running in your repository, you can configure Slack notifications to receive alerts for various queue events. + +## Channel Notifications + +Send merge queue updates to a shared team Slack channel to keep everyone informed about queue activity. + +### Enable Merge Queue Notifications + +After you have Merge Queue set up and running in your repository, you can set up your integration with Slack under **Merge Queue** tab **>** repository **> Settings >** **Connect with Slack**. + +
+ +1. Navigate to `Settings > Repositories > [your repository] > Merge Queue` +2. Find the **Connect with Slack** setting and click **Connect** to install the Trunk Slack application +3. Authorize the app to post to your desired channel +4. You'll be redirected back to your settings page once authorization is complete + +### Configuring Channel Notification Preferences + +After connecting to a Slack channel, you can customize which notifications you want to receive. By default, all Merge Queue notifications are enabled. + +
+ +You can toggle individual notification types on or off. See Available Notification Topics below for descriptions of each notification type. + + +**Tip:** Want to receive these notifications as personal DMs instead of in a shared channel? Check out our [Personal Slack Notifications](#personal-slack-notifications) setup guide. + + +## Personal Slack Notifications + +Get direct messages in Slack about your PRs in the merge queue, keeping you informed without adding noise to team channels. + +Personal Slack notifications allow you to receive personalized Slack DMs when your PRs are queued, start testing, pass tests, get merged, or encounter issues. This keeps you up-to-date on the progress of your code through the merge queue without needing to check the web UI or monitor shared channels. + +### Setting up Personal Notifications + +To receive personal Slack notifications, you'll need to connect both your GitHub and Slack accounts to Trunk and configure your notification preferences. + + +**Note:** If your organization isn't already using merge queue Slack notifications to a shared channel, a Slack workspace admin may need to approve the Trunk Slack app before you can connect your personal account. See [Enable Merge Queue Notifications](#enable-merge-queue-notifications) for details on setting up the initial Slack integration. + + +#### **Steps to Enable Personal Notifications** + +1. **Navigate to User Settings** + * Go to **User Settings > Notifications** in the Trunk web app + * These settings are specific to you, so you can customize them however you prefer +2. **Connect GitHub Account** + * Click **Connect GitHub** to begin the OAuth flow + * This verifies that you own your GitHub account and allows Trunk to link your PRs to your user profile +3. 
**Connect Slack Account** + + * Click **Connect to Slack** to authorize the Trunk Slack app to send you direct messages + + > **Important:** If your organization hasn't already installed the Trunk Slack app for channel notifications, a Slack workspace admin may need to approve the app before you can receive personal notifications. +4. **Configure Notification Preferences** + * Enable the specific notifications you want to receive via Slack DM + * See Available Personal Notification Topics below for descriptions of each notification type +5. **Manage Connections** + * You can disconnect your GitHub or Slack accounts at any time by clicking the respective **Disconnect** buttons in User Settings + + +**Tip:** Want to receive these same notifications in a shared Slack channel instead? Check out our team [Channel Slack Notifications](#channel-notifications) setup guide. + + +
+ +## Slack App Home Dashboard + +The Trunk Slack app's **Home** tab provides a personal merge queue dashboard directly in Slack. Open the Trunk app in Slack and click the **Home** tab to see an overview of your merge queue activity. + +### What you'll see + +The Home tab displays the following sections: + +* **Account connection status** — Shows whether your Trunk and GitHub accounts are linked. If GitHub is not connected, a warning explains that most notifications require it, with a button to start the GitHub OAuth flow. +* **Not Ready PRs** — PRs you've submitted to the queue that are waiting for prerequisites (e.g., passing required checks, no merge conflicts) before entering active testing. Grouped by queue. +* **PRs in Queue** — Your PRs that are currently pending, testing, or have passed tests, with status indicators. Grouped by queue. +* **Recently Merged PRs** — Up to 5 of your most recently merged PRs with merge dates. Grouped by queue. +* **Failed PRs** — Your PRs that failed in the queue. Grouped by queue. +* **Notification Preferences** — Toggle buttons for each notification topic. You can enable or disable individual notifications directly from Slack without visiting the web UI. + +Each PR entry shows the PR title, number, a link to the Trunk dashboard, and a link to the GitHub PR. All data is scoped to your PRs via your linked GitHub account. + +### Linking your account + +1. Open the Trunk app in Slack and go to the **Home** tab +2. Click **Link Account** to connect your Trunk account +3. Click **Connect GitHub** to link your GitHub account (required for PR tracking and most notifications) + +### Managing notification preferences + +You can toggle notification topics on or off directly from the Home tab — no need to visit the Trunk web UI. Changes take effect immediately. The available topics are the same as those listed in [Available Notification Topics](#available-notification-topics). 
+ +Use the **Refresh** button at the top of the Home tab to update the view with the latest queue data. + +## Frequently Asked Questions + +
+ +Do I need both GitHub and Slack connected to receive personal notifications? + +Yes, both connections are required. The GitHub connection links your PRs to your Trunk user account, and the Slack connection enables direct messaging. + +
+ +
+ +What's the difference between personal notifications and channel notifications? + +[Personal notifications](#personal-slack-notifications) are sent directly to you via Slack DM and only include updates about your own PRs. + +[Channel notifications](#channel-notifications) are sent to a shared team channel and include updates about all PRs in the merge queue. + +You can use both simultaneously to stay informed personally while keeping your team updated. Learn more about setting up channel notifications. + +
+ +
+ +Can I customize which notifications I receive? + +Yes, in **Settings** > **Account** > **Notifications**, you can toggle individual notification topics on or off based on your preferences. For example, you might only want to be notified when your PR fails or gets merged, rather than at every stage. + +
+ +
+ +What happens if I disconnect my GitHub or Slack account? + +Disconnecting either account will stop personal Slack notifications. You can reconnect at any time through **Settings** > **Account** > **Notifications**. + +
+ +## Available Notification Topics + +Both channel and personal Slack notifications use the same notification topics. You can customize which events trigger notifications based on your preferences. + +
+ +| Notification | Description | +| --------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Merge is updated | The merge queue's configuration was changed. This covers anything that changes how the queue acts, including: pausing or draining the queue, changing its mode, changing testing concurrency, and so on. | +| Pull request is submitted for merging | A pull request has been [submitted to the queue](/merge-queue/using-the-queue/reference#submitting-and-cancelling-pull-requests) | +| Pull request is admitted to the queue and is waiting to be tested | A pull request has been admitted to the queue and will begin testing as soon as it can | +| Pull request is testing | Trunk merge has begun testing a pull request | +| Pull request has passed tests | Testing has passed on a pull request. The PR will be merged when it reached the top of the queue | +| Pull request is merged | A pull request submitted to the queue has successfully been merged into its target branch | +| Pull request fails | Testing failed on a pull request and it was removed from the queue or Trunk failed to merge the PR into its target branch | +| Pull request is canceled | A pull request has been canceled, either manually or due to it [reaching a configured testing timeout](/merge-queue/administration/advanced-settings#timeout-for-tests-to-complete) | +| Pull request failed and is waiting for PRs in front of it to finish testing |

A pull request failed testing, but the pull request is currently waiting before being kicked. This can happen for one of two reasons:
1. The pull request is not at the head of the queue, so it is waiting to determine if it is the source of the failure or if a PR it depends on is
2. Pending Failure Depth is enabled and the PR is waiting for other PRs below it to finish testing

| diff --git a/merge-queue/merge-queue.mdx b/merge-queue/merge-queue.mdx new file mode 100644 index 0000000..f700e5a --- /dev/null +++ b/merge-queue/merge-queue.mdx @@ -0,0 +1,73 @@ +--- +title: "Overview" +description: "Merge queue that guarantees branch stability and accelerates development at enterprise scale" +--- +If you've hit the limits of GitHub's serial merge queue - main turning red, CI costs spiraling, chaos at scale - Trunk Merge Queue is the enterprise upgrade built for reliability at any scale. Handle your noisiest pipelines, cut CI costs up to 90%, and fire and forget. + +--- + +### Benefits of using Trunk Merge Queue + +Trunk Merge Queue solves three critical problems that break traditional workflows at scale. + +#### #1: Stop main from turning red + +**The problem:** Flaky tests fail unpredictably. Your team mutes tests, locks branches, and gets paged to investigate. + +**How Trunk fixes it:** Failed PRs stay in queue while downstream PRs continue testing. If a later PR that includes the failed code passes, Trunk knows the failure was transient, both PRs merge together. + +**Key capabilities:** + +* Anti-flake protection with optimistic merging +* Pending failure depth prevents cascade failures +* Automatic quarantine of flaky tests + +→ Learn about [anti-flake protection](/merge-queue/optimizations/anti-flake-protection) + +--- + +#### #2: Stop CI costs from spiraling + +**The problem:** GitHub runs full CI for every PR. 50 PRs/day = 50 full runs. With growing teams, CI costs become seven figures. + +**How Trunk fixes it:** Intelligent batching tests up to 100 PRs in a single CI run. When a batch fails, automatic bisection isolates the culprit without ejecting the entire batch or requiring manual debugging. 
+ +**Key capabilities:** + +* Intelligent Batching +* Batch up to 100 PRs +* Auto-Bisection +* Configurable batch size & wait time + +→ See how [batching](/merge-queue/optimizations/batching) works + +--- + +#### #3: Stop waiting in a serial queue + +**The problem:** Single-track queue means your 2-line fix waits 45 minutes behind a slow feature PR testing an unrelated part of the codebase. + +**How Trunk fixes it:** Parallel queues create independent test lanes for non-overlapping changes. Frontend merges in Lane A while backend runs in Lane B. Native Bazel/Nx integration analyzes impacted targets automatically. + +**Key capabilities:** + +* Parallel Queues +* Bazel/Nx integration +* Impacted targets analysis +* Priority merging + +→ Explore [parallel queues](/merge-queue/optimizations/parallel-queues) + +--- + +### Try Trunk Merge Queue + +**Start with free trial:** + +1. Install Trunk GitHub App (5 minutes) +2. Create your first queue (2 minutes) +3. Submit a test PR + +**Total setup time: < 10 minutes** + +→ [Get started](/merge-queue/getting-started) diff --git a/merge-queue/migrating-from-github-merge-queue.mdx b/merge-queue/migrating-from-github-merge-queue.mdx new file mode 100644 index 0000000..0c717ef --- /dev/null +++ b/merge-queue/migrating-from-github-merge-queue.mdx @@ -0,0 +1,57 @@ +--- +title: "Migrate from GitHub Merge Queue" +description: "For teams switching from GitHub Merge Queues to Trunk Merge Queue, the process is straight forward." +--- +For teams switching from [GitHub Merge Queues](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/merging-a-pull-request-with-a-merge-queue) to Trunk Merge Queue, the process is straight forward. + + +Looking for a more detailed comparison between Trunk and GitHub's Merge Queues? 
[Learn more](https://trunk.io/trunk-vs-github-merge-queue) + + + +--- + +### Turn off GitHub Merge Queue + +To start, you will need to disable the existing merge queue for the target repository. This can be done by navigating to the repository and opening **Settings > Branches >** branch rule **>** toggle **off Require merge queue.** Be sure to click **Save changes** to confirm the settings. + + +Note that only users with admin permissions can manage merge queues for pull requests targeting selected branches of a repository. More information on [managing merge queues](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/managing-a-branch-protection-rule#creating-a-branch-protection-rule) can be found in the GitHub documentation. + + +--- + +### Enable Trunk Merge Queue + +Follow the [Getting Started](/merge-queue/getting-started) guide to set up your repo with Trunk Merge Queue and configure the [settings](/merge-queue/administration/advanced-settings) for your repository. + +--- + +### Pre-migration + +Before migrating fully, it may be useful to evaluate the workflows "quietly" and confirm settings before converting your repository to an entirely new workflow. + +Here are some useful steps to get you familiar with the Trunk Merge Queue workflow without disrupting engineers. + +#### Enable Trunk Merge for testing but with the automatic comments disabled + +While evaluating and testing Trunk Merge Queue for your team, we suggest disabling automatic comments on PRs. This can be done by toggling off GitHub Comments in the Trunk web app.
+ +This setting is found under **Merge Queue** tab **>** repository **> Settings >** toggle **off GitHub Comments.** + +#### Make the switch + +Once you have [configured settings](/merge-queue/administration/advanced-settings) and tested out the workflow quietly, turn off other merge tools (like [GitHub merge queue](#turn-off-github-merge-queue)), re-enable GitHub comments in the Trunk web app under the **Merge Queue** tab **>** repository **> Settings >** toggle **on GitHub Comments** + + +It is important that a repository is configured to use ONLY Trunk Merge Queue and no other merge queue tools for best results. + + +#### Share the news + +Now that you have migrated to Trunk Merge Queue, be sure to share the workflow with your team, [using-the-queue](/merge-queue/using-the-queue "mention")as a great place to start. + +--- + +### Getting help + +If you or your team are running into issues be sure to join the [Trunk Slack community](https://slack.trunk.io/) for assistance. diff --git a/merge-queue/optimizations.mdx b/merge-queue/optimizations.mdx new file mode 100644 index 0000000..1a97472 --- /dev/null +++ b/merge-queue/optimizations.mdx @@ -0,0 +1,33 @@ +--- +title: "Optimizations" +description: "The core concept of any merge queue is Predictive Testing: testing your pull request against the head of the main branch, including all pull requests ahead of it in the queue." +--- +The core concept of any merge queue is [**Predictive Testing**](/merge-queue/optimizations/predictive-testing): testing your pull request against the head of the `main` branch, including all pull requests ahead of it in the queue. + +While this is the foundation, achieving the scale necessary to merge thousands of PRs per day requires more advanced strategies. Trunk Merge Queue introduces a suite of powerful concepts designed to maximize throughput and maintain velocity, even in complex, high-traffic repositories. 
In fact, hitting a high scale is nearly impossible without leveraging features like optimistic merging, pending failure depth, and batching. + +This section explains each of these key concepts: + +#### Throughput and speed + +* [**Batching**](/merge-queue/optimizations/batching): Groups multiple compatible pull requests together into a single test run. This significantly increases merge throughput and can dramatically reduce CI costs by validating an entire batch with a single test run instead of one for each individual pull request. It is an essential feature for achieving high throughput. +* [**Parallel Queues**](/merge-queue/optimizations/parallel-queues): Allows for the creation of multiple independent queues that test and merge PRs in parallel. This feature is necessary for large monorepos and transforms the queue from a simple "line" into a more complex and efficient "graph". +* [**Testing Concurrency**](/merge-queue/administration/advanced-settings#testing-concurrency): A setting that defines the maximum number of pull requests that can be tested simultaneously. Fine-tuning this number is a powerful way to maximize merge velocity. It ensures a continuous flow of validated pull requests by keeping your CI runners fully utilized. + +#### Resilience and flake handling + +* [**Optimistic Merging**](/merge-queue/optimizations/optimistic-merging): Increases merge speed by leveraging test results from pull requests that are later in the queue. When a pull request (e.g., pull request 'c') passes testing, its success also verifies the changes from the pull requests ahead of it ('a' and 'b'). This allows the entire group of pull requests to be safely merged at once. +* [**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth): Allows the queue to continue testing subsequent pull requests even if an earlier one fails. 
Because predictive testing re-tests the failed PR's code along with the subsequent PRs, this feature gives the failed PR additional chances to pass. This prevents a single flaky test from halting all forward progress and makes the queue more resilient to intermittent failures. +* [**Anti-Flake Protection**](/merge-queue/optimizations/anti-flake-protection): Combining Optimistic Merging and Pending Failure Depth makes the queue more resilient to flaky tests. This inherent outcome allows the successful test of a later pull request to retroactively validate an earlier one that failed due to a transient issue. + + +**Note on flaky tests** + +While Anti-Flake Protection provides resilience to flaky tests through queue mechanics, they still delay merges. Trunk Flaky Tests addresses the root cause by automatically [detecting](/flaky-tests/detection) and [quarantining](/flaky-tests/quarantining) flaky tests at runtime while maintaining test visibility. For maximum throughput, [integrate Flaky Tests](/flaky-tests/get-started) to work alongside Anti-Flake Protection. + + +* [**Flaky Tests Quarantining**](/flaky-tests/quarantining) (via [Flaky Tests](/flaky-tests/overview)): Automatically detects and quarantines flaky tests to prevent their failures from blocking the merge queue. Quarantined tests continue running and uploading results for visibility, allowing your team to identify and fix them while eliminating false-negative blockages. This foundation of clean test signals is essential for achieving maximum queue throughput. + +#### Prioritization + +* [**Priority Merging**](/merge-queue/optimizations/priority-merging): Provides the ability to prioritize certain pull requests, allowing urgent changes or hotfixes to bypass the standard queue order and be tested and merged more quickly. 
diff --git a/merge-queue/optimizations/anti-flake-protection.mdx b/merge-queue/optimizations/anti-flake-protection.mdx new file mode 100644 index 0000000..ae92f4d --- /dev/null +++ b/merge-queue/optimizations/anti-flake-protection.mdx @@ -0,0 +1,111 @@ +--- +title: "Anti-flake protection" +description: "Some CI jobs fail for reasons unrelated to a PR's code change, such as due to flaky tests or a CI runner disconnecting. These failures are usually cleared when the CI job is rerun." +--- +### What it is + +Some CI jobs fail for reasons unrelated to a PR's code change, such as due to [flaky tests](https://trunk.io/blog/the-ultimate-guide-to-flaky-tests) or a CI runner disconnecting. These failures are usually cleared when the CI job is rerun. If a second PR that depends on the first **does** pass, it is very likely that the first PR was good and simply experienced a transient failure. + +Trunk Merge Queue can use the combination of [**Optimistic Merging** ](/merge-queue/optimizations/optimistic-merging)and [**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth) to merge pull requests that would otherwise be rejected from the queue. + + +If you have a lot of flaky tests in your projects, you should track and fix them with [Trunk Flaky Tests](/flaky-tests/overview). Anti-flake protection helps reduce the impact of flaky tests but doesn't help you detect, track, and eliminate them. + + +In the video below, you can see an example of this anti-flake protection: + + +Anti-flake protection with optimistic merging + pending failure depth + + +
| what's happening? | queue |
| --- | --- |
| A, B, C begin predictive testing | main <- A <- B+a <- C+ba |
| B fails testing | main <- A <- B+a <- C+ba |
| pending failure depth keeps B from being evicted while C tests | main <- A <- B+a (hold) <- C+ba |
| C passes | main <- A <- B+a <- C+ba |
| optimistic merging allows A, B, C to merge | merge A B C |
+ + +Optimistic Merging only works when the [Pending Failure Depth](#pending-failure-depth) is set to **a value greater than zero**. When zero or disabled, Merge will not hold any failed tests in the queue. + + +### Why use it + +* **Eliminate false negatives** - Flaky tests cause 20-40% of PR failures in typical pipelines. Anti-flake protection helps get these under control, so developers don't waste time investigating non-issues. +* **Maintain developer confidence** - When the queue rejects PRs for real reasons (not flaky tests), developers trust the system. Reduces "it's probably just flaky" dismissiveness of real failures. +* **Reduce manual retries** - Developers don't need to manually resubmit PRs or click "retry" when tests flake. Trunk handles it automatically, saving time and frustration. +* **Keep queue moving** - Flaky tests don't stall the queue. PRs that would have been blocked by transient failures merge successfully, increasing overall throughput. + +### How to enable + + +Anti Flake Protection is active when [**Optimistic Merge Queue**](/merge-queue/optimizations/optimistic-merging) is **On** and [**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth) is **set to a value greater than zero** + + +Enable Optimistic merging in **Settings** > **Repositories** > your repository > **Merge Queue** > toggle **On** **Optimistic Merge Queue**. + +Configure Pending Failure Depth in **Settings** > **Repositories** > your repository > **Merge Queue** > select a value from the **Pending Failure Depth** dropdown. 
+ +### Tradeoffs and considerations + +#### What you gain + +* **80-90% reduction in flaky test blocks** - Most flaky failures are caught and handled automatically +* **Developer time saved** - No manual retries or investigation of flaky failures +* **Higher queue throughput** - Flaky tests don't stall the queue +* **Better developer experience** - Less frustration with non-deterministic failures + +#### What you give up or risk + +* **Increased CI cost** - Retrying tests costs additional CI resources (typically 10-20% increase) +* **Slightly longer merge times** - PRs that fail then retry take longer than PRs that pass first time +* **Potential false positives** - Occasionally a legitimate failure might be retried (though Trunk is conservative) +* **Masks underlying problems** - Flaky tests indicate test quality issues; retrying treats symptom, not cause + +#### When NOT to use anti-flake protection + +Don't enable anti-flake protection if: + +* **Your tests are not flaky (< 2% flake rate)** - No benefit, only cost +* **CI resources are extremely limited** - Retries double test costs for flaky PRs +* **You're actively fixing flaky tests** - Better to fix than to mask +* **Flaky tests indicate real issues** - Sometimes "flaky" failures reveal race conditions or timing issues in your code + +#### When to use anti-flake protection + +Do enable anti-flake protection when: + +* **Flaky tests are blocking PRs (5-15% flake rate)** - Clear benefit outweighs cost +* **Fixing flaky tests will take time** - Use this as interim solution while improving test quality +* **Infrastructure flakiness** - Network timeouts, resource contention you can't control +* **Third-party dependencies are flaky** - External APIs or services cause transient failures + +#### The right long-term solution + + +️ **Anti-flake protection is a band-aid, not a cure.** + + +**The right approach:** + +1. **Enable anti-flake protection** - Unblock your team immediately +2. 
**Identify flaky tests** - Use CI analytics to find which tests flake most +3. **Fix the root causes** - Make tests deterministic, add retries at test level, improve infrastructure +4. **Reduce flake rate over time** - Goal should be < 2% flake rate +5. **Consider disabling** - Once tests are stable, anti-flake protection becomes unnecessary + +**Red flags indicating systemic issues:** + +* Flake rate > 20% (your tests are broken) +* Same tests flake repeatedly (specific tests need fixing) +* All flakes are in one area (infrastructure or test framework issue) + +#### Common misconceptions + +* **Misconception:** "Anti-flake protection lets me ignore flaky tests" + * **Reality:** NO! This is a temporary solution. Flaky tests are a code/test quality problem that must be fixed. Anti-flake protection buys you time to fix them properly. +* **Misconception:** "It retries all failures automatically" + * **Reality:** Trunk is selective. Only failures that match flaky patterns are retried. Legitimate failures still block PRs immediately. +* **Misconception:** "Anti-flake protection wastes tons of CI resources" + * **Reality:** Typical cost increase is 10-20% for teams with moderate flake rates. This is far less than the developer time wasted investigating flaky failures. +* **Misconception:** "I should set retry limit to 10 to catch all flakes" + * **Reality:** If you need 10 retries, your tests are catastrophically broken. Fix the tests! Retry limit should be 1-3 max. + +### Next Steps + +If you have a lot of flaky tests in your projects, you should track and fix them with [Trunk Flaky Tests](/flaky-tests/overview). Anti-flake protection helps reduce the impact of flaky tests but doesn't help you detect, track, and eliminate them. 
diff --git a/merge-queue/optimizations/batching.mdx b/merge-queue/optimizations/batching.mdx new file mode 100644 index 0000000..7c336bf --- /dev/null +++ b/merge-queue/optimizations/batching.mdx @@ -0,0 +1,390 @@ +--- +title: "Batching" +description: "Batching allows Trunk Merge Queue to test multiple pull requests together as a single unit, rather than testing them one at a time." +--- +### What it is + +Batching allows Trunk Merge Queue to test multiple pull requests together as a single unit, rather than testing them one at a time. + +When batching is enabled, Trunk intelligently groups compatible PRs and runs your test suite once for the entire batch. If the batch passes, all PRs in the batch merge together, dramatically reducing total test time. + +### Why use it + +* **Reduce total test time by 60-80%** - Instead of running your full test suite 10 times for 10 PRs, you run it 2-3 times for the same PRs grouped into batches. More PRs merged with less CI time. +* **Increase merge throughput** - Process 3-5x more PRs per hour compared to testing individually. A queue that handled 20 PRs/hour can now handle 60-100 PRs/hour with batching. +* **Lower CI costs** - Fewer test runs means lower CI/CD infrastructure costs. Teams report 50-70% reduction in CI minutes consumed by merge queue testing. +* **Faster time-to-production** - PRs spend less time waiting in queue. What used to take hours can now take minutes, getting features and fixes to production faster. + +### How to enable + + +Batching is **disabled by default** and must be explicitly enabled. + + +Batching is enabled in the Merge Settings of your repo at **Settings** > **Repositories** > your repository > **Merge Queue** > **Batching** and toggle batching **On**. + +#### Configuration options + +With Batching enabled, you can configure two options: + +* **Maximum wait time** - The maximum amount of time the Merge Queue should wait to fill the target batch size before beginning testing. 
A higher maximum wait time will cause the Time-In-Queue metric to increase but have the net effect of reducing CI costs per pull request. +* **Target batch size** - The largest number of entries in the queue that will be tested in a single batch. A larger target batch size will help reduce CI cost per pull request but require more work to be performed when progressive failures necessitate bisection. + + +A good place to start is with the defaults, Maximum wait time set to 5 (minutes) and Target batch size set to 4 (PRs). + + +### Excluding PRs from Batching + +Sometimes you need a specific PR to test in isolation, even when batching is enabled for your queue. You can prevent individual PRs from batching without changing your overall batching configuration. + +#### When to use this + +* **High-risk changes** — Infrastructure updates, database migrations, or changes that could affect other PRs in unpredictable ways +* **Debugging batch failures** — Isolate a suspected problematic PR to confirm it tests correctly on its own +* **Critical hotfixes** — Ensure a time-sensitive fix isn't delayed or affected by other PRs in a batch +* **Flaky PR isolation** — Test a PR with known flaky behavior separately to avoid impacting other PRs + +#### How to exclude a PR from batching + +**Option 1: Using the `/trunk merge` command** + +Add the `--no-batch` flag when submitting your PR: + +``` +/trunk merge --no-batch +``` + +**Option 2: Using the API** + +Set `noBatch: true` when calling the [`/submitPullRequest`](/merge-queue/reference/merge#post-submitpullrequest) endpoint: + +```bash +curl -X POST https://api.trunk.io/v1/submitPullRequest \ + -H "Content-Type: application/json" \ + -H "x-api-token: $TRUNK_API_TOKEN" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "my-org", + "name": "my-repo" + }, + "targetBranch": "main", + "pr": { + "number": 123 + }, + "noBatch": true + }' +``` + +#### How it works + +When a PR is submitted with no-batch: + +* **Queue position is 
unchanged** — The PR maintains its position in the queue based on when it was submitted +* **No restarts triggered** — Submitting a no-batch PR doesn't restart testing for other PRs already in the queue +* **Tests in isolation** — The PR is guaranteed to test by itself, not grouped with other PRs +* **Other PRs unaffected** — Batching continues normally for all other PRs in the queue + + +Excluding a PR from batching only affects that specific PR. Your queue's batching settings and other PRs remain unaffected. + + +### Bisection Testing Concurrency + +When a batch fails, Trunk automatically splits it apart (bisects) to identify which PR caused the failure. You can configure a separate, higher concurrency limit specifically for these bisection tests to isolate failures faster without impacting your main queue. + +
+ +#### Why Separate Bisection Concurrency? + +By default, bisection tests use the same concurrency limit as your main queue. This means: + +* Bisection can slow down other PRs waiting to merge +* Developers wait longer to learn which PR broke the batch +* Your main queue's throughput decreases during failure investigation + +With independent bisection concurrency, you can: + +* **Speed up failure isolation** - Run bisection tests at higher concurrency to identify problems faster +* **Maintain queue throughput** - Keep your main queue running at optimal capacity during bisection +* **Optimize each workflow independently** - Be aggressive about isolating failures without impacting successful PR flow + +#### How It Works + +When you set a higher bisection concurrency: + +1. **Main queue concurrency** controls how many PRs test simultaneously in the normal queue +2. **Bisection concurrency** controls how many PRs test simultaneously during failure isolation +3. Both run independently - bisection tests don't count against your main queue limit + +
+ +Example scenario: + +* Main queue concurrency: 5 +* Bisection concurrency: 15 +* Batch `ABCD` fails and needs to be split + +The bisection process can spin up 15 test runners to quickly isolate which PR failed, while your main queue continues processing 5 PRs normally. Developers get faster feedback about failures without slowing down successful merges. + +
+ +#### Configuring Bisection Concurrency + +Navigate to **Settings** > **Repositories** > your repository > **Merge Queue** > **Batching**: + +1. Enable **Batching** (if not already enabled) +2. Find the **Bisection Testing Concurrency** setting +3. Set a value higher than your main **Testing Concurrency** for faster failure isolation +4. Monitor your CI resource usage and adjust as needed + +#### Recommended Settings + + + + + +* Main queue concurrency: 5 +* Bisection concurrency: 10 +* Good for: Teams managing CI costs carefully + + + + + +* Main queue concurrency: 10 +* Bisection concurrency: 25 +* Good for: Teams with moderate CI capacity + + + + + +* Main queue concurrency: 25 +* Bisection concurrency: 50 +* Good for: Teams prioritizing fast feedback over CI costs + + + + + +#### When to Use Higher Bisection Concurrency + +Consider increasing bisection concurrency if: + +* Developers frequently wait for bisection results to know what to fix +* Your CI system has spare capacity during failure investigation +* Large batches fail and take a long time to isolate the culprit +* Fast feedback on failures is critical to your workflow + +#### Monitoring and Optimization + +Track these metrics to optimize your bisection concurrency: + +* **Time to isolate failures** - How long it takes to identify which PR broke a batch +* **CI resource usage during bisection** - Are you maxing out your runners? +* **Developer wait time** - How long developers wait for failure feedback +* **Main queue throughput during bisection** - Is bisection slowing down other PRs? + + +Start with bisection concurrency 2x your main queue concurrency, monitor the impact, and adjust based on your team's priorities and CI capacity. 
+ + +#### Best Practices + +✅ **Set bisection concurrency higher than main queue** - This is the whole point of the feature + +✅ **Monitor CI costs** - Higher bisection concurrency means more runners during failures + +✅ **Start conservative** - Begin with 2x main concurrency and increase gradually + +✅ **Combine with other optimizations** - Works best alongside Pending Failure Depth and Anti-flake Protection + +❌ **Don't set too high** - Extremely high bisection concurrency can overwhelm CI systems + +❌ **Don't set lower than main queue** - This defeats the purpose and slows down bisection + +### Test Caching During Bisection + +When a batch fails and Trunk splits it apart to identify the failing PR, the merge queue intelligently reuses test results it has already collected during the bisection process. This avoids redundant CI runs and speeds up failure isolation. + +#### How It Works + +During bisection, Trunk maintains a cache of test results as it progressively splits the failed batch. If the queue knows with certainty that a particular combination of PRs will fail (because it already tested that exact combination earlier in the bisection process), it skips running the test again and reuses the previous result. + +
+ +Example bisection with test caching + +1. Batch `ABCD` fails testing (main ← ABCD) +2. Trunk splits the batch: `AB` and `CD` +3. Tests `AB` (passes) and `CD` (fails) +4. Now Trunk needs to split `CD` further: `C` and `D` +5. Before testing, Trunk checks: "Have I already tested `C` or `D` individually?" +6. If `main ← ABCD` failed and `main ← AB` passed, Trunk knows `CD` contains the failure +7. When testing `main ← AB ← C`, if this combination was already tested earlier, reuse that result +8. Skip redundant CI runs and identify the failing PR faster + +
+ +#### Benefits + +**Faster failure isolation**: Skip tests you've already run during bisection, reducing time to identify the culprit PR + +**Significant CI cost savings**: Especially important for large batches or expensive test suites where redundant tests would waste substantial resources + +**Quicker developer feedback**: Developers learn which PR broke the batch sooner, allowing them to fix issues faster + +**Automatic optimization**: No configuration required - the merge queue automatically detects and reuses applicable test results + +#### When Test Caching Applies + +Test caching only applies during the bisection process when: + +1. **Batching is enabled** - This is a batching-specific optimization +2. **A batch has failed** and is being split to identify the failure +3. **The merge queue has already tested** a specific combination of PRs during the current bisection +4. **The test result is definitive** - The queue has high confidence the result would be the same + +Test caching does **not** apply to: + +* Initial batch testing (before any failures) +* PRs in the main queue that aren't undergoing bisection +* Tests that haven't been run yet in the current bisection process + +#### Example Scenario + +**Without test caching:** + +* Batch `ABCDEF` (6 PRs) fails +* First bisection: Test `ABC` and `DEF` (2 CI runs) +* `DEF` fails, need to split further +* Second bisection: Test `DE` and `F` (2 CI runs) +* `DE` fails, need to split further +* Third bisection: Test `D` and `E` (2 CI runs) +* Total: 6 CI runs to isolate the failure + +**With test caching:** + +* Batch `ABCDEF` fails - we know `ABCDEF` combination fails +* First bisection: Test `ABC` (passes) and identify `DEF` fails (no new test needed - we know from original batch) +* Second bisection: Test `DE` - if we've already tested this combination, reuse result +* Third bisection: Test `D` or `E` - reuse any already-known results +* Total: 2-4 CI runs instead of 6 + +The exact savings depend on your 
batch size, bisection pattern, and which combinations have already been tested. + +#### Best Practices + +✅ **Use with larger batch sizes** - More PRs in a batch means more opportunities to cache results + +✅ **Combine with bisection concurrency** - Fast bisection + test caching = maximum efficiency + +✅ **Enable batching** - This feature only works when batching is enabled + +✅ **Monitor your metrics** - Track CI spend and bisection time to see the impact + +❌ **Don't try to configure it** - Test caching is automatic and always enabled when batching + +❌ **Don't rely on it for flaky tests** - Caching assumes consistent test behavior; flaky tests may bypass caching for safety + +#### How This Works with Other Features + +Test caching complements other batching optimizations: + +* **Bisection Testing Concurrency** - Run bisection tests faster AND skip redundant ones +* **Pending Failure Depth** - Keep more PRs in queue during failure recovery +* **Optimistic Merging** - Merge successful batches while bisection runs in background + +Together, these features create a highly efficient batch failure recovery system that minimizes both time and CI cost. + + +**Note:** Test caching for batch failure isolation is automatically enabled for all repositories using batching mode. No configuration is required. + + +### Fine tuning batch sizes + +**Signs your batch size is too large:** + +* Batches frequently fail and need to be split +* Long wait times to form full batches +* Test suite times out or becomes unstable + +**Signs your batch size is too small:** + +* Not seeing significant throughput improvement +* Batches form immediately (could handle more PRs) +* Still consuming lots of CI resources + +**Optimal batch size depends on:** + +* Test suite speed (faster tests = larger batches) +* Test stability (more flaky tests = smaller batches) +* PR submission rate (more PRs = larger batches) + +### Tradeoffs and considerations + +The downsides here are very limited. 
Since batching combines multiple pull requests into one, you essentially give up the proof that every pull request in complete isolation can safely be merged into your protected branch. + +In the unlikely case that you have to revert a change from your protected branch or do a rollback, you will need to retest that revert or submit it to the queue to ensure nothing has broken. In practice, this re-testing is required in almost any case, regardless of how it was originally merged, and the downsides are fairly limited. + +#### Common misconceptions + +* **Misconception:** "Batching merges multiple PRs into a single commit" + * **Reality:** No! Each PR is still merged as a separate commit. Batching only affects testing, not merging. +* **Misconception:** "If a batch fails, all PRs in the batch fail" + * **Reality:** Trunk automatically splits the batch and retests to identify only the failing PR(s). Passing PRs still merge. +* **Misconception:** "Batching always makes the queue faster" + * **Reality:** Batching is most effective with stable tests and high PR volume. For low-traffic repos or flaky tests, the overhead may outweigh benefits. + +### Related features + +Batching works exceptionally well with these optimizations: + +**Predictive testing** - Batching builds on predictive testing. Batches are tested against the projected future state of main, just like individual PRs. These features complement each other perfectly. + +**Optimistic merging** - While a batch is testing, the next batch can begin forming and testing optimistically. Combining batching with optimistic merging provides maximum throughput. Configure both for best results. + +**Pending failure depth** - When a batch fails and is being split/retested, pending failure depth controls how many other PRs can test simultaneously. Higher pending failure depth helps maintain throughput during batch failures. + +**Anti-flake protection** - Essential companion to batching. 
Reduces false batch failures caused by flaky tests, making batching more reliable and efficient. + +### Batching + Optimistic Merging and Pending Failure Depth + +Enabling batching along with Pending Failure Depth and Optimistic Merging can help you realize the major cost savings of batching while still reaping the [anti-flake](/merge-queue/optimizations/anti-flake-protection) protection of optimistic merging and pending failure depth. + + +example of testing pull requests in batches of 3 + + +
| Event | Queue |
| --- | --- |
| Enqueue A, B, C, D, E, F, G | main <- ABC <- DEF |
| Batch ABC fails | main <- ABC |
| Pending failure depth keeps ABC from being evicted while DEF is testing | main <- ABC (hold) <- DEF |
| DEF passes | main <- ABC <- DEF |
| Optimistic merging allows ABC and DEF to merge | merge ABC, DEF |
+ +Combined, Pending Failure Depth, Optimistic Merging, and Batching can greatly improve your CI performance because now Merge can optimistically merge whole batches of PRs with far less wasted testing. + +### Next steps + +**Start with batching:** + +1. Enable batching with conservative settings (batch size: 3-5) +2. Monitor for a few days and observe behavior +3. Gradually increase batch size as you gain confidence +4. Check [Metrics and monitoring](/merge-queue/administration/metrics) to measure impact + +**Optimize further:** + +* [Optimistic merging](/merge-queue/optimizations/optimistic-merging) - Combine with batching for maximum throughput +* [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) - Reduce false batch failures +* [Pending failure depth](/merge-queue/optimizations/pending-failure-depth) - Tune behavior during batch failures + +**Monitor performance:** + +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track throughput improvements and CI cost savings +* Watch batch failure rate (should be <10%) +* Measure time-to-merge improvements + +**Troubleshoot issues:** + +* If batches fail frequently → Lower batch size or enable [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) +* If not seeing improvements → Check PR volume and test stability +* For detailed help → [Troubleshooting](/merge-queue/reference/troubleshooting) diff --git a/merge-queue/optimizations/direct-merge-to-main.mdx b/merge-queue/optimizations/direct-merge-to-main.mdx new file mode 100644 index 0000000..202d326 --- /dev/null +++ b/merge-queue/optimizations/direct-merge-to-main.mdx @@ -0,0 +1,194 @@ +--- +title: "Direct merge to main" +description: "Direct Merge to Main is an optimization that allows PRs to merge immediately without waiting in the queue when retesting would provide no value." 
+--- +### Overview + +Direct Merge to Main is an optimization that allows PRs to merge immediately without waiting in the queue when retesting would provide no value. + +The merge queue's purpose is to test your PR against the latest version of main and all PRs ahead of it in the queue. However, if your PR is already based on the tip of main AND the queue is empty, running tests again provides no additional confidence—you've already tested against the exact state your PR will merge into. + +With Direct Merge to Main enabled, Trunk recognizes this situation and merges your PR immediately, skipping the redundant test run and eliminating unnecessary wait time. + +#### How It Works + +**Without Direct Merge to Main:** + +1. PR enters the queue based on tip of main +2. Queue creates a test branch +3. Tests run (even though they just passed on the same code) +4. After tests pass, PR merges +5. Total time: Test duration + queue overhead + +**With Direct Merge to Main:** + +1. PR enters the queue based on tip of main +2. Queue recognizes: PR is up-to-date AND queue is empty +3. PR merges immediately +4. Total time: \~seconds + +#### When Direct Merge Happens + +Direct Merge to Main only activates when **ALL** of these conditions are met: + +✅ **PR is based on the tip of main** - The PR's base commit matches the current HEAD of your main branch + +✅ **Queue is empty** - No other PRs are currently in the queue waiting to test or merge + +✅ **PR's tests have passed** - The PR's CI checks passed on GitHub (before entering the queue) + +✅ **Direct Merge is enabled** - The setting is turned on in your merge queue configuration + +If any of these conditions are not met, the PR enters the queue normally and tests predictively as usual. + +
+ +Example Scenarios + +**Scenario 1: Perfect candidate for Direct Merge** + +* Developer updates their PR to tip of main using "Update branch" on GitHub +* All CI checks pass on the PR +* Developer submits to merge queue +* Queue is currently empty +* **Result:** PR merges immediately (seconds instead of minutes) + +**Scenario 2: PR not up-to-date** + +* PR was created yesterday and main has advanced +* Developer submits to merge queue +* Queue is empty +* **Result:** PR enters queue normally, tests against current main + +**Scenario 3: Queue has other PRs** + +* PR is based on tip of main +* Another PR is already in the queue +* **Result:** PR enters queue normally behind existing PR, tests predictively + +**Scenario 4: Tests haven't passed yet** + +* PR is based on tip of main +* Queue is empty +* But CI checks are still running or failed +* **Result:** PR cannot enter queue until checks pass + +
+ +### When to Enable + +**Enable Direct Merge to Main if:** + +* You enforce "branch must be up-to-date with main" GitHub protection +* Developers frequently update PRs to latest main before merging +* Your test suite takes 5+ minutes to run +* You have good test coverage and trust your main branch tests + +**Don't enable if:** + +* You rarely keep PRs up-to-date with main (feature won't trigger often) +* You want every PR to test in the queue regardless (for additional validation) +* Your tests are very fast (< 1 minute) and the optimization is negligible + +### Configuration + +#### Enable Direct Merge to Main + +1. Navigate to **Settings** > **Repositories** > your repository > **Merge Queue** +2. Locate the **Direct Merge to Main** toggle +3. Enable the setting +4. Changes take effect immediately + +
+ +#### Verify It's Working + +When a PR is directly merged, you'll see different timeline messages and notifications: + +**In Trunk Dashboard:** + +> "Merged to main without going through the queue, as it was up-to-date with main and the queue was empty" + +**In GitHub comments:** + +> "This PR was merged directly to main because it was already up-to-date and the queue was empty." + +**In** [**Slack notifications**](/merge-queue/integration-for-slack) **(if configured):** + +> "✅ PR #123 merged directly (was up-to-date, queue empty)" + +These messages confirm that the optimization triggered and your PR skipped the queue. + +### How This Works with Other Features + +Direct Merge to Main complements other optimizations: + +[**Predictive Testing**](/merge-queue/optimizations/predictive-testing) + +* When direct merge doesn't trigger, predictive testing takes over +* PRs not at tip of main test against predicted future state +* Both features work together seamlessly + +[**Optimistic Merging**](/merge-queue/optimizations/optimistic-merging) + +* Optimistic merging handles PRs deeper in queue +* Direct merge handles the special case at the front +* Both reduce unnecessary waiting + +[**Batching**](/merge-queue/optimizations/batching) + +* If queue has batching enabled and isn't empty, direct merge won't trigger +* Batching takes priority when multiple PRs are present +* Direct merge is for the empty queue case + +[**Parallel Queues**](/merge-queue/optimizations/parallel-queues) + +* Works in both Single and Parallel mode +* In parallel mode, checks if PR's specific lane is empty +* Provides benefit across all queue configurations + +### Troubleshooting + +
+ +Why didn't my PR merge directly? + +Check these conditions: + +1. Was your PR based on the tip of main? (Check GitHub branch status) +2. Was the queue completely empty when you submitted? (Check queue dashboard) +3. Had your PR's tests passed? (Check GitHub status checks) +4. Is Direct Merge to Main enabled? (Check Merge Queue settings) + +If all conditions were met but direct merge didn't happen, contact support with the PR number. + +
+ +
+ +Does this bypass security checks? + +No. Direct merge only skips the queue testing step. Your PR must still: + +* Pass all required status checks on GitHub +* Meet all branch protection requirements +* Have the necessary approvals +* Be based on the latest main branch + +
+ +
+ +Will this slow down other PRs? + +No. Direct merge only happens when the queue is empty, so there are no other PRs to slow down. When other PRs are present, direct merge doesn't trigger and the queue operates normally. + +
+ +
+ +What if tests are flaky? + +Direct merge relies on the tests that ran on your PR branch (before entering the queue). If those tests are flaky and gave a false positive, the issue existed before direct merge. Focus on fixing flaky tests rather than disabling the optimization. + +
diff --git a/merge-queue/optimizations/optimistic-merging.mdx b/merge-queue/optimizations/optimistic-merging.mdx new file mode 100644 index 0000000..c7beaf3 --- /dev/null +++ b/merge-queue/optimizations/optimistic-merging.mdx @@ -0,0 +1,115 @@ +--- +title: "Optimistic merging" +description: "Optimistic merging allows pull requests that fail tests to still get merged if pull requests behind them in the queue pass their tests. The assumption is that the queue has proof that they pass when combined with the pull requests merging ahead of them." +--- +### What it is + +Optimistic merging allows pull requests that fail tests to still get merged if pull requests behind them in the queue pass *their* tests. The assumption is that the queue has proof that while one specific PR might fail tests, it passes them when combined with a pull request that is going to merge soon behind it. + +The foundation of our merge queue starts with [predictive testing](/merge-queue/optimizations/predictive-testing). When a predictive test is being run, concurrent tests sometimes finish before the work ahead of it. This creates a situation where the system knows that all code ahead of it collectively `passes` tests, and it is safe to merge all those changes into your protected branch (`main`).\
\
With optimistic merging enabled, we can leverage results from pull requests later in the queue to merge faster. In the illustration below you can see that pull request 'c' includes the verified testing results of pull requests 'b' and 'a'. As soon as 'c' passes testing, we can safely merge 'a', 'b', and 'c' and know they will all work correctly together. + + +Optimistic merging to merge faster + + +### Why use it + +* **Eliminate idle time** - The queue doesn't sit idle waiting for merges to complete. As soon as a PR enters the "merging" phase, the next PR begins testing. Result: 20-30% reduction in average PR wait time. +* **Increase throughput** - More PRs can be in-flight simultaneously. 
Queues using optimistic merging process 1.5-2x more PRs per hour compared to sequential testing. +* **Faster time-to-production** - PRs merge faster because they don't wait for the previous PR to fully complete. What used to take 30 minutes might now take 20 minutes. +* **Better resource utilization** - Your CI infrastructure isn't sitting idle between tests. Continuous testing means more efficient use of your CI capacity. + +### How to enable + + +Optimistic merging is **disabled by default** and should be enabled after you're confident in your basic queue setup. + + +Enable Optimistic merging in **Settings** > **Repositories** > your repository > **Merge Queue** > toggle **On** **Optimistic Merge Queue**. + +#### Verify it's working + +After enabling, watch your queue: + +* ✅ Multiple PRs should show "Testing" status simultaneously +* ✅ New PR starts testing before previous PR shows "Merged" +* ✅ In your CI, you'll see overlapping test runs + +**Start conservative:** Enable optimistic merging after you've validated basic queue functionality. Don't enable it on day one. + +### Tradeoffs and considerations + +The downsides here are very limited. You essentially give up the proof that every pull request in complete isolation can safely be merged into your protected branch. + +In the unlikely case that you have to revert a change from your protected branch, you will need to retest that revert or submit it to the queue to ensure nothing has broken. In practice, this re-testing is required in almost any case, regardless of how it was originally merged, and the downsides are fairly limited. 
+ +#### What you gain + +* **faster average merge time** - Less idle time between tests +* **higher throughput** - More PRs processing simultaneously +* **Better CI utilization** - Continuous testing instead of start-stop +* **Faster incident response** - Critical PRs merge quicker + +#### What you give up or risk + +* **Wasted CI on retests (rare)** - If an optimistically-tested PR needs to retest, you've used some CI resources unnecessarily +* **More complex queue state** - Multiple PRs in "testing" can be confusing initially +* **Requires stable tests** - Flaky tests cause more retests with optimistic merging + +#### When NOT to use optimistic merging + +Don't enable optimistic merging if: + +* **Your tests are highly flaky (>5% flake rate)** - Retests will negate the benefits +* **Your queue is rarely busy** - If you only have 1-2 PRs per hour, there's nothing to optimize +* **You're still learning the queue** - Get comfortable with basic functionality first +* **Your merges frequently fail** - If PRs often fail during merge (not testing), optimistic assumptions will be wrong often + +#### Best practices + +**Start without it:** Use Trunk Merge Queue for a week or two before enabling optimistic merging. Understand normal flow first. + +**Enable when stable:** Once your queue is working reliably and you have consistent PR volume, optimistic merging provides significant benefits. + +**Combine with other optimizations:** Optimistic merging works best alongside: + +* [Batching](/merge-queue/optimizations/batching) - Test batches optimistically +* [Predictive testing](/merge-queue/optimizations/predictive-testing) - Required foundation for optimistic merging +* [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) - Reduces unnecessary retests + +#### Common misconceptions + +* **Misconception:** "Optimistic merging is risky - it might merge broken code" + * **Reality:** No! Trunk still requires all tests to pass. 
Optimistic merging only affects *when* testing starts, not *whether* testing happens. Safety is never compromised. +* **Misconception:** "Optimistic merging causes lots of wasted retests" + * **Reality:** Retests are rare (< 5% of PRs in typical queues). The throughput gains far outweigh the occasional retest cost. +* **Misconception:** "I should enable every optimization immediately" + * **Reality:** Start with just predictive testing. Add batching once stable. Add optimistic merging last. Build confidence in each layer. + +### Next Steps + +**Before enabling optimistic merging:** + +1. Ensure basic queue is working well +2. Verify test stability (< 5% flake rate recommended) +3. Enable [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) first +4. Check that you have consistent PR volume + +**After enabling:** + +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track throughput improvements +* Watch for retest rate (should be < 5%) +* Measure time-to-merge improvements + +**Optimize further:** + +* [Batching](/merge-queue/optimizations/batching) - Combine with optimistic merging for maximum effect +* [Pending failure depth](/merge-queue/optimizations/pending-failure-depth) - Fine-tune simultaneous testing behavior + +**Troubleshooting:** + +* If seeing frequent retests → Check test stability or disable temporarily +* If not seeing improvements → Check PR volume and queue activity +* For detailed help → [Troubleshooting](/merge-queue/reference/troubleshooting) diff --git a/merge-queue/optimizations/parallel-queues.mdx b/merge-queue/optimizations/parallel-queues.mdx new file mode 100644 index 0000000..eb0b469 --- /dev/null +++ b/merge-queue/optimizations/parallel-queues.mdx @@ -0,0 +1,74 @@ +--- +title: "Parallel queues" +description: "Create dynamic parallel queues to reduce queue time" +--- +Normally, a merge queue behaves by enqueueing all submitted pull requests into a single line. 
Under this mode of operation, every pull request is [predictively tested ](/merge-queue/optimizations/predictive-testing)against the pull requests ahead of it. While this guarantees the correctness of the protected branch at all times, under a high submission load, the wait time for an item in the queue can be negatively impacted.\ +\ +A regular merge queue operates like a grocery store with only a single checkout lane. When a lot of folks are trying to checkout at the same time - the line will grow (sometimes intolerably). With a dynamic parallel queue, trunk merge creates additional checkout lanes in real-time while still guaranteeing that the protected branch doesn't break.​  + + +track impacted code of each pull request to create dynamic queues + + +For example, the following four pull requests: + +* PR A with impacted target list `[ frontend ]` +* PR B with impacted target list `[ backend ]` +* PR C with impacted target list `[ frontend, backend]` +* PR D with impacted target list `[ docs]` + +Without parallelization, the PRs **A**, **B**, **C**, and **D** would all be tested in a single predictive path **A** <- **B** <- **C** <- **D**. Using the impacted target information we can instead build three dynamically provisioned queues and the predictive testing can yield higher throughput - which means your pull request spends less time in the queue stuck testing with unrelated code changes. + +Three Dynamic Parallel Queues + +#### **How does it work?** + +To run in parallel mode, each pull request needs to be inspected for its impacted targets. This is a fancy way of saying that each pull request needs to report what parts of the codebase are changing.\ +\ +In the example above, the pull requests **A**, **B**, and **D** can be tested in isolation since they affect distinct targets - `backend`, `frontend` and `docs`. 
The **C** pull request affects both `frontend` and `backend` and would be tested predictively with the changes in both **A** and **B**.\ +\ +To understand the interactions or dependent changes between pull requests, Trunk Merge Queue provides an API for posting the list of **impacted targets** that result from code changes in every PR. When Trunk Merge Queue is running in parallel mode, pull requests will not be processed until the list of impacted targets are uploaded. + +#### **What are Impacted Targets?** + +Impacted targets are metadata that describe the logical changes of a pull request. An impacted target is a string that can be as expressive as a Bazel target or the name of a file folder. Calculating impacted targets with a purpose-built build system will provide absolute correctness for the merge queue, but more lightweight glob or folder-based approaches can also work with fewer guarantees around correctness. + +#### **Posting impacted targets from your pull requests** + +We ship several pre-built solutions for popular build systems to automatically calculate and post the impacted targets of a pull request. If you are using another build system, we would be happy to work with you to add support for your specific build system. + +
* [Bazel](/merge-queue/optimizations/parallel-queues/bazel)
* [Nx](/merge-queue/optimizations/parallel-queues/nx)
* [Other (custom build systems)](/merge-queue/optimizations/parallel-queues/api)
+ +**Enable Parallel Mode**\ +Merge can be swapped between `Single` and `Parallel` mode at any time. If there are no PRs in the merge queue when switching, the switch will be immediate. If there are PRs in the queue, then Merge will go into the `Switching Modes` state, where it'll wait for all currently testing PRs to merge before switching modes. During this time, PRs will not be able to enter the queue. + +Switching modes can be done from the `Merge Queue Mode` section of the `Settings > Repositories > repo name > Merge` panel. +

enabling parallel mode

+ +**Find your Trunk API Token** + + +Explore the interactive walkthrough in a new tab. + + +#### Store your Organization Token as a GitHub Secret + + +Explore the interactive walkthrough in a new tab. + + +### Monitoring Parallel Queue Performance + +Once you've enabled parallel mode and configured impacted targets, you can analyze how well the parallel workflow performs for different parts of your codebase. + +The Health dashboard allows you to filter all metrics by impacted targets, so you can: + +* Compare merge times between different targets (e.g., frontend vs backend) +* Identify which targets experience the most failures +* Optimize queue configuration for your highest-priority code paths +* Demonstrate the value of parallel mode to engineering leadership + + +See [Filter Metrics by Impacted Targets ](/merge-queue/administration/metrics#filter-metrics-by-impacted-targets)for detailed guidance on using this feature. + diff --git a/merge-queue/optimizations/parallel-queues/api.mdx b/merge-queue/optimizations/parallel-queues/api.mdx new file mode 100644 index 0000000..3ce7292 --- /dev/null +++ b/merge-queue/optimizations/parallel-queues/api.mdx @@ -0,0 +1,41 @@ +--- +title: "Custom Build Systems" +description: "Upload custom list of impacted targets" +--- +Impacted Targets should be computed for every PR. The list of impacted targets should be computed by comparing two different SHAs: the **head of the target branch**, and the **merge commit of the pr**. + + +Our [reference implementation](https://github.com/trunk-io/bazel-action/tree/main/src/scripts) may be useful in guiding your implementation. + + +**POST** the list of impacted targets here:`https://api.trunk.io:443/v1/setImpactedTargets`. 
+ +```ssml +HEADERS: + Content-Type: application/json, + x-api-token: , + x-forked-workflow-run-id: ${{github.run_id}}, + +BODY: { + repo: { + host: "github.com", + owner: , + name: , + }, + pr: { + number: , + sha: , + }, + targetBranch: , + impactedTargets: ["target-1", "target-2", ...] OR "ALL" + } +``` + +`impactedTargets` allows specifying either an array of strings representing the impacted targets from the PR or the string "ALL" (note that this is explicitly not in an array and is just the string "ALL"). Specifying "ALL" is the equivalent of saying that everything that comes into the graph after this PR should be based on this one, which is useful when your PR contains changes that affect the whole repo (such as editing `trunk.yaml` or a GitHub workflow). + +**Handling Forked Pull Requests**\ +The HTTP POST must contain the `x-api-token` to prove that it is a valid request from a workflow your org controls. *Workflows that come from forked PRs most likely will not have access to the Trunk org token* required for the HTTP POST above. In this case, you should provide the **run ID** of the workflow as the `x-forked-workflow-run-id` header in place of the `x-api-token`. This ID can be obtained from [the GitHub context](https://docs.github.com/en/actions/learn-github-actions/contexts#github-context) as `${{ github.run_id }}`. Trunk Merge Queue will verify that the ID belongs to a currently running workflow originating from a forked PR with a SHA that matches the one provided in the request and allow it through. + + +We do not recommend using an event trigger like `pull_request_target.` This would allow workflows from forked PRs to get secrets, which is a security risk and would open your repo to attackers making forks, adding malicious code, and then running it against your repo to exfiltrate information. (see[ Keeping your GitHub Actions and workflows secure](https://securitylab.github.com/research/github-actions-preventing-pwn-requests/)). 
+ diff --git a/merge-queue/optimizations/parallel-queues/bazel.mdx b/merge-queue/optimizations/parallel-queues/bazel.mdx new file mode 100644 index 0000000..eb90182 --- /dev/null +++ b/merge-queue/optimizations/parallel-queues/bazel.mdx @@ -0,0 +1,35 @@ +--- +title: "Bazel" +description: "Instructions for enabled dynamic parallel queues powered by your bazel graph" +--- +Leveraging [parallel mode](/merge-queue#single-mode-vs.-parallel-mode) for Trunk Merge Queue is easy for Bazel-enabled repos because Bazel already knows the structure of your code and can automatically generate a dependency graph. Merge can use this information in parallel mode to create dynamic parallel queues enabling your pull requests to run through your Merge Queue faster.\ +\ +**How do we create parallel queues?**\ +By understanding which Bazel targets a pull request affects, we can build a real-time graph and detect intersection points and where distinct non-overlapping graphs exist. This information is essentially a list of unique target names, which can then be used in real time to understand along which targets pull requests might overlap. + +**Calculating impacted targets in GitHub Actions**\ +Trunk ships a [GitHub action](https://github.com/trunk-io/bazel-action) that will generate the list of impacted targets for a pull request and post that information to the Trunk Merge Queue service. 
+ +```yaml +name: Upload and Test Impacted Targets +on: pull_request + +jobs: + impacted_targets: + name: Impacted Targets + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Upload and Test Impacted Targets + uses: trunk-io/bazel-action@v1 + with: + upload-targets: "true" + ### store your trunk api token to authenticate with merge service + trunk-token: ${{ secrets.TRUNK_API_TOKEN }} + ### (optional if your bazel setup is not in the root of your repo) + # workspace-path: {your bazel workspace path} +``` + +The above sample GitHub action code will calculate the impacted targets of your pull request and post that information to the trunk merge service. That data will be used to run your trunk merge queue in parallel mode. diff --git a/merge-queue/optimizations/parallel-queues/nx.mdx b/merge-queue/optimizations/parallel-queues/nx.mdx new file mode 100644 index 0000000..a018c7b --- /dev/null +++ b/merge-queue/optimizations/parallel-queues/nx.mdx @@ -0,0 +1,32 @@ +--- +title: "Nx" +description: "Instructions for enabled dynamic parallel queues powered by your Nx graph" +--- +Leveraging [parallel mode](/merge-queue#single-mode-vs.-parallel-mode) for Trunk Merge Queue is easy for Nx-enabled repos because Nx already knows the structure of your code and can automatically generate a dependency graph. Merge can use this information in parallel mode to create dynamic parallel queues enabling your pull requests to run through your Merge Queue faster.\ +\ +**How do we create parallel queues?**\ +By understanding which Nx targets a pull request affects, we can build a real-time graph and detect intersection points and where distinct non-overlapping graphs exist. This information is essentially a list of unique target names, which can then be used in real time to understand along which targets pull requests might overlap. 
+ +**Calculating impacted targets in GitHub Actions**\ +Trunk ships a [GitHub action](https://github.com/trunk-io/nx-action) that will generate the list of impacted targets for a pull request and post that information to the Trunk Merge Queue service. + +```yaml +name: Upload and Test Impacted Targets +on: pull_request + +jobs: + impacted_targets: + name: Impacted Targets + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: compute impacted targets + uses: trunk-io/nx-action@v1 + with: + ### store your trunk api token to authenticate with merge service + trunk-token: ${{ secrets.TRUNK_API_TOKEN }} +``` + +The above sample GitHub action code will calculate the impacted targets of your pull request and post that information to the trunk merge service. That data will be used to run your trunk merge queue in parallel mode. diff --git a/merge-queue/optimizations/pending-failure-depth.mdx b/merge-queue/optimizations/pending-failure-depth.mdx new file mode 100644 index 0000000..fe47638 --- /dev/null +++ b/merge-queue/optimizations/pending-failure-depth.mdx @@ -0,0 +1,139 @@ +--- +title: "Pending failure depth" +description: "Pending failure depth allows pull requests to wait until other pull requests behind them in the queue complete testing before getting removed from the queue." +--- +### What it is + +Pending failure depth allows pull requests to wait until other pull requests behind them in the queue complete testing before getting removed from the queue. + +By default, a PR that fails testing will be evicted from the queue. The **Pending Failure Depth** feature allows a failed PR to remain in the queue for pull requests behind it so that testing can be finished before this eviction occurs. The number of PRs that the queue will wait for is the *Pending Failure Depth.* This depth is configurable and reflects the number of pull requests behind this one that should complete testing before eviction is assessed. 
+ +### Why use it + +* **Prevent queue stalls** - When a PR fails, the queue doesn't grind to a halt. Other PRs continue testing, assuming the failure was isolated. Keeps merge velocity high even during issues. +* **Faster failure recovery** - If PR #3 fails but PR #4 fixes the issue, both can be processed quickly because they tested in parallel. Without pending failure depth, you'd wait for #3 to fail, then wait for #4 to test sequentially. +* **Optimize for your team size** - Small teams benefit from lower values (fewer wasted tests), large teams benefit from higher values (maintain throughput despite occasional failures). +* **Balance risk vs. throughput** - Tune the setting to match your team's tolerance for wasted CI resources vs. need for high queue velocity. + +### How to enable + + +Pending failure depth is **set to zero by default** and should be enabled after you're confident in your basic queue setup. + + +Configure Pending Failure Depth in **Settings** > **Repositories** > your repository > **Merge Queue** > select a value from the **Pending Failure Depth** dropdown. + +### Configuration options + + +Just getting started with tuning Pending Failure Depth? Try a value of 2, and work from there with your team to find the right balance. 
+ + +**Start with a small value** and observe: + +* If your queue frequently stalls when PRs fail → Increase value +* If you see lots of wasted test runs (many PRs test then all fail) → Decrease value +* If your CI infrastructure is constrained → Use lower value (3-4) +* If you have abundant CI capacity → Use higher value (7-10) + +#### Verify it's working + +When a PR fails, watch for: + +* ✅ Multiple other PRs continue testing (up to your configured depth) +* ✅ Queue doesn't stop entirely +* ✅ Failed PR is removed, but others keep going + +### Tradeoffs and considerations + +#### What you gain + +* **Queue never fully stops** - Failures don't block all subsequent PRs +* **Faster recovery** - Independent PRs can merge while others fail +* **Tunable throughput** - Adjust for your team's needs +* **Better CI utilization** - Tests keep running instead of stopping + +#### What you give up or risk + +* **Wasted CI resources** - PRs may test against a state that includes failing PRs, then need to retest +* **Cascading failures** - If one PR breaks something, multiple subsequent PRs might fail before the issue is caught +* **Complexity** - More PRs testing simultaneously = harder to understand queue state + +#### When to decrease pending failure depth + +Lower the value (3-4) if: + +* **Your tests are flaky (>5% flake rate)** - Flaky tests cause false failures, leading to wasted retests +* **CI resources are expensive/limited** - Lower parallelism reduces waste +* **PRs frequently conflict** - Related changes often fail together, so testing them in parallel wastes resources +* **You're seeing excessive retests** - Many PRs testing, failing, retesting pattern + +#### When to increase pending failure depth + +Raise the value (7-10) if: + +* **Your queue stalls frequently when PRs fail** - Low depth is blocking throughput +* **PRs are mostly independent** - Failures are isolated, not cascading +* **You have abundant CI capacity** - Waste isn't a concern +* **Large team, 
high PR volume** - Need parallelism to maintain velocity + +#### Understanding the cost + +**Example cost calculation:** + +Scenario: Pending failure depth = 5, PR #101 fails testing + +* PRs #102, #103, #104, #105, #106 all test against a state including #101 +* All 5 fail because #101 broke something +* All 5 retest after #101 is removed +* **Wasted**: 5 test runs + +**But consider:** + +* Without pending failure depth, PRs would test sequentially (much slower) +* In most cases, failures ARE independent, so PRs merge successfully +* Occasional waste is preferable to frequent queue stalls + +**Typical waste rate:** 5-10% of test runs are wasted retests in well-configured queues + +#### Common misconceptions + +* **Misconception:** "Higher pending failure depth always means faster queue" + * **Reality:** Too high = wasted CI resources and cascading failures. Too low = queue stalls. The sweet spot depends on your team size and test stability. +* **Misconception:** "Pending failure depth should be set to 1 to avoid waste" + * **Reality:** Value of 1 means queue stops on every failure (defeats the purpose of predictive testing). Start at 5 and adjust. +* **Misconception:** "This setting isn't important" + * **Reality:** Poorly tuned pending failure depth can either waste significant CI resources or cause frequent queue stalls. It's worth monitoring and adjusting. + +### Next Steps + +**Initial setup:** + +1. Start with a small value (2) +2. Monitor queue behavior +3. Check metrics for wasted test runs +4. Adjust based on observations + +**Optimize the value:** + +* Queue stalls frequently? → Increase depth +* Excessive retests (>15%)? → Decrease depth +* Make small adjustments and observe impact + +**Monitor performance:** + +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track retest rate and queue throughput +* Watch for patterns: Do failures cascade? Are they independent? 
+* Adjust pending failure depth based on data + +**Combine with other optimizations:** + +* [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) - Reduce false failures first +* [Batching](/merge-queue/optimizations/batching) - Understand how pending failure depth affects batch splitting +* [Predictive testing](/merge-queue/optimizations/predictive-testing) - Read the full explanation of how these work together + +**Troubleshooting:** + +* Too many wasted tests → Lower pending failure depth +* Queue stops on every failure → Increase pending failure depth +* Unclear which value to use → Start at 2, monitor for a week diff --git a/merge-queue/optimizations/predictive-testing.mdx b/merge-queue/optimizations/predictive-testing.mdx new file mode 100644 index 0000000..faac193 --- /dev/null +++ b/merge-queue/optimizations/predictive-testing.mdx @@ -0,0 +1,52 @@ +--- +title: "Predictive testing" +description: "Trunk Merge Queue tests pull requests against the projected future state of your main branch, not just the current state." +--- +### What it is + +Trunk Merge Queue tests pull requests against the projected future state of your main branch, not just the current state. + +This means when multiple PRs are in the queue, each PR is tested as if all the PRs ahead of it have already been merged. This prevents "queue collapse" - where PRs pass tests individually but fail when merged together. + +### Why use it + +Normally, pull requests are tested against a snapshot of the head of `main` when the pull request is posted to your source control provider. This can mean that by the time the pull request is actually merged - the results of the automated testing are **stale**.\ +\ +When you merge a pull request with stale results, you are effectively merging in **un-tested code**. The changes to the protected branch since the test was run create a blind spot in your testing regimen. 
With predictive testing, you no longer have a blind spot because the merge queue ensures that the pull request is tested against the state of `main` that will exist when your pull request is merged. + +### What's Happening? The "Happy Path" + +This example shows how pull requests (PRs) are tested in a queue. PR `B` is tested with the changes from `A`, and `C` is tested with the changes from both `A` and `B`. + + +Test your pull request with the changes ahead of it in the queue + + +
What's Happening?Queue
A begins testingmain <- A
B begins predictive testing by including the changes in Amain <- A <- B+a
C begins predictive testing by including the changes in both A and Bmain <- A <- B+a <- C+ba
as testing completes - pull requests can merge safelymerge A, B, C
+ +### The "Unhappy Path": How the Queue Handles Test Failures + +Predictive testing is powerful, but it creates a new challenge: **failure cascades**. + +In the "Happy Path" example, if PR `A` introduces a failing test, the predictive tests for `B` and `C` are *also* guaranteed to fail, because they both include the broken code from `A`. + +A simple queue would kick `B` and `C` as soon as their tests failed. This would disrupt their authors, who did nothing wrong, and force them to restart their PRs multiple times, wasting valuable CI time . + +This is solved by **Pending Failure**. + +#### How Pending Failure Works + +The main purpose of "pending failure" is to **minimize disruptions to the queue** by intelligently finding the ***true*** source of a failure. + +Instead of immediately kicking a PR just because its test run failed, the queue follows this logic: + +1. **A Test Fails**: Let's say PR `C`'s test run fails. +2. **Enter** `Pending Failure` **State**: `C` is *not* kicked. It enters a `Pending Failure` state and *waits* for the PRs it depends on (`A` and `B`) to finish testing. +3. **Identify the Root Cause:** The queue's goal is to determine: "Did this PR fail because of its own code, or did it fail because of a change in a PR ahead of it?". + * `C` (failed) waits for `B`. + * `B` (also fails) waits for `A`. + * When `A` (at the top of the queue) fails, the queue knows it *must* be the PR that introduced the failure, as it only depends on `main`. +4. **Minimize Disruption:** The queue only kicks the first faulty PR (`A`). +5. **Automatic Recovery:** PRs `B` and `C` (which are likely healthy) stay in the queue. They are automatically re-scheduled for testing with a new predicted state that *excludes* the bad PR (e.g., `B` now tests against `main`, and `C` tests against `main + B`). + +**Pending Failure** is the essential recovery mechanism that makes **Predictive Testing** practical. 
It ensures the queue is resilient and that engineers are not disrupted by test failures they didn't cause. diff --git a/merge-queue/optimizations/priority-merging.mdx b/merge-queue/optimizations/priority-merging.mdx new file mode 100644 index 0000000..f22d04b --- /dev/null +++ b/merge-queue/optimizations/priority-merging.mdx @@ -0,0 +1,166 @@ +--- +title: "Priority merging" +description: "Priority merging allows you to fast-track critical pull requests to the front of the merge queue." +--- +### What it is + +Priority merging allows you to fast-track critical pull requests to the front of the merge queue. + +By assigning a priority level to a PR, you can ensure urgent changes (like hotfixes, security patches, or critical bug fixes) merge ahead of regular feature work. PRs with higher priority are tested and merged before lower-priority PRs, regardless of when they were submitted. + +### Why use it + +* **Fast-track critical fixes** - Get security patches and hotfixes to production in minutes, not hours. Priority PRs bypass the normal queue order and merge immediately after testing. +* **Respond to incidents quickly** - When production is down, you can't wait for 20 feature PRs to merge first. Priority merging lets you deploy fixes urgently while maintaining merge queue safety. +* **Prevent emergency merges** - Instead of bypassing the queue entirely (risky!), use priority merging to maintain safety while getting urgent PRs through fast. All PRs still get tested, but yours goes first. +* **Balance urgency with safety** - Priority PRs still go through full testing and validation. You get speed without sacrificing the protection that merge queues provide. + +### How to enable + +Priority merging is **built in** - you don't need to enable a setting. You set priority on individual PRs when you need them to be fast-tracked. 
+ +#### Enable via GitHub comment + +On any pull request, comment: + +``` +/trunk merge --priority= +``` + +or + +``` +/trunk merge -p +``` + +#### Enable via Command Line argument + +From the command line enter the following command: + +```shell +trunk merge --priority= +``` + +or + +``` +trunk merge -p +``` + +#### Valid priority levels + +
labelnumbernote
urgent0Production outages, security vulnerabilities.

urgent items will interrupt running jobs and begin testing immediately
high10Urgent bug fixes, important hotfixes
medium100Regular feature work (default)
low200Non-urgent refactors, documentation
lowest255Lowest possible priority
+ +### How priority affects PR order + +PRs with a higher priority will always begin testing before any other PR that isn't currently being tested, ensuring that prioritized PRs move into the queue as soon as they can. A PR without a priority will use the default `medium` (100) priority. If there is already a PR in the queue with the same priority, then the new one will be behind it. + +When prioritizing a PR, Merge will explicitly **not interrupt** any currently testing PR, as restarting testing on PRs is usually costly, even if you want another PR to merge sooner. Because of this, if a PR is submitted with a priority and there is still room in the queue to begin testing PRs, it will begin testing as normal without interrupting other PRs. + +**There is an exception to this rule.** Sometimes, when there is a PR urgent enough to get in that it is worth the cost of restarting a currently testing PR, you can move the new PR to the front using the `"urgent"` priority. This is the only time Merge will reschedule a PR that is already in testing. + + +Another exception: Admins can still merge PRs in absolutely necessary cases outside of the merge queue. Merge Queue handles these gracefully and will properly restart the rest of the queue. + + +#### Example: + +Say you have a queue that is configured to test two PRs at once. The queue currently looks like this: + +Queue with two testing PRs and one pending + +If you submit a PR D with a `"high"` priority it will be put in front of C (since it is a higher priority than C and C is not testing). 
D will begin as soon as either A or B finishes, like this: + +Queue with two testing PRs and a new higher priority pending PR + +Instead, if you submit PR D with an `"urgent"` priority, then D would be tested immediately, A would be restarted, and B would be bumped back to pending, like this: + +Queue with an urgent PR moved to the front and a normal PR restarting + +### Visual indicators in the dashboard + +When a PR is queued with a non-default priority, the merge queue graph view displays a badge on the PR node so you can quickly identify fast-tracked items: + +| Priority | Badge | Description | +| -------- | ------------------------------------ | ---------------------------------------------------------------------- | +| Urgent | Red pulsing badge labeled **URGENT** | Draws immediate attention; this PR is interrupting in-progress testing | +| High | Orange badge labeled **HIGH** | PR is being fast-tracked ahead of normal-priority items | +| Medium | No badge | Default priority — no visual indicator | +| Low | No badge | Lower-than-default priority — no visual indicator | + +Priority badges appear on individual PR nodes in the [graph view](/merge-queue/using-the-queue/monitor-queue-status#graph-view). Batch nodes (multiple PRs tested together) do not display priority badges. 
+ +### Tradeoffs and considerations + +#### What you gain + +* **Emergency response capability** - Critical fixes merge in minutes +* **Incident resolution speed** - Production issues resolved faster +* **Maintained safety** - Unlike bypassing the queue, priority PRs still get tested +* **Team confidence** - Developers know urgent fixes can get through quickly + +#### What you give up or risk + +* **Queue disruption for others** - Normal PRs wait longer when priority PRs jump ahead +* **Potential for abuse** - If everyone marks their PR as high priority, the system loses effectiveness +* **Fairness concerns** - Team members may feel frustrated if priorities are used unfairly + +#### When NOT to use priority merging + +Don't use high priority for: + +* **Feature work marked "urgent" by product** - This should be normal priority +* **"I want my PR to merge faster"** - Wait your turn +* **End-of-sprint rushing** - Better sprint planning is the solution +* **Every PR from senior engineers** - Priority should be about the PR, not the author + +**Use high priority ONLY for:** + +* Production outages and incidents +* Security vulnerabilities +* Critical bug fixes blocking users +* Emergency hotfixes + +#### Best practices + +**Establish priority guidelines:** Document when priority should and shouldn't be used. Example: + +* `high` = Production down, security issue, critical bug affecting users +* `medium` = Everything else + +**Monitor priority usage:** Track how often priority is used. If >10% of PRs are high priority, you have a process problem, not a priority problem. + +**Rotate on-call priority power:** Only the current on-call engineer can set high priority. This prevents abuse and ensures judgment calls are made by whoever is dealing with the incident. + +#### Common misconceptions + +* **Misconception:** "Priority PRs skip testing" + * **Reality:** No! Priority PRs go through full testing, they just test first. Safety is never compromised. 
+* **Misconception:** "I can mark my PR priority to avoid waiting" + * **Reality:** Priority should be reserved for genuine emergencies. Overuse makes the system meaningless for everyone. +* **Misconception:** "Priority PRs interrupt currently running tests" + * **Reality:** Only the `urgent` priority interrupts in-progress tests. All other priority levels queue at the front without canceling currently running tests. + +### Next Steps + +**Set up priority guidelines:** + +1. Document when to use high priority (share with your team) +2. Configure permissions (limit who can set priority) +3. Communicate guidelines in team onboarding docs + +**Use priority responsibly:** + +* Reserve for true emergencies only +* Monitor usage to prevent abuse +* Consider rotation-based permission (only on-call can set priority) + +**For true emergencies:** + +* If priority isn't fast enough → Emergency pull requests +* If you need to pause the queue → **Settings** > **Repositories** > your repository > **Merge Queue** > **Merge Queue State** and select **Paused** from the dropdown. + +**Monitor impact:** + +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track priority PR usage and merge times +* Watch for priority overuse (should be <5% of PRs) diff --git a/merge-queue/reference.mdx b/merge-queue/reference.mdx new file mode 100644 index 0000000..cb895d0 --- /dev/null +++ b/merge-queue/reference.mdx @@ -0,0 +1,21 @@ +--- +title: "Reference" +description: "Technical documentation and troubleshooting resources." +--- +Quick-lookup documentation for command-line tools, APIs, and troubleshooting. This section provides technical details for developers and administrators who need to automate workflows, debug issues, or find answers to common questions. + +### Command-line and API + +[**CLI reference**](/merge-queue/reference/merge-queue-cli-reference)\ +Complete command reference for the Trunk CLI tool. + +[**API reference**](/merge-queue/reference/merge)\ +Programmatic access to queue operations via REST API. 
+ +### Help and troubleshooting + +[**FAQ**](/merge-queue/reference/common-problems)\ +Common questions about merge queue behavior and features. + +[**Troubleshooting**](/merge-queue/reference/troubleshooting)\ +Diagnose and resolve common issues with queue setup and operation. diff --git a/merge-queue/reference/common-problems.mdx b/merge-queue/reference/common-problems.mdx new file mode 100644 index 0000000..cebdd0f --- /dev/null +++ b/merge-queue/reference/common-problems.mdx @@ -0,0 +1,171 @@ +--- +title: "FAQ" +description: "Troubleshooting and FAQ" +--- +#### Entering the Queue + +
+ +Can I add a pull request to the queue before all required checks pass? + +Yes. A pull request can be submitted to the queue at any time, even if it's not yet ready to merge. The pull request will enter the queue in a "Queued" state and wait for all branch protection rules (like passing status checks and required reviews) to be met. Once the PR is ready, Trunk Merge Queue will automatically move it into the testing phase. + +
+ +
+ +Why isn’t my pull request entering the queue? + +First, check the Trunk web app to see what Trunk is waiting on before putting your PR into the merge queue. + +Next, if something on that page doesn't look right, for example, it says that GitHub is still checking the mergeability of the PR, comment `/trunk merge` again in the PR. + +
+ +
+ +Why aren't my required checks triggering, even though my pull request is being tested in queue? + +Most likely, you did not set up the required status checks to trigger for `trunk-merge/` branches. It is also possible that your CI provider just randomly never started testing on the Trunk Merge Queue branch, even after setting the required status checks to trigger. To assist with this, you can [configure a testing timeout](/merge-queue/administration/advanced-settings#timeout-for-tests-to-complete). + +
+ +#### Merge Behavior + +
+ +Can I choose the merge strategy for my pull requests? + +Yes! Trunk Merge Queue supports three merge methods: + +* **Squash** (default) - Combines all commits into a single commit +* **Merge Commit** - Preserves all individual commits and creates a merge commit +* **Rebase** - Replays commits on top of main for a linear history + +The merge method is configured at the repository level in Settings > Repositories > \[your repository] > Merge Queue. All PRs merged through the queue will use the selected method. + + +**Note:** The merge method applies to the entire repository, not on a per-PR basis. See [Merge Method documentation](/merge-queue/administration/advanced-settings#merge-method) for detailed information on each option and how to configure your preference. + + +
+ +
+ +How does Trunk handle commit messages + +Commit messages depend on your configured merge method: + +* **Squash** (default): The commit message is automatically generated from the pull request's title and description, following GitHub's default behavior +* **Merge Commit**: Preserves all individual commit messages from the PR and creates an additional merge commit message +* **Rebase**: Preserves all individual commit messages from the PR as they are replayed onto the target branch + +You can configure your preferred merge method in [Advanced Settings](/merge-queue/administration/advanced-settings#merge-method). + +
+ +
+ +Do Optimistic Merging or Batching ever merge multiple pull requests into a single commit? + +No. Pull requests are always merged individually, and each PR will result in a separate commit in your `main` branch's history, regardless of your configuration. + +Features like Optimistic Merging and Batching are validation and testing strategies, not merging strategies. + +* [Optimistic Merging](/merge-queue/optimizations/optimistic-merging) uses the successful test of a pull request later in the queue to validate all the PRs ahead of it in the queue, allowing the entire sequence to be merged without waiting for the earlier PRs to finish testing. +* [Batching](/merge-queue/optimizations/batching) allows the queue to *test* multiple PRs in a single CI job to save time and resources. After the test passes, the PRs in the batch are still merged one by one. + +
+ +#### Queue Configuration + +
+ +Can I create multiple merge queues for a single repository? + +Currently, Trunk Merge Queue supports one merge queue per repository. If this is critical for your use case, [talk to us](/setup-and-administration/support) and we'll consider adding support for your use case. + +For validating significant changes to your CI process or queue configuration without impacting your primary workflow, the recommended approach is to use a fork of your repository. You can set up and test a separate merge queue on the fork to ensure your changes work as expected before applying them to your primary repository. + +
+ +
+ +What are trunk-temp/* branches, and should CI run on them? + +No, you should configure your CI to completely ignore `trunk-temp/*` branches. Running workflows on them will only create unnecessary or canceled builds. + +The `trunk-temp/*` branch is a temporary, intermediate branch that the merge queue uses to assemble the necessary commits for a test run. Once the build is prepared, this branch is immediately renamed to a `trunk-merge/*` branch. + +
+ +#### Priority & Overrides + +
+ +How can I merge a pull request immediately? + +**Recommended approach:** Use [PR Prioritization](/merge-queue/optimizations/priority-merging) to fast-track your PR through the queue while still validating it: + +``` +/trunk merge --priority=urgent +``` + +The `urgent` priority is the only level that will interrupt currently testing PRs. Your PR will immediately begin testing, and other PRs will restart after yours completes. + +
+ +
+ +How do I merge an emergency pull request right now? + +**Recommended approach:** Use [PR Prioritization](/merge-queue/optimizations/priority-merging) to fast-track your PR through the queue while still validating it: + +``` +/trunk merge --priority=urgent +``` + +The `urgent` priority is the only level that will interrupt currently testing PRs. Your PR will immediately begin testing, and other PRs will restart after yours completes. + +
+ +### Failures, Errors & Debugging + +
+ +How am I notified if my pull request fails in the queue? + +When a pull request is removed from the queue due to a failure, the Trunk bot updates its comment on the original PR. This update includes direct links to the specific workflows that failed, allowing you to quickly investigate and resolve the issue. Example below. + +
Example of a Trunk bot PR comment, detailing a failed status check that caused the PR to be removed from the merge queue.
+ +
+ +
+ +Why does my pull request consistently fail during testing due to "GitHub errors"? + +Most likely, you have a [branch protection rule](/merge-queue/getting-started/configure-branch-protection) that affects merge branches. + +For example, the wild card rule `*/*` applies to `trunk-merge/...`. The Trunk GitHub app does not have admin privileges, so it fails to do some actions on protected branches. To resolve this, you must remove this rule or reach out to Trunk on our community Slack if that is not possible. + +
+ +
+ +Why does my pull request keep failing to merge in the queue? + +The two most likely problems are that you are restricting **who can merge** or that you have **disabled squash merges** into your repo. Trunk Merge Queue needs to use squash merges. To fix this, turn on `'allow squash merges'` for this repo in your GitHub setup. + +
+ +
+ +Why do Dependabot and Renovate pull requests keep getting kicked from the queue? + +By default, both [dependabot](https://docs.github.com/en/code-security/dependabot/working-with-dependabot/managing-pull-requests-for-dependency-updates#changing-the-rebase-strategy-for-dependabot-pull-requests) and [renovate](https://docs.renovatebot.com/updating-rebasing/#updating-and-rebasing-branches) will rebase their PRs whenever other PRs merge into their base branch. If that rebase happens when those PRs are in the queue, they will get kicked since they were updated. There are two ways to mitigate this: + +1. Both dependabot and renovate can be configured to not automatically rebase, while renovate can specifically be configured to only rebase if there's a merge conflict ([dependabot](https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#rebase-strategy), [renovate](https://docs.renovatebot.com/configuration-options/#rebasewhen)) +2. Their PRs can be manually merged, and anything currently in the merge queue will restart with those merged changes + 
diff --git a/merge-queue/reference/merge-queue-cli-reference.mdx b/merge-queue/reference/merge-queue-cli-reference.mdx new file mode 100644 index 0000000..f37d3c9 --- /dev/null +++ b/merge-queue/reference/merge-queue-cli-reference.mdx @@ -0,0 +1,201 @@ +--- +title: "CLI reference" +description: "The Trunk CLI allows you to insert and remove PRs into the Merge Queue. You can also pause and resume the queue from the CLI." +--- +The Trunk CLI allows you to insert and remove PRs into the Merge Queue. You can also pause and resume the queue from the CLI. + +### Installation + + + + + +```bash +curl -LO https://trunk.io/releases/trunk +chmod +x trunk +``` + + + + + +```bash +brew install trunk-io +``` + + + + + +```bash +curl https://get.trunk.io -fsSL | bash +``` + + + + + +### Prerequisites + +Before using the CLI: + +* **Authenticate:** Run `trunk login` +* **Git repository:** Execute commands from your repository root + +### Usage + +```bash +trunk merge [flags] +trunk merge [subcommand] +``` + +#### **Submit a PR to the Queue** + +```bash +trunk merge [flags] +``` + +**Arguments** + +`` - Pull request number to submit. Can be specified with or without `#` prefix (e.g., `1234` or `#1234`) + +**Flags** + +`-p, --priority <0-255>` - Priority determines the order PRs are tested and merged. When a PR is submitted with a priority, it will begin testing before any lower priority PR that isn't currently being tested. Levels: + +* `0` (Urgent) **Interrupts currently testing PRs** to begin testing immediately. Use sparingly as restarting tests is costly. This is the only priority that interrupts running tests +* `10` (High) Tests before default priority PRs +* `100` (Default) Used when no priority is specified +* `200` (Low) Tests after default priority PRs +* `255` ( Lowest) Lowest possible priority + +If multiple PRs have the same priority, they are processed in the order they were submitted. See [Priority Levels](/merge-queue/optimizations/priority-merging). 
+ +**Examples** + + + + + +```bash +trunk merge 1234 +``` + + + + + +```bash +trunk merge 1234 -p 10 +``` + + + + + +```bash +trunk merge 1234 -p 200 +``` + + + + + + +Other PRs testing in the queue will be restarted behind PR 1234 + + +```bash +trunk merge 1234 -p 0 +``` + + + + + +#### Check Queue Status + +```bash +trunk merge status +``` + +Display a snapshot of merge queue activity for the current branch, including: + +* Recently merged PRs +* Currently testing PRs +* PRs waiting in queue + +``` +Trunk Merge Status [main] [Running] + + ✔ Merged #1835 fix: smoke test workflows + Updated 12 minutes ago + + Testing #1840 Add new feature + Testing for 3 minutes + + Queued #1842 Update documentation + Waiting in queue +``` + +#### **View PR Status and History** + +```bash +trunk merge status +``` + +Display detailed merge history and timeline for a specific pull request, including: + +* When it was submitted +* Status changes over time +* Test results +* Links to test runs + +Example output: + +```bash +trunk merge status 1835 + +#1835 fix: smoke test workflows (username) +Learn more at https://app.trunk.io/gh/org/repo/merge/main/1835 + +10-03 21:01:36 Not Ready Pull request submitted to Merge. Waiting for branch protection rules. +10-03 21:04:34 Pending Added to merge queue +10-03 21:04:38 Testing Tests running +10-03 21:17:02 Tests Passed All required tests passed +10-03 21:17:05 ✔ Merged Pull request merged successfully +``` + +#### **Cancel a PR** + +Remove a pull request from the merge queue. Use cases: + +* Cancel before making additional changes to the PR +* Cancel to resubmit with different priority + +```bash +trunk merge cancel +``` + +#### Pause Queue \[admin only] + +Pause the merge queue, changing its state from `Running` to `Paused`. The ordering of PRs in the queue is preserved while paused. 
+ +```bash +trunk merge pause +``` + +**When to use:** + +* **CI failure recovery** - Stop merges and testing until infrastructure issues are resolved +* **Test infrastructure outages** - Pause until systems are back online +* **Emergency situations** - Prevent any merges during incident response +* **Scheduled maintenance** - Block merges during maintenance window + +#### Resume Queue \[admin only] + +Resume merge queue processing, changing its state from `Paused` back to `Running`. The queue will begin testing waiting PRs in priority order. + +```bash +trunk merge resume +``` diff --git a/merge-queue/reference/merge.mdx b/merge-queue/reference/merge.mdx new file mode 100644 index 0000000..a6d927f --- /dev/null +++ b/merge-queue/reference/merge.mdx @@ -0,0 +1,238 @@ +--- +title: "API reference" +description: "Public Trunk Merge Queue API. All requests should be authenticated." +--- +The Trunk Merge Queue API lets you manage pull requests, configure queues, and monitor queue health programmatically. Use it to integrate merge queue operations into your CI/CD pipelines, build custom dashboards, or automate queue management across repositories. + +The API is an HTTP REST API hosted at `https://api.trunk.io/v1`. It returns JSON from all requests and uses standard HTTP response codes. + +All requests must be [authenticated](/setup-and-administration/apis#authentication) by providing the `x-api-token` header. 
+ +## Endpoint summary + +| Endpoint | Method | Description | +| ------------------------------------------------------------------- | ------ | ---------------------------------------------------------------------------------------------------------------------------- | +| [`/submitPullRequest`](#post-submitpullrequest) | POST | Submit a PR to the merge queue for testing and merging | +| [`/cancelPullRequest`](#post-cancelpullrequest) | POST | Remove a PR from the merge queue | +| [`/restartTestsOnPullRequest`](#post-restarttestsonpullrequest) | POST | Re-run tests on a PR currently in the queue | +| [`/getSubmittedPullRequest`](#post-getsubmittedpullrequest) | POST | Check the status of a submitted PR | +| [`/setImpactedTargets`](#post-setimpactedtargets) | POST | Set impacted targets for a PR (used with [parallel queues](/merge-queue/optimizations/parallel-queues)) | +| [`/getMergeQueueTestingDetails`](#post-getmergequeuetestingdetails) | POST | Get details about in-progress merge queue testing | +| [`/createQueue`](#post-createqueue) | POST | Create a new merge queue for a branch | +| [`/deleteQueue`](#post-deletequeue) | POST | Delete an empty merge queue | +| [`/getQueue`](#post-getqueue) | POST | Get queue state, configuration, and enqueued PRs | +| [`/updateQueue`](#post-updatequeue) | POST | Update queue configuration (mode, concurrency, batching, etc.) | +| [`/getMergeQueueMetrics`](#prometheus-metrics) | GET | Get Prometheus-format metrics for monitoring | + +## Common use cases + +**CI/CD automation** — Submit PRs to the queue, check their status, and restart tests automatically from your CI pipelines. + +**Queue management** — Create and configure queues for different branches, adjust concurrency and batching settings, or pause and drain queues during maintenance windows. + +**Monitoring dashboards** — Use the Prometheus metrics endpoint to build custom Grafana dashboards or feed queue health data into your existing observability stack. 
+ +**PR status checks** — Query the status of submitted PRs to build custom notifications or gate downstream workflows. + +## Request format + +Most endpoints accept a JSON request body with these common fields: + +```json +{ + "repo": { + "host": "github.com", + "owner": "my-org", + "name": "my-repo" + }, + "targetBranch": "main", + "pr": { + "number": 123 + } +} +``` + +| Field | Type | Required | Description | +| -------------- | ------- | -------- | --------------------------------------------------- | +| `repo.host` | string | Yes | Repository host (e.g., `github.com`) | +| `repo.owner` | string | Yes | Repository owner or organization | +| `repo.name` | string | Yes | Repository name | +| `targetBranch` | string | Yes | The branch the merge queue targets | +| `pr.number` | integer | Varies | The pull request number (required for PR endpoints) | + +## Examples + +### Submit a PR to the queue + +```bash +curl -X POST https://api.trunk.io/v1/submitPullRequest \ + -H "Content-Type: application/json" \ + -H "x-api-token: $TRUNK_API_TOKEN" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "my-org", + "name": "my-repo" + }, + "targetBranch": "main", + "pr": { + "number": 123 + } + }' +``` + +### Check PR status + +```bash +curl -X POST https://api.trunk.io/v1/getSubmittedPullRequest \ + -H "Content-Type: application/json" \ + -H "x-api-token: $TRUNK_API_TOKEN" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "my-org", + "name": "my-repo" + }, + "targetBranch": "main", + "pr": { + "number": 123 + } + }' +``` + +The response includes the PR state (`NOT_READY`, `PENDING`, `TESTING`, `TESTS_PASSED`, `MERGED`, `FAILED`, `CANCELLED`, or `PENDING_FAILURE`), priority information, and whether the PR is currently submitted to the queue. 
+ +### Get queue state + +```bash +curl -X POST https://api.trunk.io/v1/getQueue \ + -H "Content-Type: application/json" \ + -H "x-api-token: $TRUNK_API_TOKEN" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "my-org", + "name": "my-repo" + }, + "targetBranch": "main" + }' +``` + +The response includes the queue state (`RUNNING`, `PAUSED`, `DRAINING`, or `SWITCHING_MODES`), configuration settings, and a list of all enqueued pull requests with their current states. + +--- + +## Pull request endpoints + +## POST /cancelPullRequest + +> Cancel a pull request in a merge queue. + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/cancelPullRequest":{"post":{"summary":"Cancel a pull request in a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /getSubmittedPullRequest + +> Get a submitted pull request from a merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/getSubmittedPullRequest":{"post":{"summary":"Get a submitted pull request from a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"id":{"type":"string"},"state":{"type":"string","enum":["NOT_READY","PENDING","TESTING","TESTS_PASSED","MERGED","FAILED","CANCELLED","PENDING_FAILURE"]},"readiness":{"type":"object","properties":{"hasImpactedTargets":{"type":"boolean"},"requiresImpactedTargets":{"type":"boolean"},"doesBaseBranchMatch":{"type":"boolean"},"gitHubMergeability":{"type":"string","enum":["UNSPECIFIED","IN_PROGRESS","MERGEABLE","NOT_MERGEABLE"]}},"required":["requiresImpactedTargets","doesBaseBranchMatch","gitHubMergeability"]},"stateChangedAt":{"type":"string"},"priorityValue":{"type":"number"},"priorityName":{"type":"string"},"usedDefaultPriorityName":{"type":"string"},"skipTheLine":{"type":"boolean"},"forceEnqueued":{"type":"boolean"},"isCurrentlySubmittedToQueue":{"type":"boolean"},"prNumber":{"type":"number"},"prTitle":{"type":"string"},"prSha":{"type":"string"},"prBaseBranch":{"type":"string"},"prAuthor":{"type":"string"}},"required":["stateChangedAt","priorityValue","priorityName","skipTheLine","forceEnqueued","isCurrentlySubmittedToQueue","prNumber","prTitle","prSha","prBaseBranch","prAutho
r"]}}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /restartTestsOnPullRequest + +> Restart tests on a pull request in a merge queue. + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/restartTestsOnPullRequest":{"post":{"summary":"Restart tests on a pull request in a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /setImpactedTargets + +> Set impacted targets for a pull request. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/setImpactedTargets":{"post":{"summary":"Set impacted targets for a pull request.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":1,"maximum":4294967295},"sha":{"type":"string"}},"required":["number","sha"]},"targetBranch":{"type":"string"},"impactedTargets":{"anyOf":[{"type":"array","items":{"type":"string"}},{"type":"string","const":"ALL"}]}},"required":["repo","pr","targetBranch","impactedTargets"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /submitPullRequest + +> Submit a pull request to a merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/submitPullRequest":{"post":{"summary":"Submit a pull request to a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"},"priority":{"anyOf":[{"type":"integer","minimum":0,"maximum":4294967295},{"type":"string"},{"type":"null"}]},"noBatch":{"type":"boolean"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /getMergeQueueTestingDetails + +> Get details about testing that Merge Queue is performing + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/getMergeQueueTestingDetails":{"post":{"summary":"Get details about testing that Merge Queue is 
performing","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"testRunId":{"type":"string"},"targetBranch":{"type":"string"}},"required":["repo","testRunId","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"requiredStatuses":{"type":"array","items":{"type":"string"}},"requiredStatusesSource":{"type":"string","enum":["TRUNK_CONFIG","REPO_PROVIDER_BRANCH_PROTECTION"]},"testBranch":{"type":"string"},"testBranchSha":{"type":"string"},"createdAt":{"type":"string"},"status":{"type":"string","enum":["IN_PROGRESS","FAILED","CANCELLED","SUCCEEDED"]},"checkSuites":{"type":"array","items":{"type":"object","properties":{"checkRuns":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string"},"url":{"type":"string"},"status":{"type":"string","enum":["QUEUED","IN_PROGRESS","COMPLETED"]},"conclusion":{"type":"string","enum":["ACTION_REQUIRED","CANCELLED","FAILURE","NEUTRAL","SUCCESS","SKIPPED","STALE","TIMED_OUT"]}},"required":["name","url"]}}},"required":["checkRuns"]}},"statusChecks":{"type":"array","items":{"type":"object","properties":{"context":{"type":"string"},"url":{"type":"string"},"state":{"type":"string","enum":["ERROR","FAILURE","PENDING","SUCCESS"]}},"required":["context"]}},"testedPullRequests":{"type":"array","items":{"type":"object","properties":{"prNumber":{"type":"number"},"prUrl":{"type":"string"},"title":{"type":"string"}},"required":["prNumber","prUrl","title"]}},"impactedTargets":{"type":"array","items":{"type":"string"}},"dependentPrs":{"type":"array","items":{"type":"number"}}},"required":["requiredStatuses","testBranch","testBranchSha","checkSuites","statusChecks","testedPullRequests"]}}}},"400":{"description":"Bad 
Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## Metrics endpoints + +### Prometheus metrics + +`GET /v1/getMergeQueueMetrics` + +Returns merge queue metrics in Prometheus text exposition format. Authenticate with the `x-api-token` header. + +| Parameter | Required | Description | +| ---------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| `repo` | No | Repository in `owner/name` format. If omitted, returns metrics for all repositories in the organization. Must be provided together with `repoHost`. | +| `repoHost` | Conditional | Repository host (e.g., `github.com`). Required if `repo` is specified. | + +Response content type: `text/plain; version=0.0.4; charset=utf-8` + +See [Prometheus metrics endpoint](/merge-queue/administration/metrics#prometheus-metrics-endpoint) for the full list of available metrics, scrape configuration, and example queries. + +## Queue endpoints + +Use these endpoints to create, configure, and manage merge queues. Each queue targets a specific branch in your repository. For more on running multiple queues, see [parallel queues](/merge-queue/optimizations/parallel-queues). + +## POST /createQueue + +> Create a new merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/createQueue":{"post":{"summary":"Create a new merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string"},"mode":{"type":"string","enum":["single","parallel"]},"concurrency":{"type":"integer","minimum":1,"maximum":4294967295}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /deleteQueue + +> Delete the specified merge queue. The queue must be empty in order to be deleted. + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/deleteQueue":{"post":{"summary":"Delete the specified merge queue. 
The queue must be empty in order to be deleted.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string"}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + + +The queue must be empty before it can be deleted. Cancel or merge all enqueued PRs first. + + +## POST /getQueue + +> Get the merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/getQueue":{"post":{"summary":"Get the merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string"}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"state":{"type":"string","enum":["RUNNING","PAUSED","DRAINING","SWITCHING_MODES"]},"branch":{"type":"string"},"concurrency":{"type":"number"},"testingTimeoutMins":{"type":"number"},"mode":{"type":"string","enum":["SINGLE","PARALLEL"]},"canOptimisticallyMerge":{"type":"boolean"},"pendingFailureDepth":{"type":"number"},"isBatching":{"type":"boolean"},"batchingMaxWaitTimeMins":{"type":"number"},"batchingMinSize":{"type":"number"},"createPrsForTestingBranches":{"type":"boolean"},"enqueuedPullRequests":{"type":"array","items":{"type":"object","properties":{"id":{"type":"string"},"state":{"type":"string","enum":["NOT_READY","PENDING","TESTING","TESTS_PASSED","MERGED","FAILED","CANCELLED","PENDING_FAILURE"]},"stateChangedAt":{"type":"string"},"priorityValue":{"type":"number"},"priorityName":{"type":"string"},"usedDefaultPriorityName":{"type":"string"},"skipTheLine":{"type":"boolean"},"prNumber":{"type":"number"},"prTitle":{"type":"string"},"prSha":{"type":"string"},"prBaseBranch":{"type":"string"},"prAuthor":{"type":"string"}},"required":["stateChangedAt","priorityValue","priorityName","skipTheLine","prNumber","prTitle","prSha","prBaseBranch","prAuthor"]}}},"required":["branch","concurrency","testingTimeoutMins","canOptimisticallyMerge","pendingFa
ilureDepth","isBatching","batchingMaxWaitTimeMins","batchingMinSize","createPrsForTestingBranches","enqueuedPullRequests"]}}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /updateQueue + +> Update the merge queue. + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/updateQueue":{"post":{"summary":"Update the merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string","description":"The branch that the merge queue is targeting."},"state":{"type":"string","enum":["RUNNING","PAUSED","DRAINING"],"description":"The desired state of the merge queue. 
Valid values: RUNNING, PAUSED, DRAINING."},"concurrency":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The number of PRs or batches of PRs the queue can test at once."},"bisectionConcurrency":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The number of tests the merge queue can run when bisecting a batch to figure out what PR in the batch failed."},"testingTimeoutMinutes":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The maximum number of minutes the merge queue will wait for tests to complete before timing out."},"pendingFailureDepth":{"type":"integer","minimum":1,"maximum":4294967295,"description":"When enabled, PRs that fail tests will wait for the specified number of PRs below them to finish testing before getting kicked from the queue. This works best with optimistic merging enabled."},"canOptimisticallyMerge":{"type":"boolean","description":"When enabled, a PR that passes tests will also cause any PR ahead of it in the queue to also get marked as passing, since tests have passed with those commits."},"batch":{"type":"boolean","description":"Enable or disable batching. When enabled, the merge queue will group PRs into batches for testing."},"batchingMaxWaitTimeMinutes":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The maximum number of minutes the merge queue will wait to collect PRs into a batch before starting tests."},"batchingMinSize":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The minimum number of PRs required to form a batch."},"mode":{"type":"string","enum":["single","parallel"],"description":"The queue mode. 'single' processes PRs one at a time. 
'parallel' processes multiple PRs concurrently."},"commentsEnabled":{"type":"boolean","description":"Whether or not Merge Queue will post GitHub comments on PRs."},"commandsEnabled":{"type":"boolean","description":"Whether or not users are allowed to submit PRs to the merge queue by commenting `/trunk merge`."},"createPrsForTestingBranches":{"type":"boolean","description":"Whether or not the merge queue will create PRs for its testing branches, allowing CI to run on them."},"directMergeMode":{"type":"string","enum":["OFF","ALWAYS"],"description":"Allow PRs to merge directly into the target branch if they're up to date with the target branch when submitting them to the queue instead of running tests on them in the merge queue."},"optimizationMode":{"type":"string","enum":["OFF","BISECTION_SKIP_REDUNDANT_TESTS"],"description":"The optimization strategy for the merge queue. 'OFF' disables optimizations. 'BISECTION_SKIP_REDUNDANT_TESTS' uses bisection and skips redundant tests."},"mergeMethod":{"type":"string","enum":["MERGE_COMMIT","SQUASH","REBASE"],"description":"The Git merge method used when merging PRs into the target branch. Valid values: MERGE_COMMIT, SQUASH, REBASE."},"statusCheckEnabled":{"type":"boolean","description":"Post a GitHub status check on PRs with the status of the PR in the merge queue."},"requiredStatuses":{"type":"array","items":{"type":"string"},"description":"Allows setting the statuses that must pass when the merge queue performs tests in order for a PR to merge. Setting the statuses here will override GitHub branch protection settings or your `.trunk/trunk.yaml`."},"deleteRequiredStatuses":{"type":"boolean","description":"Removes a manually specified set of required statuses. 
After this, the statuses that must pass when the merge queue performs testing will be pulled from either GitHub branch protection settings or your `.trunk/trunk.yaml`."}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## Related resources + +* [CLI reference](/merge-queue/reference/merge-queue-cli-reference) — Command-line interface for merge queue operations +* [Metrics and monitoring](/merge-queue/administration/metrics) — Dashboard analytics and Prometheus endpoint details +* [Webhooks](/merge-queue/webhooks) — Event-driven notifications for queue activity +* [Settings and configurations](/merge-queue/administration/advanced-settings) — Queue settings available in the Trunk web app +* [Authentication](/setup-and-administration/apis#authentication) — API token setup and management diff --git a/merge-queue/reference/troubleshooting.mdx b/merge-queue/reference/troubleshooting.mdx new file mode 100644 index 0000000..2647a27 --- /dev/null +++ b/merge-queue/reference/troubleshooting.mdx @@ -0,0 +1,59 @@ +--- +title: "Troubleshooting" +description: "If your test PR doesn't merge automatically:" +--- +#### Troubleshooting common issues + + +Visit [Trunk Support](/setup-and-administration/support) for additional assistance or to contact the support team. 
+ + +If your test PR doesn't merge automatically: + +* **Check the status comments for the PR in** the [Trunk Dashboard](https://app.trunk.io/) to see what it's waiting for +* **Stuck in "Queued"**: Usually means branch protection rules haven't passed (missing required status checks or code review) or there are merge conflicts. If the status looks correct but the PR still won't enter the queue, try [removing](/merge-queue/using-the-queue/reference) and re-adding by commenting `/trunk merge` again on the PR. +* **Fails when attempting to merge**: Check that squash merges are enabled for your repository in GitHub settings (`Settings > General > Allow squash merging`). Trunk Merge Queue requires squash merges to be enabled. +* **"Permission denied" errors**: Review the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide to ensure `trunk-temp/*` and `trunk-merge/*` branches aren't protected by wildcard rules like `*/*`. +* **Status checks not running**: Verify your CI is configured to run on draft PRs (or `trunk-merge/**` branches if using push-triggered mode). See the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide for details. + +### + +### Troubleshooting common issues + +
+ +"Permission denied on trunk-merge/* branch" + +**Cause:** Branch protection rules are applying to Trunk's temporary branches. + +**Solution:** Follow the "Exclude Trunk's Temporary Branches" section above to ensure `trunk-temp/*` and `trunk-merge/*` are not protected. + +
+ +
+ +Pull request stuck as "Queued" in the queue + +**Cause:** Required status checks are not completing or not configured correctly. + +**Solution:** + +* Click on the pull request in the Trunk Dashboard to see which checks it's waiting for +* Verify those checks are running in your CI provider +* If using Push-triggered mode, ensure the check names in `trunk.yaml` exactly match your CI job names + +
+ +
+ +Required status checks not running + +**If using Draft PR mode:** Verify your CI workflows are triggered by pull requests (including draft pull requests). + +**If using Push-triggered mode:** + +* Verify your CI workflows trigger on pushes to `trunk-merge/**` branches +* Check that the workflows actually ran in your CI provider's interface +* Ensure the `trunk-io` bot has permission to push to create these branches + +
diff --git a/merge-queue/using-the-queue.mdx b/merge-queue/using-the-queue.mdx new file mode 100644 index 0000000..59e6f6d --- /dev/null +++ b/merge-queue/using-the-queue.mdx @@ -0,0 +1,26 @@ +--- +title: "Using the Queue" +description: "Daily operations for working with Trunk Merge Queue." +--- +These pages cover the daily operations that developers perform when working with Trunk Merge Queue. Whether you're submitting your first PR or handling a failed test, this section provides practical guidance for getting your code merged safely and efficiently. + +### Submit and manage pull requests + +[**Submit and cancel pull requests**](/merge-queue/using-the-queue/reference)\ +How to add PRs to the queue via GitHub comments, CLI, or UI, and remove them when needed. + +[**Work with stacked pull requests**](/merge-queue/using-the-queue/stacked-pull-requests)\ +Manage dependent PRs that build on each other. + +### Monitor and troubleshoot + +[**Monitor queue status**](/merge-queue/using-the-queue/monitor-queue-status)\ +Track your PR's progress through the queue in real-time via dashboard or CLI. + +[**Handle failed pull requests**](/merge-queue/using-the-queue/handle-failed-pull-requests)\ +Diagnose failures, retry flaky tests, fix issues, and resubmit. + +### Emergency procedures + +[**Emergency pull requests**](#emergency-procedures)\ +Bypass the queue for critical production fixes (use sparingly). diff --git a/merge-queue/using-the-queue/emergency-pull-requests.mdx b/merge-queue/using-the-queue/emergency-pull-requests.mdx new file mode 100644 index 0000000..a6027e5 --- /dev/null +++ b/merge-queue/using-the-queue/emergency-pull-requests.mdx @@ -0,0 +1,23 @@ +--- +title: "Emergency pull requests" +description: "Emergency merges bypass the queue entirely and push directly to your main branch. This is the most disruptive action you can take and should be reserved for true emergencies only." 
+--- +Emergency merges bypass the queue entirely and push directly to your main branch. This is the **most disruptive action** you can take and should be reserved for true emergencies only. + + +**Warning:** Emergency merges bypass all safety checks. Use sparingly. + + +### **Emergency bypass** + +If you need to completely bypass the merge queue, you can merge the PR directly through GitHub as you normally would. The merge queue will restart everything currently testing to account for the new head of the merge branch. However, this means your emergency PR won't be validated by the merge queue's predictive testing. + +#### **Recommended approach** + +Use [PR Prioritization](/merge-queue/optimizations/priority-merging) to fast-track your PR through the queue while still validating it: + +``` +/trunk merge --priority=urgent +``` + +The `urgent` priority is the only level that will interrupt currently testing PRs. Your PR will immediately begin testing, and other PRs will restart after yours completes. diff --git a/merge-queue/using-the-queue/handle-failed-pull-requests.mdx b/merge-queue/using-the-queue/handle-failed-pull-requests.mdx new file mode 100644 index 0000000..31e2d21 --- /dev/null +++ b/merge-queue/using-the-queue/handle-failed-pull-requests.mdx @@ -0,0 +1,72 @@ +--- +title: "Handle failed pull requests" +description: "When a PR fails in the merge queue, it's automatically removed so it doesn't block other PRs. Understanding why it failed helps you fix it quickly." +--- +### Understanding failures + +When a PR fails in the merge queue, it's automatically removed so it doesn't block other PRs. Understanding why it failed helps you fix it quickly. 
+ +#### Why PRs fail in the queue + +**Legitimate test failures (most common):** + +* Tests fail because your code has a bug +* Tests fail due to conflicts with recently merged changes +* Tests fail because of missing or incorrect changes +* Integration tests reveal issues not caught locally + +**Flaky tests:** + +* Tests fail randomly, pass on retry +* Timing issues, race conditions, or external dependencies +* Network timeouts or resource contention +* See Anti-flake protection to handle these automatically + +**GitHub/CI infrastructure issues:** + +* GitHub API errors or rate limits +* CI runner out of resources or crashed +* Network connectivity problems +* Temporary infrastructure failures + +**Configuration problems:** + +* Branch protection rules misconfigured +* Required status checks not running +* Trunk unable to create test branches +* Missing CI configuration + +**Merge conflicts:** + +* Another PR merged and created conflicts +* Your branch needs to be rebased +* Files were moved or renamed + +### Manually restarting failed pull requests + +The PR Details panel has a dropdown "**Actions"** menu, where you can: + +1. **Restart tests.** Use this to manually restart testing of this PR. +2. **Remove from queue**. If the PR is "Queued", then it will cancel it, preventing it from going into the queue until it is re-queued. If the PR is currently in the queue, it will be removed from the queue, which will restart all PRs that depended on it. + +Trunk Merge Queue will automatically restart failed PRs when it can under certain conditions (see PR states). Since the restart is usually from a failed PR being removed from the queue, other PRs behind it will also be restarted. If you want to manually restart a PR, you can restart it ***in place*** by clicking the **Details** link in the Failures summary screen to open the merge details screen. Then, click the **Actions** dropdown, and select **Restart** + +
+ +There are a couple of reasons you might want to manually retry a PR. First, if a PR ends up in the `PENDING_FAILURE` state because of something transient like a CI runner disconnecting or flakey tests, you can retry the PR right away instead of waiting for PRs in front of it to pass or fail. + +Another reason to restart a PR is if the proper tests don't get kicked off due to a failure in the CI system. For example, if GitHub has an outage and is not triggering workflows or actions properly. + +--- + +## Failures + +A tabulated view of all the items that have failed in the Merge Queue, e.g. due to testing. + +
+ +### Retry failed pull requests + +When a PR has been dropped from the queue, you can manually retry the PR by clicking the **Details** link in the Failures summary screen to open the merge details screen. Then, click the **Actions** dropdown, and select **Retry** + +

Re-queue a PR that has failed or been cancelled if it is not currently in the queue.

diff --git a/merge-queue/using-the-queue/monitor-queue-status.mdx b/merge-queue/using-the-queue/monitor-queue-status.mdx new file mode 100644 index 0000000..43791b7 --- /dev/null +++ b/merge-queue/using-the-queue/monitor-queue-status.mdx @@ -0,0 +1,96 @@ +--- +title: "Monitor queue status" +description: "The Trunk Merge Queue dashboard gives you real-time visibility into your queue's activity." +--- +### Access the Merge Queue dashboard + +The Trunk Merge Queue dashboard gives you real-time visibility into your queue's activity. + +**Access the dashboard:** + +1. **Navigate to Trunk:** [https://app.trunk.io](https://app.trunk.io/) +2. **Select your organization** (if you're in multiple) +3. **Click** the **Merge Queue** tab in the upper left +4. Select your repository + +**Quick access from GitHub:** + +* Trunk bot comments include dashboard links +* Click any link in bot comments to go directly to that PR's status + +### Queue overview + +The main dashboard shows a high-level view of your merge queue activity. + +

Clicking on a queue item navigates you to the details page.

+ +### Queue view + +View status of the queue and recent activity in the **Queue** tab + +#### **Active queue status:** + +* **Currently testing**: Which PR is running tests right now +* **Queued PRs**: How many PRs are waiting +* **Merged**: List of previously merged PRs + +#### **Activity feed:** + +* PRs merged in the last 24 hours +* Success rate (percentage of PRs that passed tests) +* Average merge time +* Failed PRs requiring attention + +### Graph view + +The graph view shows all PRs currently being tested by Trunk Merge Queue and their relationships. Each node represents a pull request, and each edge indicates that the PR is testing with the item above and depends on it. All edges point towards the target branch; as items merge, the affected queues restructure. If running in `Single` mode, this will be a single line showing the testing and merging process. + +#### Reading the graph + +* **Merged items** appear in the green section at the top of the graph +* **Batched items** display batching information directly on each node. Clicking a batched node takes you to the draft PR created for that batch +* **Hover** over any node to highlight its path to the root, making it easy to trace dependencies +* **Bisection** activity appears in a side tab when a batch is being bisected to isolate a failure +* A **legend** explains the node states and visual indicators + +#### Navigation + +* Click any node to navigate to the PR's detail page +* A link at the top of the graph view lets you switch to the legacy graph layout if needed + +#### Priority badges + +PR nodes in the graph view display a priority badge when the PR was queued with a non-default priority: + +* **Urgent** — a red pulsing badge labeled **URGENT**. Indicates the PR is interrupting in-progress testing. +* **High** — an orange badge labeled **HIGH**. The PR is fast-tracked ahead of normal-priority items. 
+ +PRs queued at the default medium priority or at low priority do not display a badge, keeping the graph view clean. + +For details on setting priority levels, see [Priority merging](/merge-queue/optimizations/priority-merging). + +### Health view + +Select a period of time to inspect using the **Period** dropdown (default 7 days) and a **Granularity** (defaults to daily) of queue metrics. + +#### Conclusion counts + +A bar chart of PRs and their statuses. More Green = More Merges! + +#### Time in queue + +View statistical trends of PR time in queue; the default p50 view is useful for an "Average time in queue" evaluation. + +## Pull request details + +The PR details show information about a PR, including a link to the PR in GitHub, the history of the PR within Trunk Merge Queue, and what must be done before a PR can be admitted to the queue for PRs that have not entered the queue yet. + +When a PR has not been admitted to the queue yet, Trunk Merge Queue waits for: + +1. Impacted targets to be uploaded for the PR's current SHA (`Parallel` mode only) +2. The PR to be mergeable according to GitHub. If the PR is not mergeable yet, this most likely means that the PR is not meeting all branch protection rules you have set (for example, not all required status checks have passed yet) or has a merge conflict with the target branch +3. The target branch of the pull request to match the branch that merge queue merges into + +

PR readiness details for a PR that has been submitted but has not yet entered the merge queue.

+ +In the screenshot above, the PR has been submitted to Merge but has not yet been added to the queue. It will be added once all of the branch protection rules pass and there are no merge conflicts with the target branch. diff --git a/merge-queue/using-the-queue/reference.mdx b/merge-queue/using-the-queue/reference.mdx new file mode 100644 index 0000000..3dcf6dd --- /dev/null +++ b/merge-queue/using-the-queue/reference.mdx @@ -0,0 +1,50 @@ +--- +title: "Submit and cancel pull requests" +description: "Covers the journey of a PR from submission through various states to completion" +--- +## Submitting and cancelling pull requests + +We offer a few mechanisms for submitting a PR to the Merge Graph: + +* Posting a GitHub comment `/trunk merge` on a pull request. +* Ticking the checkbox in the Trunk comment on a pull request. +* Clicking the "Retry" button on the WebApp. +* Using the `trunk` CLI: + +``` +trunk login +trunk merge +``` + +We offer similar commands for cancellation. + +* Posting a GitHub comment `/trunk cancel` on a pull request. +* Cancellation from the WebApp: + +
+ +* Using the `trunk` CLI: + +``` +trunk login +trunk merge cancel +``` + +## Pull request processing + +Once a PR is submitted to the merge queue it goes through several states. First, it starts as *Queued* until all of the required conditions to submit it are met. Once ready, the PR moves to the *Pending* state, waiting for a Merge Queue to pick it up, and then enters the *Testing* state. Once the tests pass the PR may still need to wait for upstream PRs. Once any upstream PRs are complete the PR will be merged and then removed from the Merge Queue. If a PR fails or is canceled then it will go to the failed or canceled state. + +## Pull request states + +A PR's lifecycle in the Merge Queue goes through the following states: + +| State | Description | +| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Queued | The PR was submitted to Trunk Merge Queue, but the PR isn't eligible for merging yet. Impacted targets may not be uploaded, or readiness checks may not have passed. | +| Pending | The MergeGraph created a node for the PR. Testing will begin if the graph has capacity. | +| Testing | The PR is testing. Required status checks that Trunk Merge Queue must gate on before merging PRs can be specified in `.trunk/trunk.yaml` or through GitHub branch protection rules as the "Status checks that are required" before merging on your merge branch | +| Tests Passed | The PR successfully passed tests. It may have to wait for upstream PRs to complete tests before merging. | +| Pending Failure | The PR failed tests. 
The cause of failures is still indeterminate - it may be due to an upstream PR, or due to the current PR. It will wait until the root cause of tests has been determined, and restart testing on your PR if due to an upstream PR. If you want to manually restart a failed PR, see [manually restarting PRs](/merge-queue/using-the-queue/handle-failed-pull-requests#manually-restarting-failed-pull-requests). | +| Merged | The PR successfully merged into the target branch. It will be removed from the queue. | +| Failed | The PR caused a testing failure. It will be removed from the queue. | +| Cancelled | The PR was cancelled, e.g. `/trunk cancel`. It will be removed from the queue. | diff --git a/merge-queue/using-the-queue/stacked-pull-requests.mdx b/merge-queue/using-the-queue/stacked-pull-requests.mdx new file mode 100644 index 0000000..9114856 --- /dev/null +++ b/merge-queue/using-the-queue/stacked-pull-requests.mdx @@ -0,0 +1,65 @@ +--- +title: "Work with stacked pull requests" +description: "Yes, Trunk Merge Queue fully supports stacked pull requests. You can use stacked PR workflows with your preferred tooling (GitHub CLI, web interface, or third-party apps)." +--- +### How it works + +Trunk Merge Queue determines PR dependencies by examining each pull request's **base branch** (the branch it will merge into, shown under the PR title on GitHub). + +* If a PR's base branch is your main branch (e.g., `main`), it's ready to process immediately +* If a PR's base branch is another feature branch (indicating it's part of a stack), Merge Queue will wait until that base branch changes to your main branch before processing + +### Merging stacked PRs + +#### Step 1: Enqueue all PRs in your stack + +Each PR in the stack must be enqueued separately. You can: + +* Comment `/trunk merge` on each PR +* Check the box in the Trunk comment on each PR +* Use the CLI: `trunk merge ` for each PR + +**Why enqueue separately?** Each PR is an independent merge operation in the queue. 
This gives you control over which PRs in your stack should be merged versus which might need more work. + +#### Step 2: Automatic sequential processing + +Once enqueued, Trunk handles the rest automatically: + +1. The **first PR** in the stack (base branch = `main`) enters the queue, runs tests, and merges +2. When it merges, **GitHub automatically updates** the next PR's base branch from the previous feature branch to `main` +3. The **second PR** now has `main` as its base, so it proceeds through the queue +4. This continues until all PRs in the stack are merged + +**Example:** For a stack of 5 PRs: + +* PR #1 (base: `main`) → tests → merges +* PR #2's base automatically changes from PR #1's branch to `main` → tests → merges +* PR #3's base automatically changes from PR #2's branch to `main` → tests → merges +* And so on... + +--- + +### Important considerations + +#### Sequential testing + +PRs in a stack are tested and merged **one at a time** in order. The second PR won't begin testing until the first PR has fully merged. This ensures: + +* Each PR is tested against the actual state of your main branch +* No conflicts arise from dependencies +* Test results are deterministic and reliable + +**Tradeoff:** This sequential approach means that a stack of 5 PRs will take longer to merge than 5 independent PRs, since they cannot be tested in parallel. However, it provides the safest merge path for dependent changes. + +#### Enqueued PRs with non-main base branches + +If you enqueue a PR whose base branch is not your main branch and that base never changes to main, the PR will remain in the queue without processing. This typically happens if: + +* The parent PR in the stack was not enqueued or merged +* You're testing queue behavior with a non-standard workflow + +The PR will begin processing as soon as its base branch updates to your main branch. + +### Configuration + +No special configuration is required. 
Trunk Merge Queue automatically detects stacked relationships based on the base branch field in GitHub. diff --git a/merge-queue/webhooks.mdx b/merge-queue/webhooks.mdx new file mode 100644 index 0000000..01bc341 --- /dev/null +++ b/merge-queue/webhooks.mdx @@ -0,0 +1,19 @@ +--- +title: "Webhooks" +description: "Merge Queue Webhooks" +--- +Trunk provides a variety of webhooks to allow responding to various events from Trunk. Each event corresponds to a Trunk feature and an action within that feature (for example, a Pull Request being submitted to Trunk Merge). + +### Supported events + +Trunk provides various webhooks to respond to events from Trunk Merge Queue. Merge Queue events are named with a `pull_request` prefix; you can find the events you can respond to in the Webhook Events reference from Svix. + + +Open the referenced resource in a new tab. + + +You can learn about the Svix event catalog in the [Svix docs](https://docs.svix.com/receiving/using-app-portal/event-catalog). + + +If you don't currently have an endpoint ready to receive webhooks, you can quickly set up a URL for testing by clicking the "use Svix Play" link - you'll be able to inspect all webhook events sent there. + diff --git a/setup-and-administration/apis.mdx b/setup-and-administration/apis.mdx new file mode 100644 index 0000000..f130518 --- /dev/null +++ b/setup-and-administration/apis.mdx @@ -0,0 +1,45 @@ +--- +title: "API Reference" +description: "Trunk APIs for building custom integrations." +--- +## REST API Overview + +Trunk provides HTTP REST APIs for each of our features. The APIs use status codes to indicate the success or failure of requests, return JSON from all requests, and use standard HTTP response codes. All API requests must be authenticated. + +## Available APIs + +* [Flaky Tests](/flaky-tests/flaky-tests): for accessing information like quarantined tests in your repo. +* [Merge API](/merge-queue/reference/merge): for controlling the Trunk Merge Queue. 
+ +## Authentication + +Authenticate to the API with an API key using the header `x-api-token`. + +### Finding your API token + +You can find your API token in the [Trunk App](https://app.trunk.io). + + +Explore the interactive walkthrough in a new tab. + + +### Example + +To submit an empty list of events to be tracked, do the following from the command line. + +```sh +curl \ + -i \ + -X POST https://api.trunk.io/v1/metrics/trackEvents \ + -H "Content-Type: application/json" \ + -H "x-source: curl-sample" \ + -H "x-api-token: {REDACTED}" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "trunk-io", + "name": "jenkins-plugin" + }, + "events":[] + }' +``` diff --git a/setup-and-administration/apis/webhooks.mdx b/setup-and-administration/apis/webhooks.mdx new file mode 100644 index 0000000..57fc894 --- /dev/null +++ b/setup-and-administration/apis/webhooks.mdx @@ -0,0 +1,17 @@ +--- +title: "Webhooks Reference" +description: "Documentation on the various webhooks that are provided by Trunk" +--- +
+ +Svix generates a reference for all the exposed webhook events. All possible webhooks from Trunk can be viewed in the [Webhook Events Catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/): + + +Open the referenced resource in a new tab. + + +### Guides and Examples + +The documentation of each Trunk product contains guides and examples for using webhooks. + +
Webhooks for Flaky Testswebhooks
Merge Queue Webhookswebhooks
diff --git a/setup-and-administration/billing.mdx b/setup-and-administration/billing.mdx new file mode 100644 index 0000000..85b5b27 --- /dev/null +++ b/setup-and-administration/billing.mdx @@ -0,0 +1,80 @@ +--- +title: "Billing and plans" +description: "Trunk Subscription Plans" +--- +Trunk offers a **Free**, **Team**, and **Enterprise** plan for access to any/all of our features. The team plan is available for organizations of up to 50 active committers. + +## Plans + +### **Free plan** + +Trunk offers a free tier for small teams and open-source projects to experiment with each of our features. You can use Trunk at no cost if: + +* You have 5 or fewer monthly active committers on private repositories, **and** +* Your monthly test span usage is under 5 million + +Public repositories are always free for up to 5 million test spans per month. + +If you exceed these limits, you'll be prompted to upgrade based on the number of private committers and additional test span usage. + +Similar to [calculating user counts](#calculating-user-counts), our free tier limits are calculated based on a 30-day rolling window. + +
FeatureMetricFree Tier Limit
AllUsersUp to 5 private repo committers; unlimited on public repos
Flaky TestsNumber of test spans5 committers and 5M test spans per month
Merge QueuePRs merged per monthUnlimited usage; pricing begins if >5 private repo committers
Code QualityQuality & security metricsUnlimited usage
+ +### Team plan + +The Trunk Team plan offers a monthly subscription using a per-seat model. At the end of every billing period, we calculate the number of users using Trunk and update the next month’s invoice to reflect the latest user count. Each seat has access to all of Trunk's features. + +
FeatureMetricLimits
Code QualityQuality & security metricsUnlimited
Merge QueuePRs merged per monthUnlimited
Flaky TestsNumber of test spans

1 million test spans per seat per month.

$3 for each additional 1 million test spans.

+ +### **Enterprise plan** + +Trunk Enterprise offers powerful admin controls, dedicated support, access to custom billing or terms, and features like SSO. If your team is interested in an enterprise plan, please contact sales@trunk.io. + +**Trials** + +You and your team can trial Trunk before signing up for an Enterprise or Team plan. To try Trunk, please contact sales@trunk.io. To extend or cancel the trial, please contact sales@trunk.io. + +### Calculating user counts + +A user is a non-bot user who has made a commit to a private repo with Trunk enabled in the last 30 days. Specifically, we look at their username; if someone changes their username on Git, *we would consider that a separate user*. We do not count contributions to public (open source) repos. Contributor counts are displayed on **Settings** > **Billing**. + + +Trunk requires the [Trunk GitHub App](https://github.com/apps/trunk-io) to be installed in your repo to count seats. + + +For example, consider Alice, Bob, and Charlie are all in the same organization, which owns two private repos: `abc/repo1` and `abc/repo2`. Given the following timeline of events: + +| Day 1 | Day 15 | Day 22 | +| ---------------------------------- | -------------------------------- | ------------------------------------ | +| Alice commits code to `abc/repo1`. | Bob commits code to `abc/repo2`. | Charlie commits code to `abc/repo1`. | + +On Day 1, the user count would be **one**: just Alice.\ +On Day 15, the user count would be **two**: Alice and Bob, since the same organization owns both private repos.\ +On Day 22, the user count would be **three**: Alice, Bob, and Charlie.\ +On Day 30, the user count would be **three**. We consider days 1 through 30 (inclusive), which include Alice's, Bob's, and Charlie's commits.\ +On Day 31, the user count would be **two**. We consider days 2 through 31 (inclusive), which only includes Bob's and Charlie's commits. 
+ +### Calculating an invoice + +At the end of every billing cycle, Trunk calculates what the next invoice should be. Trunk determines feature usage and the number of seats used over the [free tier usage limits](#free-plans-and-trials) . See our section on [calculating user counts](#calculating-user-counts) to determine how much we charge per feature usage. + +### Editing payment details + +You can edit your payment details by navigating to **Settings** > **Billing** and clicking on the pencil icon on the credit card. Trunk accepts both credit card and ACH; if you require a different payment method, please contact us at sales@trunk.io. + +![](https://files.readme.io/d7adf4f-Screen_Shot_2023-01-17_at_8.08.17_PM.png) + +### Cancelling a plan + +You can cancel an active Trunk subscription by navigating to **Settings** > **Billing** and clicking the **Cancel Subscription** button. Your plan will transition back into the [free tier](#free-plans-and-trials); if you want to re-enable this plan, please contact us at sales@trunk.io. + + +Cancelling a Trunk Plan and then re-enabling it will degrade the product experience and may lose data. Please contact us at sales@trunk.io to re-enable any canceled plan. + + +### A note on security + +Your security is important to us. We do not store your credit card information anywhere in our systems. Online payments are processed using Stripe, which is PCI-compliant in accordance with industry standards. + +[^1]: The number of test case results uploaded. diff --git a/setup-and-administration/connecting-to-trunk.mdx b/setup-and-administration/connecting-to-trunk.mdx new file mode 100644 index 0000000..b8534a8 --- /dev/null +++ b/setup-and-administration/connecting-to-trunk.mdx @@ -0,0 +1,37 @@ +--- +title: "Account Setup" +description: "Welcome to Trunk! Before you can access Trunk's toolkit to ship faster, you must create a Trunk account. You can connect to Trunk in two ways:" +--- +### Create a Trunk account + +Welcome to Trunk! 
Before you can access Trunk's toolkit to ship faster, you must [create a Trunk account](https://app.trunk.io/signup). You can connect to Trunk in two ways: + +* Connect with OAuth using your Google, GitHub, or Microsoft account. +* Connect with SSO through your organization's email. To configure SSO for your organization, please [contact us](mailto:support@trunk.io) or [join us on Slack](https://slack.trunk.io/). + +### Create an Organization + +After creating a Trunk Account, you'll be invited to [create an organization](https://app.trunk.io/onboarding). Organizations are shared workspaces for your team, and **individual repositories** connected to Trunk will fall under your team's organization. + +
+ +To create your organization, you need the following: + +* **Workspace Name**: This is the display name of your organization. This can be changed later. +* **URL Slug**: This is the link to access your workspace and also your [Organization Slug](/setup-and-administration/managing-your-organization#slug). This cannot be changed. + + +**Trying to join your team?** + +If your team is already on Trunk and you're looking to join an existing organization, see the docs on[ inviting team members](/setup-and-administration/managing-your-organization#inviting-team-members) and contact an organization admin. + + +### Start using Trunk + +After connecting to Trunk, you're ready to get started. + +### Next steps + +
Flaky TestsFlakyTests.pngoverview
Merge QueueMerge.pngmerge-queue
+ +
Manage your Organizationmanaging-your-organization
Install the CLIBroken link
diff --git a/setup-and-administration/github-app-permissions.mdx b/setup-and-administration/github-app-permissions.mdx new file mode 100644 index 0000000..dba033e --- /dev/null +++ b/setup-and-administration/github-app-permissions.mdx @@ -0,0 +1,83 @@ +--- +title: "Trunk GitHub App" +description: "Learn more about which permissions the Trunk GitHub app requests and why Trunk needs them." +--- +The Trunk GitHub app lets you integrate Merge Queue and Flaky Test features with your GitHub repos. It can help you[ ](https://docs.trunk.io/code-quality/overview/setup-and-installation/github-integration)manage [merge queue branches](https://github.com/trunk-io/docs/blob/main/merge-queue/set-up-trunk-merge/README), and post [PR comments about your test results](/flaky-tests/github-pull-request-comments). + +You can install the Trunk GitHub App by going to **Settings** > **Organization** > **GitHub** and clicking **Install GitHub App**. You'll be redirected to GitHub to select the repositories where the GitHub app will be installed. + +The Trunk GitHub App enables functionality for all of Trunk's tools. Not every permission is required for every feature of Trunk, and if you have only enabled one of our tools, we will only access the permissions pertaining to that tool. For complete documentation of the individual permissions the Trunk GitHub App requires, see the [GitHub developer docs](https://docs.github.com/en/rest/authentication/permissions-required-for-github-apps). Below, find an explanation of every permission the Trunk GitHub App requires and what Trunk uses it for. + +## **Repository permissions** + +Repository permissions permit access to repositories and related resources. + +### Actions (Read and write) + +This permission allows access to the results of workflow and job runs. + +Trunk uses this permission to read the results of workflow and job runs, and to dispatch or cancel workflow runs triggered by Trunk Merge. 
+ +### Administration (Read-only) + +This permission includes read-only access to repository settings, teams, and collaborators. + +Trunk uses this permission to access a repository’s branch protection rules. Trunk cannot edit any administration settings on your GitHub organization or repository. + +### Checks (Read and write) + +This permission includes access to checks on code (such as GitHub actions and other integrations like BuildKite, CircleCI). + +Trunk uses this permission to examine the status of your commits, branches, and pull requests. Trunk uses this information to determine when pull requests are ready to merge. Trunk also uses this permission to post the results of code analysis. + +### Commit statuses (Read-only) + +This permission includes access to statuses on code. Some CI providers use this integration with GitHub to post the results of a job run. + +Trunk uses this permission to examine the status of your commits, branches, and pull requests. Trunk uses this information to determine when pull requests are ready to merge. + +### Contents (Read and write) + +This permission includes access to repository contents, commits, branches, downloads, releases, and merges. + +Trunk uses this permission to read the `trunk.yaml` configuration file if you’ve added it to your repository. Trunk also uses write permissions to create, update, and delete the branches created and managed by Merge. + +### Issues (Read and write) + +This permission includes access to issues and related comments, assignees, labels, and milestones. + +Trunk uses this permission to read and write comments on pull requests. + +### Metadata (Read-only) + +This permission includes access to search repositories, list collaborators, and access repository metadata. + +This permission is required by all GitHub applications that access repository information. + +### Pull requests (Read and write) + +This permission includes access to pull requests and merges. 
+ +Trunk uses this permission to view and merge pull requests managed by Merge. + +### Workflows (Read and write) + +This permission includes access to update GitHub Action workflow files. + +Trunk uses this permission to facilitate onboarding to running Trunk Check on CI, for users who use Github Actions. + +## **Organization permissions** + +Organization permissions permit access to organization-related resources. + +### **Events (Read-only)** + +This permission includes access to view events triggered by an activity in an organization. + +Trunk uses this permission to get organization events to keep app install information up to date. + +### **Members (Read-only)** + +This permission includes access to organization members and teams. + +Trunk uses this permission to subscribe to organization rename events to keep app install information current. diff --git a/setup-and-administration/managing-your-organization.mdx b/setup-and-administration/managing-your-organization.mdx new file mode 100644 index 0000000..1d9e885 --- /dev/null +++ b/setup-and-administration/managing-your-organization.mdx @@ -0,0 +1,66 @@ +--- +title: "Managing your Organization" +description: "Integrating with Trunk through Webhooks, APIs, or CLI will require authentication using your organization's slug and token." +--- +## Organization Slug and Token + +Integrating with Trunk through [Webhooks](/flaky-tests/webhooks), [APIs](/setup-and-administration/apis), or CLI will require authentication using your organization's slug and token. + +You can find your organization slug and token by going to **Settings** > **Organization** > **General**. + +
+ +--- + +## Managing Team Members + +You can manage a team member's role and remove team members by navigating to **Settings** > **Organization** > **Team** > **Members** and clicking on the name of a team member. You can change the role of a team member between user and admin, as well as removing the user from your organization. + +
+ +### Inviting Team Members + +You can invite individual members manually by navigating to **Settings** > **Organization** > **Team** > **Members** and clicking the **Invite Users** button. An email will be sent to the invitee's inbox. + +In the form, add comma separated email addresses and select a Role from the drop down. This role will be applied to all users in this invite batch. + +Available Roles are: + +* **Member**: Full access with limited permissions (default) +* **Admin**: Full administrative access + +
+ +### Pending Invites + +You can view and manage pending email invites by navigating to **Settings** > **Organization** > **Team** > **Pending Invites**. + +From this page you can copy the member invite link or revoke the invite using the **three dots** menu. + +
+ +### Team Domains + +If your team uses emails managed by Google or Microsoft under a common domain, you can grant access to your team using **Team Domains**. When a team member creates a Trunk account with an email under you configured team domain, they will be granted access to your Trunk organization and repositories. + +You can enable team domains under **Settings > Organization > Team > Domains** and clicking **Add Domain**. + + + + + +
+ +
+ + + +
+ +
+ +
+ + +Trunk also supports SSO login. If you wish to use SSO, please contact us at support@trunk.io. + diff --git a/setup-and-administration/security.mdx b/setup-and-administration/security.mdx new file mode 100644 index 0000000..379dd5c --- /dev/null +++ b/setup-and-administration/security.mdx @@ -0,0 +1,94 @@ +--- +title: "Security" +description: "The security and privacy of your Trunk Platform usage" +--- +At Trunk, we prioritize the security and privacy of your data. Here's how we protect your information when using Trunk Merge Queue and Flaky Tests. + +### What data we access + +#### Merge Queue + +Trunk Merge Queue integrates with your GitHub repository via our GitHub App to safely automate code merging. Here's what we access: + +* **GitHub repository metadata**: Repository structure, branch information, and pull request data necessary for merge operations +* **Pull request details**: PR titles, descriptions, commit information, and test results to determine merge eligibility +* **CI/CD status checks**: Results from your CI jobs to validate code before merging +* **GitHub webhook events**: Real-time notifications about PR updates and CI status changes + +**What we do NOT access:** + +* We do not clone or store your entire codebase +* Your source code remains in your GitHub repository + +#### Flaky Tests + +Flaky Tests works by uploading test results from your CI jobs to Trunk's backend for analysis. 
Here's what we collect: + +* **Test results**: Test reports in standard formats (JUnit XML, XCResult, Bazel BEP JSON, RSpec JSON) containing: + * Test names and identifiers + * Pass/fail status + * Test execution time and duration + * Error messages and stack traces from failed tests + * Test suite organization and hierarchy +* **CI job metadata**: Job names, build IDs, branch names, commit SHAs, and timestamps +* **Build statistics**: CI job timing data, test count, and historical performance metrics +* **Repository information**: Repository name and organization details + +**How uploads work:** + +* Test results are uploaded from your CI environment after tests complete +* Uploads use your organization-specific API token for authentication +* All data is transmitted over encrypted connections (TLS) +* You control which CI jobs upload results and when + +**What we do NOT collect:** + +* Full source code or proprietary business logic +* Sensitive environment variables or secrets +* Customer data processed by your applications +* Test execution logs beyond standard test framework outputs + +**Data retention:** Test results and analytics data are retained for 45 days to provide historical flakiness analysis and trends over time. 
+ +### How we protect your data + +#### Infrastructure Security + +* **Hosting**: All services are hosted on Amazon Web Services (AWS) in physically secure, U.S.-based data centers with 24/7 on-site security and access monitoring +* **Encryption in transit**: All data transmitted to and from Trunk uses TLS (Transport Layer Security) and HSTS +* **Encryption at rest**: All customer data is encrypted using AES-256 +* **Network isolation**: Production services run in isolated AWS VPCs with restricted access; all services are within private subnets with no internet access and use a network gateway to permit specific traffic + +#### Access Controls + +* **Authentication**: Multi-factor authentication (MFA) required for access to sensitive systems and applications +* **Principle of least privilege**: Access to customer data is limited to authorized personnel with business need +* **Unique user accounts**: All access requires unique user credentials; no shared accounts +* **Access monitoring**: All access to production environments is logged and monitored for security purposes +* **Access reviews**: User access is reviewed annually to ensure appropriate permissions +* **Immediate revocation**: System access is revoked within one business day of employee termination + +#### Security Monitoring & Testing + +* **Continuous monitoring**: Automated logging and alerting for security events; alerts are sent to appropriate personnel and corrective actions are performed as necessary +* **Vulnerability scanning**: Quarterly automated vulnerability scans to identify and remediate security issues +* **Penetration testing**: Annual third-party penetration tests using industry-standard methodologies +* **Incident response**: Formal incident response plan with defined procedures for security events + +### Compliance & Auditing + +#### SOC 2 Type II Certified + +Trunk maintains **SOC 2 Type II compliance**, demonstrating our commitment to: + +* **Security**: Protection against 
unauthorized access +* **Availability**: System uptime and reliability +* **Confidentiality**: Protection of sensitive information + +Our most recent SOC 2 Type II audit confirmed that: + +* Controls were suitably designed throughout the period +* Controls operated effectively throughout the period +* No significant security incidents occurred during the audit period + +**To request a copy of our SOC 2 report**, please contact us at security@trunk.io diff --git a/setup-and-administration/support.mdx b/setup-and-administration/support.mdx new file mode 100644 index 0000000..f26dcfb --- /dev/null +++ b/setup-and-administration/support.mdx @@ -0,0 +1,54 @@ +--- +title: "Support" +description: "Need help with Trunk?" +--- +### How to reach us + +
Contact MethodDescription
Community SlackWe offer near real-time support in our Slack community. Post your questions in #getting-started, #bugs, or #general for quick answers. Want to discuss something privately? Simply drop a note in any channel and we'll continue in DMs.
Meeting or DemoFor sales-related matters, you can email us at sales@trunk.io or schedule a meeting here.
GitHub DiscussionsIf there's something you want us to build, you can post on Slack in #feature-requests or start a discussion on GitHub Discussions.
EmailEmail us at support@trunk.io. We'll get back to you within 48 hours, though for fastest support, we recommend our Slack community!
Security Concerns (Email)Report security issues or request security information by emailing us at security@trunk.io
Report OutagesReport outages and critical issues by clicking "Report an issue" at status.trunk.io
+ +### Enterprise support + +#### Support availability + +* Business Hours: 6:00 AM - 6:00 PM PST, Monday through Friday +* Critical Issue Monitoring: 24/7 coverage for production-impacting incidents + +#### Response time commitments + +* Critical Issues: 2 hour maximum response time (24/7) (See 'Report Outages' above) +* High Priority Issues: 4 hours maximum response time (business hours) +* General Issues: 1 business day maximum response time (business hours) + + +*Response time refers to initial acknowledgment and issue triage. Resolution timeframes depend on technical complexity and may require coordination across multiple systems.* + + +#### Account management + +* Dedicated account manager assigned to enterprise accounts +* Maintains familiarity with your technical environment and requirements +* Primary point of contact for non-technical issues and escalations + +#### Product development and roadmap + +* Direct access to product management team for feature discussions and roadmap input +* Early access to beta features and roadmap previews aligned with your use cases +* Dedicated feature request tracking with status updates and technical feasibility assessments + +#### Implementation and technical consulting + +* Prioritized onboarding and initial configuration assistance +* Technical best practices review for your specific development workflow +* Architecture guidance for optimal integration + +#### Support channels + +* Dedicated Slack Connect channel with direct engineering team access (business hours) +* Escalated email support queue with enterprise priority handling +* Live technical consultation for complex technical issues + +#### Security and compliance + +* Security assessment completion for procurement processes +* Access to current security documentation, SOC2 certifications, and reports +* Review of custom Data Processing Agreements and security requirements From db3bcb2656c0c7c61dd914e81cb947a2c407a765 Mon Sep 17 00:00:00 2001 From: 
"mintlify[bot]" <109931778+mintlify[bot]@users.noreply.github.com> Date: Fri, 8 May 2026 16:10:11 +0000 Subject: [PATCH 2/2] Add Google Cloud Build CI provider documentation Generated-By: mintlify-agent --- docs.json | 1 + flaky-tests/get-started/ci-providers.mdx | 2 +- .../ci-providers/google-cloud-build.mdx | 221 ++++++++++++++++++ 3 files changed, 223 insertions(+), 1 deletion(-) create mode 100644 flaky-tests/get-started/ci-providers/google-cloud-build.mdx diff --git a/docs.json b/docs.json index 20d59dc..50d94fc 100644 --- a/docs.json +++ b/docs.json @@ -186,6 +186,7 @@ "flaky-tests/get-started/ci-providers/droneci", "flaky-tests/get-started/ci-providers/github-actions", "flaky-tests/get-started/ci-providers/gitlab", + "flaky-tests/get-started/ci-providers/google-cloud-build", "flaky-tests/get-started/ci-providers/jenkins", "flaky-tests/get-started/ci-providers/otherci", "flaky-tests/get-started/ci-providers/semaphoreci", diff --git a/flaky-tests/get-started/ci-providers.mdx b/flaky-tests/get-started/ci-providers.mdx index 4d61b08..7ef2997 100644 --- a/flaky-tests/get-started/ci-providers.mdx +++ b/flaky-tests/get-started/ci-providers.mdx @@ -12,4 +12,4 @@ Flaky Test support for source control providers like GitLab and BitBucket is sti ### Quickstart -
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/ci-providers/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
+
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
Google Cloud Buildgoogle-cloud-build
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/ci-providers/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/ci-providers/google-cloud-build.mdx b/flaky-tests/get-started/ci-providers/google-cloud-build.mdx new file mode 100644 index 0000000..ff1629f --- /dev/null +++ b/flaky-tests/get-started/ci-providers/google-cloud-build.mdx @@ -0,0 +1,221 @@ +--- +title: "Google Cloud Build" +description: "Configure Google Cloud Build to upload test results to Trunk Flaky Tests" +--- +Trunk Flaky Tests integrates with your CI by adding a step in your Google Cloud Build configuration to upload tests with the [Trunk Analytics CLI](/flaky-tests/uploader). + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. + +### Checklist + +By the end of this guide, you should achieve the following. + +* [ ] Get your Trunk organization slug and token +* [ ] Store your token in GCP Secret Manager +* [ ] Connect your GitHub repos to Cloud Build +* [ ] Create Cloud Build triggers for PR and push events +* [ ] Configure your `cloudbuild.yaml` to upload to Trunk +* [ ] Validate your uploads in Trunk + +After completing these checklist items, you'll be integrated with Trunk. + +### Trunk Organization Slug and Token + +Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests) and obtain your Trunk organization slug and token. + +#### Trunk Slug + +You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. 
+ +#### Trunk Token + +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. + +### Store the Trunk Token in GCP Secret Manager + +Store your Trunk API token in [GCP Secret Manager](https://console.cloud.google.com/security/secret-manager) so Cloud Build can securely access it during builds. + +1. Open **GCP Console > Secret Manager**. +2. Click **Create Secret**. +3. Name the secret (for example, `trunk-api-token`) and paste your Trunk organization API token as the value. +4. Click **Create**. + +You'll reference this secret in your `cloudbuild.yaml` using the `availableSecrets` and `secretEnv` fields. + +### Connect GitHub Repos to Cloud Build + +Ensure your GitHub repositories are connected to Cloud Build through the [Cloud Build GitHub App](https://cloud.google.com/build/docs/automating-builds/github/connect-repo-github). + +1. Open **GCP Console > Cloud Build > Repositories**. +2. Connect your GitHub repository using the Cloud Build GitHub App. + +### Create Cloud Build Triggers + +Create two Cloud Build triggers for each repository you want to upload test results from: + +1. Open **GCP Console > Cloud Build > Triggers**. +2. Create a trigger for **pull request events** — this uploads test results from PR branches. +3. Create a trigger for **push events** to your stable branch (for example, `main`) — this uploads test results from your stable branch. + + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. 
+ +[Learn more about detection](/flaky-tests/detection) + + +### Upload to Trunk + +Add an upload step in your `cloudbuild.yaml` that runs after your test steps. The Trunk CLI automatically detects Google Cloud Build when the `TRIGGER_NAME` environment variable is set. + + +Google Cloud Build does not automatically provide environment variables to build steps. You must explicitly pass the required substitution variables in your `cloudbuild.yaml` using the `env` field. Without these variables, the CLI cannot detect your CI platform or link uploads to the correct branches and pull requests. + + +#### Required Environment Variables + +The following environment variables must be passed to the upload step: + +| Variable | Description | +| --------------- | ----------------------------------------------------------------- | +| `TRIGGER_NAME` | Name of the Cloud Build trigger (used for CI platform detection) | +| `PROJECT_ID` | GCP project ID (used to construct the CI job link) | +| `BUILD_ID` | Unique ID of the Cloud Build run (used to construct the CI job link) | +| `BRANCH_NAME` | Git branch being built (used for push/stable branch uploads) | +| `_HEAD_BRANCH` | Head branch for PR-triggered builds | +| `_PR_NUMBER` | Pull request number for PR-triggered builds | + +#### Example `cloudbuild.yaml` + +The following is an example of a `cloudbuild.yaml` configuration that runs tests and uploads results to Trunk. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. + +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. 
+ + + + + +```yaml +steps: + - name: gcr.io/cloud-builders/npm + id: run-tests + script: | + #!/bin/bash + set -euo pipefail + npm install + npm test + timeout: 600s + allowExitCodes: [0, 1] + + - name: gcr.io/cloud-builders/gcloud + id: upload-test-results + script: | + #!/bin/bash + set -euo pipefail + curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli + ./trunk-analytics-cli upload \ + --junit-paths "<glob path to JUnit files>" \ + --org-url-slug <trunk-org-slug> \ + --token "${TRUNK_API_TOKEN}" + waitFor: + - run-tests + timeout: 300s + env: + - "PROJECT_ID=${PROJECT_ID}" + - "BUILD_ID=${BUILD_ID}" + - "TRIGGER_NAME=${TRIGGER_NAME}" + - "BRANCH_NAME=${BRANCH_NAME}" + - "_HEAD_BRANCH=${_HEAD_BRANCH}" + - "_PR_NUMBER=${_PR_NUMBER}" + secretEnv: ["TRUNK_API_TOKEN"] + +options: + logging: CLOUD_LOGGING_ONLY +timeout: 1200s +availableSecrets: + secretManager: + - versionName: projects/${PROJECT_ID}/secrets/<secret-name>/versions/latest + env: TRUNK_API_TOKEN +``` + + + + + +```yaml +steps: + - name: gcr.io/cloud-builders/bazel + id: run-tests + args: ['test', '//...', '--build_event_json_file=bep.json'] + timeout: 600s + allowExitCodes: [0, 1] + + - name: gcr.io/cloud-builders/gcloud + id: upload-test-results + script: | + #!/bin/bash + set -euo pipefail + curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli + ./trunk-analytics-cli upload \ + --bazel-bep-path bep.json \ + --org-url-slug <trunk-org-slug> \ + --token "${TRUNK_API_TOKEN}" + waitFor: + - run-tests + timeout: 300s + env: + - "PROJECT_ID=${PROJECT_ID}" + - "BUILD_ID=${BUILD_ID}" + - "TRIGGER_NAME=${TRIGGER_NAME}" + - "BRANCH_NAME=${BRANCH_NAME}" + - "_HEAD_BRANCH=${_HEAD_BRANCH}" + - "_PR_NUMBER=${_PR_NUMBER}" + secretEnv: ["TRUNK_API_TOKEN"] + +options: + logging: CLOUD_LOGGING_ONLY +timeout: 1200s +availableSecrets: + 
secretManager: + - versionName: projects/${PROJECT_ID}/secrets/<secret-name>/versions/latest + env: TRUNK_API_TOKEN +``` + + + + + + +The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](/flaky-tests/uploader) page for all available platform downloads. + + + +**Important:** Set `allowExitCodes: [0, 1]` on your test step so the upload step runs even when tests fail. Without this, Cloud Build stops the pipeline on test failures and your results won't be uploaded. + + +Replace the following placeholders in the example: + +| Placeholder | Description | +| ------------------- | -------------------------------------------------------------------------------------------- | +| `<glob path to JUnit files>` | Glob pattern matching your JUnit XML test report files (for example, `**/junit.xml`) | +| `<trunk-org-slug>` | Your Trunk organization slug | +| `<secret-name>` | The name of the secret you created in GCP Secret Manager | + +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. + +#### Stale files + +Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. + + +**Have questions?** + +Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. +