diff --git a/.atlas-analysis.json b/.atlas-analysis.json deleted file mode 100644 index 7f07f04..0000000 --- a/.atlas-analysis.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "projectType": "saas", - "projectName": "Trunk", - "projectDescription": "Trunk enables continuous delivery by eliminating flaky tests and serialized merge queues that bottleneck software teams at scale.", - "theme": "aspen", - "primaryColor": "#838484", - "lightColor": "#151516", - "darkColor": "#c8c8c8", - "navigation": { - "tabs": [ - { - "tab": "Overview", - "groups": [ - { - "group": "Get Started", - "pages": [ - "introduction", - "setup-and-administration/connecting-to-trunk" - ] - }, - { - "group": "Administration", - "pages": [ - "setup-and-administration/managing-your-organization", - "setup-and-administration/github-app-permissions", - "setup-and-administration/security", - "setup-and-administration/billing" - ] - } - ] - }, - { - "tab": "Flaky Tests", - "groups": [ - { - "group": "Get Started", - "pages": [ - "flaky-tests/overview", - "flaky-tests/get-started" - ] - }, - { - "group": "Features", - "pages": [ - "flaky-tests/detection", - "flaky-tests/quarantining", - "flaky-tests/github-pull-request-comments", - "flaky-tests/ticketing-integrations", - "flaky-tests/webhooks" - ] - } - ] - }, - { - "tab": "Merge Queue", - "groups": [ - { - "group": "Get Started", - "pages": [ - "merge-queue/overview", - "merge-queue/set-up-trunk-merge" - ] - }, - { - "group": "Features", - "pages": [ - "merge-queue/optimizations", - "merge-queue/using-the-queue", - "merge-queue/reference" - ] - } - ] - }, - { - "tab": "API Reference", - "groups": [ - { - "group": "APIs", - "pages": [ - "setup-and-administration/apis" - ] - } - ] - } - ] - }, - "keyFeatures": [ - "Flaky test detection using branch-aware analysis and stack trace embeddings", - "Automatic quarantine of flaky tests to prevent CI pipeline failures", - "Parallel merge queue with independent lanes for non-overlapping PRs", - "Intelligent batching 
of up to 100 PRs per CI run with auto-bisection", - "Anti-flake protection in merge queue with optimistic merging", - "GitHub PR comments with test failure context and quarantine status", - "Jira and Linear ticketing integrations for flaky test tracking", - "Webhooks and full REST API for both Flaky Tests and Merge Queue", - "AI-powered flaky test repair via MCP integration (Claude, Codex, Cursor)", - "SOC 2 Type II certified with no access to source code or secrets" - ], - "publicApiSurface": [ - "POST /v1/metrics/trackEvents", - "GET /flaky-tests (quarantined tests API)", - "Merge Queue REST API", - "Webhooks: v2.test_case.status_changed", - "Trunk Analytics CLI (test result uploader)", - "x-api-token header authentication" - ] -} diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 8e5bbf0..0000000 --- a/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.py \ No newline at end of file diff --git a/.mintignore b/.mintignore deleted file mode 100644 index fdd79ee..0000000 --- a/.mintignore +++ /dev/null @@ -1,11 +0,0 @@ -# Mintlify automatically ignores these files and directories: - -# .git, .github, .claude, .agents, .idea, node_modules, - -# README.md, LICENSE.md, CHANGELOG.md, CONTRIBUTING.md - -# Draft content - -drafts/ -\*.draft.mdx -\_deprecated diff --git a/assets/02_Branch-1_kopiera.png b/assets/02_Branch-1_kopiera.png deleted file mode 100644 index ef7e2fe..0000000 Binary files a/assets/02_Branch-1_kopiera.png and /dev/null differ diff --git a/assets/04_anti-flake_(final01)_050324.mp4 b/assets/04_anti-flake_(final01)_050324.mp4 deleted file mode 100644 index 2c1ead4..0000000 Binary files a/assets/04_anti-flake_(final01)_050324.mp4 and /dev/null differ diff --git a/assets/14d4355-image.png b/assets/14d4355-image.png deleted file mode 100644 index c982ec2..0000000 Binary files a/assets/14d4355-image.png and /dev/null differ diff --git a/assets/1768426934-direct-merge-mode-toggle.avif b/assets/1768426934-direct-merge-mode-toggle.avif deleted file 
mode 100644 index 1422576..0000000 Binary files a/assets/1768426934-direct-merge-mode-toggle.avif and /dev/null differ diff --git a/assets/1768426960-batching-settings.avif b/assets/1768426960-batching-settings.avif deleted file mode 100644 index 6d4fd69..0000000 Binary files a/assets/1768426960-batching-settings.avif and /dev/null differ diff --git a/assets/1768426992-impacted-target-filtering.avif b/assets/1768426992-impacted-target-filtering.avif deleted file mode 100644 index dd39943..0000000 Binary files a/assets/1768426992-impacted-target-filtering.avif and /dev/null differ diff --git a/assets/4x2_Logos_(Dark-NoBG).png b/assets/4x2_Logos_(Dark-NoBG).png deleted file mode 100644 index 19626a4..0000000 Binary files a/assets/4x2_Logos_(Dark-NoBG).png and /dev/null differ diff --git a/assets/4x2_Logos_(Light-NoBG).png b/assets/4x2_Logos_(Light-NoBG).png deleted file mode 100644 index 0707d7d..0000000 Binary files a/assets/4x2_Logos_(Light-NoBG).png and /dev/null differ diff --git a/assets/99d3bf4f-9035-4cb6-9d7c-51c8ad9412a8-1757943910265.png b/assets/99d3bf4f-9035-4cb6-9d7c-51c8ad9412a8-1757943910265.png deleted file mode 100644 index a774e85..0000000 Binary files a/assets/99d3bf4f-9035-4cb6-9d7c-51c8ad9412a8-1757943910265.png and /dev/null differ diff --git a/assets/Actions.svg b/assets/Actions.svg deleted file mode 100644 index bfdbeb2..0000000 --- a/assets/Actions.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Ads_&_Social.zip b/assets/Ads_&_Social.zip deleted file mode 100644 index 0252a04..0000000 Binary files a/assets/Ads_&_Social.zip and /dev/null differ diff --git a/assets/Annotations.png b/assets/Annotations.png deleted file mode 100644 index 96fdeed..0000000 Binary files a/assets/Annotations.png and /dev/null differ diff --git a/assets/Audit_log_(2).png b/assets/Audit_log_(2).png deleted file mode 100644 index 41cf4e1..0000000 Binary files a/assets/Audit_log_(2).png and /dev/null differ diff --git a/assets/BFNs.png 
b/assets/BFNs.png deleted file mode 100644 index 5d3fe02..0000000 Binary files a/assets/BFNs.png and /dev/null differ diff --git a/assets/BFNs_(2).png b/assets/BFNs_(2).png deleted file mode 100644 index 320bda9..0000000 Binary files a/assets/BFNs_(2).png and /dev/null differ diff --git a/assets/BFNs_(3).png b/assets/BFNs_(3).png deleted file mode 100644 index 58a4db0..0000000 Binary files a/assets/BFNs_(3).png and /dev/null differ diff --git a/assets/BFNs_for_test_case.png b/assets/BFNs_for_test_case.png deleted file mode 100644 index 9860b6e..0000000 Binary files a/assets/BFNs_for_test_case.png and /dev/null differ diff --git a/assets/Billing.svg b/assets/Billing.svg deleted file mode 100644 index 3677694..0000000 --- a/assets/Billing.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Brex-logo-ink.svg b/assets/Brex-logo-ink.svg deleted file mode 100644 index acdfa4a..0000000 --- a/assets/Brex-logo-ink.svg +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - - - - - - diff --git a/assets/Brex_Inc._Corporate_Logo.png b/assets/Brex_Inc._Corporate_Logo.png deleted file mode 100644 index d64ce47..0000000 Binary files a/assets/Brex_Inc._Corporate_Logo.png and /dev/null differ diff --git a/assets/Brex_Inc_White.png b/assets/Brex_Inc_White.png deleted file mode 100644 index d56b54b..0000000 Binary files a/assets/Brex_Inc_White.png and /dev/null differ diff --git a/assets/CIAnalytics.png b/assets/CIAnalytics.png deleted file mode 100644 index bfb2712..0000000 Binary files a/assets/CIAnalytics.png and /dev/null differ diff --git a/assets/CI_Analytics.mov b/assets/CI_Analytics.mov deleted file mode 100644 index d17ffae..0000000 Binary files a/assets/CI_Analytics.mov and /dev/null differ diff --git a/assets/CI_Analytics.mp4 b/assets/CI_Analytics.mp4 deleted file mode 100644 index 1eb2717..0000000 Binary files a/assets/CI_Analytics.mp4 and /dev/null differ diff --git a/assets/CI_Analytics.svg b/assets/CI_Analytics.svg deleted file mode 100644 index 
b927192..0000000 --- a/assets/CI_Analytics.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/CI_Analytics_Settings_screenshot b/assets/CI_Analytics_Settings_screenshot deleted file mode 100644 index 89daf9a..0000000 Binary files a/assets/CI_Analytics_Settings_screenshot and /dev/null differ diff --git a/assets/CI_Debugger.svg b/assets/CI_Debugger.svg deleted file mode 100644 index 0ffbab4..0000000 --- a/assets/CI_Debugger.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/CQ_Deprecation.png b/assets/CQ_Deprecation.png deleted file mode 100644 index fb58f4a..0000000 Binary files a/assets/CQ_Deprecation.png and /dev/null differ diff --git a/assets/CQ_Deprecation_(1).png b/assets/CQ_Deprecation_(1).png deleted file mode 100644 index e1e4e57..0000000 Binary files a/assets/CQ_Deprecation_(1).png and /dev/null differ diff --git a/assets/CQ_Nightly_Deprecation.png b/assets/CQ_Nightly_Deprecation.png deleted file mode 100644 index f26b8c6..0000000 Binary files a/assets/CQ_Nightly_Deprecation.png and /dev/null differ diff --git a/assets/Check.svg b/assets/Check.svg deleted file mode 100644 index a29e3f3..0000000 --- a/assets/Check.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/CheckOptions.png b/assets/CheckOptions.png deleted file mode 100644 index a8303b4..0000000 Binary files a/assets/CheckOptions.png and /dev/null differ diff --git a/assets/CircleCI-env-var-settings-screenshot.png b/assets/CircleCI-env-var-settings-screenshot.png deleted file mode 100644 index 40d9b46..0000000 Binary files a/assets/CircleCI-env-var-settings-screenshot.png and /dev/null differ diff --git a/assets/CircleCI.png b/assets/CircleCI.png deleted file mode 100644 index b0858b0..0000000 Binary files a/assets/CircleCI.png and /dev/null differ diff --git a/assets/CodeQuality.png b/assets/CodeQuality.png deleted file mode 100644 index 2f308ad..0000000 Binary files a/assets/CodeQuality.png and /dev/null 
differ diff --git a/assets/Community.svg b/assets/Community.svg deleted file mode 100644 index c71ebdc..0000000 --- a/assets/Community.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Components.svg b/assets/Components.svg deleted file mode 100644 index 5d7532c..0000000 --- a/assets/Components.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Contact_Support.svg b/assets/Contact_Support.svg deleted file mode 100644 index e078faa..0000000 --- a/assets/Contact_Support.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Enable_Override_Quarantining_(1).png b/assets/Enable_Override_Quarantining_(1).png deleted file mode 100644 index 180f640..0000000 Binary files a/assets/Enable_Override_Quarantining_(1).png and /dev/null differ diff --git a/assets/Faire_logo.png b/assets/Faire_logo.png deleted file mode 100644 index fa284e1..0000000 Binary files a/assets/Faire_logo.png and /dev/null differ diff --git a/assets/Faire_logo.svg b/assets/Faire_logo.svg deleted file mode 100644 index d8ae98c..0000000 --- a/assets/Faire_logo.svg +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - diff --git a/assets/Faire_logo_(1).png b/assets/Faire_logo_(1).png deleted file mode 100644 index fafbbaa..0000000 Binary files a/assets/Faire_logo_(1).png and /dev/null differ diff --git a/assets/Faire_logo_cropped.svg b/assets/Faire_logo_cropped.svg deleted file mode 100644 index 9bbf000..0000000 --- a/assets/Faire_logo_cropped.svg +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - diff --git a/assets/Faire_logo_white.png b/assets/Faire_logo_white.png deleted file mode 100644 index 042259b..0000000 Binary files a/assets/Faire_logo_white.png and /dev/null differ diff --git a/assets/FlakyTests.png b/assets/FlakyTests.png deleted file mode 100644 index d7e6444..0000000 Binary files a/assets/FlakyTests.png and /dev/null differ diff --git a/assets/Frame_1000004291.png b/assets/Frame_1000004291.png deleted file 
mode 100644 index 92b3b79..0000000 Binary files a/assets/Frame_1000004291.png and /dev/null differ diff --git a/assets/GitHub_Comment.png b/assets/GitHub_Comment.png deleted file mode 100644 index 31ab505..0000000 Binary files a/assets/GitHub_Comment.png and /dev/null differ diff --git a/assets/Glob.svg b/assets/Glob.svg deleted file mode 100644 index 08a4c76..0000000 --- a/assets/Glob.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Glydways-horizontal-black.png b/assets/Glydways-horizontal-black.png deleted file mode 100644 index 57fdb73..0000000 Binary files a/assets/Glydways-horizontal-black.png and /dev/null differ diff --git a/assets/Glydways-horizontal-black_(1).png b/assets/Glydways-horizontal-black_(1).png deleted file mode 100644 index 5912a94..0000000 Binary files a/assets/Glydways-horizontal-black_(1).png and /dev/null differ diff --git a/assets/Glydways-horizontal-logos-white2.png b/assets/Glydways-horizontal-logos-white2.png deleted file mode 100644 index 1aa7fef..0000000 Binary files a/assets/Glydways-horizontal-logos-white2.png and /dev/null differ diff --git a/assets/Group_1270.png b/assets/Group_1270.png deleted file mode 100644 index d0a1e6d..0000000 Binary files a/assets/Group_1270.png and /dev/null differ diff --git a/assets/Group_1273.png b/assets/Group_1273.png deleted file mode 100644 index 3a7eb14..0000000 Binary files a/assets/Group_1273.png and /dev/null differ diff --git a/assets/Group_1274.png b/assets/Group_1274.png deleted file mode 100644 index b3f513f..0000000 Binary files a/assets/Group_1274.png and /dev/null differ diff --git a/assets/Group_1275.png b/assets/Group_1275.png deleted file mode 100644 index 57c4501..0000000 Binary files a/assets/Group_1275.png and /dev/null differ diff --git a/assets/Group_1276.png b/assets/Group_1276.png deleted file mode 100644 index 5583245..0000000 Binary files a/assets/Group_1276.png and /dev/null differ diff --git a/assets/Group_1277.png b/assets/Group_1277.png 
deleted file mode 100644 index 7ba0311..0000000 Binary files a/assets/Group_1277.png and /dev/null differ diff --git a/assets/Group_1337.svg b/assets/Group_1337.svg deleted file mode 100644 index 23c608f..0000000 --- a/assets/Group_1337.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Group_1337_(1).svg b/assets/Group_1337_(1).svg deleted file mode 100644 index 28051c7..0000000 --- a/assets/Group_1337_(1).svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Handshake_Wordmark_Lime_RGB.png b/assets/Handshake_Wordmark_Lime_RGB.png deleted file mode 100644 index a4bcee0..0000000 Binary files a/assets/Handshake_Wordmark_Lime_RGB.png and /dev/null differ diff --git a/assets/Handshake_Wordmark_Nori_RGB.png b/assets/Handshake_Wordmark_Nori_RGB.png deleted file mode 100644 index 784477a..0000000 Binary files a/assets/Handshake_Wordmark_Nori_RGB.png and /dev/null differ diff --git a/assets/Handshake_Wordmark_Nori_RGB_(1).png b/assets/Handshake_Wordmark_Nori_RGB_(1).png deleted file mode 100644 index bda871b..0000000 Binary files a/assets/Handshake_Wordmark_Nori_RGB_(1).png and /dev/null differ diff --git a/assets/Handshake_Wordmark_White_RGB.png b/assets/Handshake_Wordmark_White_RGB.png deleted file mode 100644 index 6b78cb1..0000000 Binary files a/assets/Handshake_Wordmark_White_RGB.png and /dev/null differ diff --git a/assets/Handshake_Wordmark_White_RGB1.png b/assets/Handshake_Wordmark_White_RGB1.png deleted file mode 100644 index bc9fbae..0000000 Binary files a/assets/Handshake_Wordmark_White_RGB1.png and /dev/null differ diff --git a/assets/Hero_Check.svg b/assets/Hero_Check.svg deleted file mode 100644 index 74777a9..0000000 --- a/assets/Hero_Check.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Merge.png b/assets/Merge.png deleted file mode 100644 index 1b31981..0000000 Binary files a/assets/Merge.png and /dev/null differ diff --git a/assets/Merge.svg b/assets/Merge.svg 
deleted file mode 100644 index babde99..0000000 --- a/assets/Merge.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/MergeOptions.png b/assets/MergeOptions.png deleted file mode 100644 index 4854ce3..0000000 Binary files a/assets/MergeOptions.png and /dev/null differ diff --git a/assets/Metabase.png b/assets/Metabase.png deleted file mode 100644 index 11a1760..0000000 Binary files a/assets/Metabase.png and /dev/null differ diff --git a/assets/Metabase_copy.png b/assets/Metabase_copy.png deleted file mode 100644 index 05f6e92..0000000 Binary files a/assets/Metabase_copy.png and /dev/null differ diff --git a/assets/NX.png b/assets/NX.png deleted file mode 100644 index fefa0fd..0000000 Binary files a/assets/NX.png and /dev/null differ diff --git a/assets/Organization_API_Token.png b/assets/Organization_API_Token.png deleted file mode 100644 index a911781..0000000 Binary files a/assets/Organization_API_Token.png and /dev/null differ diff --git a/assets/Organization_Slug.png b/assets/Organization_Slug.png deleted file mode 100644 index df412c1..0000000 Binary files a/assets/Organization_Slug.png and /dev/null differ diff --git a/assets/PermissinoPage.png b/assets/PermissinoPage.png deleted file mode 100644 index 707bb1b..0000000 Binary files a/assets/PermissinoPage.png and /dev/null differ diff --git a/assets/SCR-20230811-mtip.png b/assets/SCR-20230811-mtip.png deleted file mode 100644 index 3cad5f2..0000000 Binary files a/assets/SCR-20230811-mtip.png and /dev/null differ diff --git a/assets/SCR-20230811-mtvw.png b/assets/SCR-20230811-mtvw.png deleted file mode 100644 index 14372df..0000000 Binary files a/assets/SCR-20230811-mtvw.png and /dev/null differ diff --git a/assets/SCR-20260202-negj.png b/assets/SCR-20260202-negj.png deleted file mode 100644 index ffa6f4d..0000000 Binary files a/assets/SCR-20260202-negj.png and /dev/null differ diff --git a/assets/SCR-20260202-neph.png b/assets/SCR-20260202-neph.png deleted file mode 100644 index 
99415c5..0000000 Binary files a/assets/SCR-20260202-neph.png and /dev/null differ diff --git a/assets/SCR-20260202-obcl.png b/assets/SCR-20260202-obcl.png deleted file mode 100644 index ba2c185..0000000 Binary files a/assets/SCR-20260202-obcl.png and /dev/null differ diff --git a/assets/Screen_Shot_2023-08-22_at_1.52.51_PM.png b/assets/Screen_Shot_2023-08-22_at_1.52.51_PM.png deleted file mode 100644 index 284cfc8..0000000 Binary files a/assets/Screen_Shot_2023-08-22_at_1.52.51_PM.png and /dev/null differ diff --git a/assets/Screen_Shot_2023-08-22_at_11.44.08_AM.png b/assets/Screen_Shot_2023-08-22_at_11.44.08_AM.png deleted file mode 100644 index 7ef9718..0000000 Binary files a/assets/Screen_Shot_2023-08-22_at_11.44.08_AM.png and /dev/null differ diff --git a/assets/Screenshot_2023-08-23_173119.png b/assets/Screenshot_2023-08-23_173119.png deleted file mode 100644 index bee7546..0000000 Binary files a/assets/Screenshot_2023-08-23_173119.png and /dev/null differ diff --git a/assets/Screenshot_2023-08-23_173252.png b/assets/Screenshot_2023-08-23_173252.png deleted file mode 100644 index cd920b1..0000000 Binary files a/assets/Screenshot_2023-08-23_173252.png and /dev/null differ diff --git a/assets/Screenshot_2023-08-29_at_11.37.35_AM.png b/assets/Screenshot_2023-08-29_at_11.37.35_AM.png deleted file mode 100644 index 3e07d70..0000000 Binary files a/assets/Screenshot_2023-08-29_at_11.37.35_AM.png and /dev/null differ diff --git a/assets/Screenshot_2023-08-29_at_11.54.07_PM.png b/assets/Screenshot_2023-08-29_at_11.54.07_PM.png deleted file mode 100644 index 3e666b9..0000000 Binary files a/assets/Screenshot_2023-08-29_at_11.54.07_PM.png and /dev/null differ diff --git a/assets/Screenshot_2023-10-16_at_5.09.29_PM.png b/assets/Screenshot_2023-10-16_at_5.09.29_PM.png deleted file mode 100644 index 372b09e..0000000 Binary files a/assets/Screenshot_2023-10-16_at_5.09.29_PM.png and /dev/null differ diff --git a/assets/Screenshot_2023-10-16_at_5.10.39_PM.png 
b/assets/Screenshot_2023-10-16_at_5.10.39_PM.png deleted file mode 100644 index 0f008c7..0000000 Binary files a/assets/Screenshot_2023-10-16_at_5.10.39_PM.png and /dev/null differ diff --git a/assets/Screenshot_2023-10-16_at_5.11.57_PM.png b/assets/Screenshot_2023-10-16_at_5.11.57_PM.png deleted file mode 100644 index 5db43a2..0000000 Binary files a/assets/Screenshot_2023-10-16_at_5.11.57_PM.png and /dev/null differ diff --git a/assets/Screenshot_2023-10-16_at_5.12.23_PM.png b/assets/Screenshot_2023-10-16_at_5.12.23_PM.png deleted file mode 100644 index 414c9a7..0000000 Binary files a/assets/Screenshot_2023-10-16_at_5.12.23_PM.png and /dev/null differ diff --git a/assets/Screenshot_2023-11-06_at_2.35.34_PM.png b/assets/Screenshot_2023-11-06_at_2.35.34_PM.png deleted file mode 100644 index 05f9969..0000000 Binary files a/assets/Screenshot_2023-11-06_at_2.35.34_PM.png and /dev/null differ diff --git a/assets/Screenshot_2023-11-09_at_1.58.30_PM.png b/assets/Screenshot_2023-11-09_at_1.58.30_PM.png deleted file mode 100644 index 6fb2680..0000000 Binary files a/assets/Screenshot_2023-11-09_at_1.58.30_PM.png and /dev/null differ diff --git a/assets/Screenshot_2023-11-09_at_1.59.35_PM.png b/assets/Screenshot_2023-11-09_at_1.59.35_PM.png deleted file mode 100644 index 3f03158..0000000 Binary files a/assets/Screenshot_2023-11-09_at_1.59.35_PM.png and /dev/null differ diff --git a/assets/Screenshot_2024-06-04_at_6.29.22_PM.png b/assets/Screenshot_2024-06-04_at_6.29.22_PM.png deleted file mode 100644 index f7273fa..0000000 Binary files a/assets/Screenshot_2024-06-04_at_6.29.22_PM.png and /dev/null differ diff --git a/assets/Screenshot_2024-06-04_at_6.31.22_PM.png b/assets/Screenshot_2024-06-04_at_6.31.22_PM.png deleted file mode 100644 index a5e2096..0000000 Binary files a/assets/Screenshot_2024-06-04_at_6.31.22_PM.png and /dev/null differ diff --git a/assets/Screenshot_2024-06-04_at_6.34.10_PM.png b/assets/Screenshot_2024-06-04_at_6.34.10_PM.png deleted file mode 100644 index 
cb8e21c..0000000 Binary files a/assets/Screenshot_2024-06-04_at_6.34.10_PM.png and /dev/null differ diff --git a/assets/Screenshot_2024-06-04_at_6.35.18_PM.png b/assets/Screenshot_2024-06-04_at_6.35.18_PM.png deleted file mode 100644 index eb987be..0000000 Binary files a/assets/Screenshot_2024-06-04_at_6.35.18_PM.png and /dev/null differ diff --git a/assets/Screenshot_2024-06-12_at_9.52.28_AM.png b/assets/Screenshot_2024-06-12_at_9.52.28_AM.png deleted file mode 100644 index 5f73f14..0000000 Binary files a/assets/Screenshot_2024-06-12_at_9.52.28_AM.png and /dev/null differ diff --git a/assets/Screenshot_2024-09-06_at_9.09.15_AM.png b/assets/Screenshot_2024-09-06_at_9.09.15_AM.png deleted file mode 100644 index e91d3f3..0000000 Binary files a/assets/Screenshot_2024-09-06_at_9.09.15_AM.png and /dev/null differ diff --git a/assets/Screenshot_2024-09-06_at_9.10.30_AM.png b/assets/Screenshot_2024-09-06_at_9.10.30_AM.png deleted file mode 100644 index 2c46c18..0000000 Binary files a/assets/Screenshot_2024-09-06_at_9.10.30_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_2.42.45_PM.png b/assets/Screenshot_2025-09-08_at_2.42.45_PM.png deleted file mode 100644 index 0d5eb65..0000000 Binary files a/assets/Screenshot_2025-09-08_at_2.42.45_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_2.51.23_PM.png b/assets/Screenshot_2025-09-08_at_2.51.23_PM.png deleted file mode 100644 index 433452d..0000000 Binary files a/assets/Screenshot_2025-09-08_at_2.51.23_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_2.54.33_PM.png b/assets/Screenshot_2025-09-08_at_2.54.33_PM.png deleted file mode 100644 index 4eb44e7..0000000 Binary files a/assets/Screenshot_2025-09-08_at_2.54.33_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_2.56.20_PM.png b/assets/Screenshot_2025-09-08_at_2.56.20_PM.png deleted file mode 100644 index 34b8924..0000000 Binary files a/assets/Screenshot_2025-09-08_at_2.56.20_PM.png 
and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_2.58.54_PM.png b/assets/Screenshot_2025-09-08_at_2.58.54_PM.png deleted file mode 100644 index 8d348fe..0000000 Binary files a/assets/Screenshot_2025-09-08_at_2.58.54_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_3.02.29_PM.png b/assets/Screenshot_2025-09-08_at_3.02.29_PM.png deleted file mode 100644 index 8d2434a..0000000 Binary files a/assets/Screenshot_2025-09-08_at_3.02.29_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_3.09.01_PM.png b/assets/Screenshot_2025-09-08_at_3.09.01_PM.png deleted file mode 100644 index ec82e2e..0000000 Binary files a/assets/Screenshot_2025-09-08_at_3.09.01_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_3.12.32_PM.png b/assets/Screenshot_2025-09-08_at_3.12.32_PM.png deleted file mode 100644 index f95f92b..0000000 Binary files a/assets/Screenshot_2025-09-08_at_3.12.32_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_3.12.52_PM.png b/assets/Screenshot_2025-09-08_at_3.12.52_PM.png deleted file mode 100644 index bc70d0e..0000000 Binary files a/assets/Screenshot_2025-09-08_at_3.12.52_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_3.39.13_PM.png b/assets/Screenshot_2025-09-08_at_3.39.13_PM.png deleted file mode 100644 index 6e49535..0000000 Binary files a/assets/Screenshot_2025-09-08_at_3.39.13_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-08_at_3.48.58_PM.png b/assets/Screenshot_2025-09-08_at_3.48.58_PM.png deleted file mode 100644 index 708713b..0000000 Binary files a/assets/Screenshot_2025-09-08_at_3.48.58_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-09_at_12.13.53_PM.png b/assets/Screenshot_2025-09-09_at_12.13.53_PM.png deleted file mode 100644 index dcae20b..0000000 Binary files a/assets/Screenshot_2025-09-09_at_12.13.53_PM.png and /dev/null differ diff --git 
a/assets/Screenshot_2025-09-09_at_12.15.00_PM.png b/assets/Screenshot_2025-09-09_at_12.15.00_PM.png deleted file mode 100644 index 1cda43f..0000000 Binary files a/assets/Screenshot_2025-09-09_at_12.15.00_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-09_at_3.44.15_PM.png b/assets/Screenshot_2025-09-09_at_3.44.15_PM.png deleted file mode 100644 index 4d84d41..0000000 Binary files a/assets/Screenshot_2025-09-09_at_3.44.15_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_11.28.24_AM.png b/assets/Screenshot_2025-09-10_at_11.28.24_AM.png deleted file mode 100644 index 45ff48c..0000000 Binary files a/assets/Screenshot_2025-09-10_at_11.28.24_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_11.28.34_AM.png b/assets/Screenshot_2025-09-10_at_11.28.34_AM.png deleted file mode 100644 index 2949413..0000000 Binary files a/assets/Screenshot_2025-09-10_at_11.28.34_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_11.29.00_AM.png b/assets/Screenshot_2025-09-10_at_11.29.00_AM.png deleted file mode 100644 index c27042b..0000000 Binary files a/assets/Screenshot_2025-09-10_at_11.29.00_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_12.02.48_PM.png b/assets/Screenshot_2025-09-10_at_12.02.48_PM.png deleted file mode 100644 index 129b047..0000000 Binary files a/assets/Screenshot_2025-09-10_at_12.02.48_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_12.06.54_PM.png b/assets/Screenshot_2025-09-10_at_12.06.54_PM.png deleted file mode 100644 index 12d9339..0000000 Binary files a/assets/Screenshot_2025-09-10_at_12.06.54_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_4.22.11_PM.png b/assets/Screenshot_2025-09-10_at_4.22.11_PM.png deleted file mode 100644 index dfc4347..0000000 Binary files a/assets/Screenshot_2025-09-10_at_4.22.11_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_4.22.40_PM.png 
b/assets/Screenshot_2025-09-10_at_4.22.40_PM.png deleted file mode 100644 index 70abf96..0000000 Binary files a/assets/Screenshot_2025-09-10_at_4.22.40_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-10_at_4.24.35_PM.png b/assets/Screenshot_2025-09-10_at_4.24.35_PM.png deleted file mode 100644 index 7612ceb..0000000 Binary files a/assets/Screenshot_2025-09-10_at_4.24.35_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-11_at_2.35.49_PM.png b/assets/Screenshot_2025-09-11_at_2.35.49_PM.png deleted file mode 100644 index 3374d99..0000000 Binary files a/assets/Screenshot_2025-09-11_at_2.35.49_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-11_at_2.37.58_PM.png b/assets/Screenshot_2025-09-11_at_2.37.58_PM.png deleted file mode 100644 index f379f00..0000000 Binary files a/assets/Screenshot_2025-09-11_at_2.37.58_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-11_at_2.39.31_PM.png b/assets/Screenshot_2025-09-11_at_2.39.31_PM.png deleted file mode 100644 index 2f06436..0000000 Binary files a/assets/Screenshot_2025-09-11_at_2.39.31_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-11_at_2.41.58_PM.png b/assets/Screenshot_2025-09-11_at_2.41.58_PM.png deleted file mode 100644 index 77622a3..0000000 Binary files a/assets/Screenshot_2025-09-11_at_2.41.58_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-11_at_2.46.39_PM.png b/assets/Screenshot_2025-09-11_at_2.46.39_PM.png deleted file mode 100644 index bbc63f4..0000000 Binary files a/assets/Screenshot_2025-09-11_at_2.46.39_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-11_at_3.44.44_PM.png b/assets/Screenshot_2025-09-11_at_3.44.44_PM.png deleted file mode 100644 index cc20bfa..0000000 Binary files a/assets/Screenshot_2025-09-11_at_3.44.44_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-11_at_4.08.51_PM.png b/assets/Screenshot_2025-09-11_at_4.08.51_PM.png deleted file mode 100644 index 
f98ef3a..0000000 Binary files a/assets/Screenshot_2025-09-11_at_4.08.51_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-12_at_10.22.02_AM.png b/assets/Screenshot_2025-09-12_at_10.22.02_AM.png deleted file mode 100644 index 62cdc44..0000000 Binary files a/assets/Screenshot_2025-09-12_at_10.22.02_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-12_at_10.32.44_AM.png b/assets/Screenshot_2025-09-12_at_10.32.44_AM.png deleted file mode 100644 index b239202..0000000 Binary files a/assets/Screenshot_2025-09-12_at_10.32.44_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-12_at_11.03.01_AM.png b/assets/Screenshot_2025-09-12_at_11.03.01_AM.png deleted file mode 100644 index 3e55571..0000000 Binary files a/assets/Screenshot_2025-09-12_at_11.03.01_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-12_at_11.03.01_AM_copy.png b/assets/Screenshot_2025-09-12_at_11.03.01_AM_copy.png deleted file mode 100644 index af3b10d..0000000 Binary files a/assets/Screenshot_2025-09-12_at_11.03.01_AM_copy.png and /dev/null differ diff --git a/assets/Screenshot_2025-09-12_at_11.21.11_AM.png b/assets/Screenshot_2025-09-12_at_11.21.11_AM.png deleted file mode 100644 index 5ee16d0..0000000 Binary files a/assets/Screenshot_2025-09-12_at_11.21.11_AM.png and /dev/null differ diff --git a/assets/Screenshot_2025-10-09_at_3.28.03_PM.png b/assets/Screenshot_2025-10-09_at_3.28.03_PM.png deleted file mode 100644 index 9ddaf82..0000000 Binary files a/assets/Screenshot_2025-10-09_at_3.28.03_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-10-21_at_4.32.54_PM.png b/assets/Screenshot_2025-10-21_at_4.32.54_PM.png deleted file mode 100644 index 86e977d..0000000 Binary files a/assets/Screenshot_2025-10-21_at_4.32.54_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-11-11_at_3.57.57_PM.png b/assets/Screenshot_2025-11-11_at_3.57.57_PM.png deleted file mode 100644 index d454beb..0000000 Binary files 
a/assets/Screenshot_2025-11-11_at_3.57.57_PM.png and /dev/null differ diff --git a/assets/Screenshot_2025-11-11_at_3.58.16_PM.png b/assets/Screenshot_2025-11-11_at_3.58.16_PM.png deleted file mode 100644 index b1cffb3..0000000 Binary files a/assets/Screenshot_2025-11-11_at_3.58.16_PM.png and /dev/null differ diff --git a/assets/Sign_Sign-ON.svg b/assets/Sign_Sign-ON.svg deleted file mode 100644 index bdaeda4..0000000 --- a/assets/Sign_Sign-ON.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Slack.svg b/assets/Slack.svg deleted file mode 100644 index 0570366..0000000 --- a/assets/Slack.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/TrunkYaml.svg b/assets/TrunkYaml.svg deleted file mode 100644 index 399335a..0000000 --- a/assets/TrunkYaml.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/TrunkYaml_(1).svg b/assets/TrunkYaml_(1).svg deleted file mode 100644 index 7d1310b..0000000 --- a/assets/TrunkYaml_(1).svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Trunk_Announce.mp4 b/assets/Trunk_Announce.mp4 deleted file mode 100644 index 9d38cb7..0000000 Binary files a/assets/Trunk_Announce.mp4 and /dev/null differ diff --git a/assets/Trunk_Merge_Queue_Metrics.png b/assets/Trunk_Merge_Queue_Metrics.png deleted file mode 100644 index f5c1021..0000000 Binary files a/assets/Trunk_Merge_Queue_Metrics.png and /dev/null differ diff --git a/assets/Usage_Data.svg b/assets/Usage_Data.svg deleted file mode 100644 index 1a6d8f4..0000000 --- a/assets/Usage_Data.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/UserYaml.svg b/assets/UserYaml.svg deleted file mode 100644 index ba715d5..0000000 --- a/assets/UserYaml.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/UserYaml_(1).svg b/assets/UserYaml_(1).svg deleted file mode 100644 index 643142b..0000000 --- a/assets/UserYaml_(1).svg +++ 
/dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/VSCode.svg b/assets/VSCode.svg deleted file mode 100644 index 6a514c8..0000000 --- a/assets/VSCode.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/VSCode_(1).svg b/assets/VSCode_(1).svg deleted file mode 100644 index 6a514c8..0000000 --- a/assets/VSCode_(1).svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Vector.png b/assets/Vector.png deleted file mode 100644 index 4ff310f..0000000 Binary files a/assets/Vector.png and /dev/null differ diff --git a/assets/YAML.svg b/assets/YAML.svg deleted file mode 100644 index 900e3bd..0000000 --- a/assets/YAML.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/Zillow_Logo_Primary_RGB.png b/assets/Zillow_Logo_Primary_RGB.png deleted file mode 100644 index 8256814..0000000 Binary files a/assets/Zillow_Logo_Primary_RGB.png and /dev/null differ diff --git a/assets/Zillow_Logo_Primary_RGB_(1).png b/assets/Zillow_Logo_Primary_RGB_(1).png deleted file mode 100644 index c5fca99..0000000 Binary files a/assets/Zillow_Logo_Primary_RGB_(1).png and /dev/null differ diff --git a/assets/Zillow_Logo_Primary_RGB_(2).png b/assets/Zillow_Logo_Primary_RGB_(2).png deleted file mode 100644 index c5fca99..0000000 Binary files a/assets/Zillow_Logo_Primary_RGB_(2).png and /dev/null differ diff --git a/assets/Zillow_Logo_Secondary_RGB_(1).png b/assets/Zillow_Logo_Secondary_RGB_(1).png deleted file mode 100644 index de969d3..0000000 Binary files a/assets/Zillow_Logo_Secondary_RGB_(1).png and /dev/null differ diff --git a/assets/add-test-framework-step-1-dark.png b/assets/add-test-framework-step-1-dark.png deleted file mode 100644 index d846ff6..0000000 Binary files a/assets/add-test-framework-step-1-dark.png and /dev/null differ diff --git a/assets/add-test-framework-step-1-light.png b/assets/add-test-framework-step-1-light.png deleted file mode 100644 index aeeb6de..0000000 Binary 
files a/assets/add-test-framework-step-1-light.png and /dev/null differ diff --git a/assets/add-test-framework-step-4-dark.png b/assets/add-test-framework-step-4-dark.png deleted file mode 100644 index 6387efb..0000000 Binary files a/assets/add-test-framework-step-4-dark.png and /dev/null differ diff --git a/assets/add-test-framework-step-4-light.png b/assets/add-test-framework-step-4-light.png deleted file mode 100644 index 51ef5bd..0000000 Binary files a/assets/add-test-framework-step-4-light.png and /dev/null differ diff --git a/assets/af94be48d630815efd4882614036e1b9_copy.png b/assets/af94be48d630815efd4882614036e1b9_copy.png deleted file mode 100644 index a264ea8..0000000 Binary files a/assets/af94be48d630815efd4882614036e1b9_copy.png and /dev/null differ diff --git a/assets/android.png b/assets/android.png deleted file mode 100644 index a0d112a..0000000 Binary files a/assets/android.png and /dev/null differ diff --git a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_pr_84_repo=gewenyu99%2Freal-saas-app&commitHash=7274ce1090cba4614cf9ebc316bacac090c06f56.png b/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_pr_84_repo=gewenyu99%2Freal-saas-app&commitHash=7274ce1090cba4614cf9ebc316bacac090c06f56.png deleted file mode 100644 index 50255fc..0000000 Binary files a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_pr_84_repo=gewenyu99%2Freal-saas-app&commitHash=7274ce1090cba4614cf9ebc316bacac090c06f56.png and /dev/null differ diff --git a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_pr_84_repo=gewenyu99%2Freal-saas-app&commitHash=7274ce1090cba4614cf9ebc316bacac090c06f56_(1).png b/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_pr_84_repo=gewenyu99%2Freal-saas-app&commitHash=7274ce1090cba4614cf9ebc316bacac090c06f56_(1).png deleted file mode 100644 index cd81175..0000000 Binary files 
a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_pr_84_repo=gewenyu99%2Freal-saas-app&commitHash=7274ce1090cba4614cf9ebc316bacac090c06f56_(1).png and /dev/null differ diff --git a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(4).png b/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(4).png deleted file mode 100644 index 6e453f5..0000000 Binary files a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(4).png and /dev/null differ diff --git a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(5).png b/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(5).png deleted file mode 100644 index 49673dc..0000000 Binary files a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(5).png and /dev/null differ diff --git a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(8).png b/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(8).png deleted file mode 100644 index 329f479..0000000 Binary files a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(8).png and /dev/null differ diff --git 
a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(9).png b/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(9).png deleted file mode 100644 index cd095ea..0000000 Binary files a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_6675ee1f-49c5-5400-a6e1-96292a9307ec_status_repo=gewenyu99%2Freal-saas-app&intervalDays=14_(9).png and /dev/null differ diff --git a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_fa45dbe2-afe2-5314-880e-e4a03175a323_repo=gewenyu99%2Freal-saas-app.png b/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_fa45dbe2-afe2-5314-880e-e4a03175a323_repo=gewenyu99%2Freal-saas-app.png deleted file mode 100644 index e4476b9..0000000 Binary files a/assets/app.trunk-staging.io_totally-real-saas_flaky-tests_test_fa45dbe2-afe2-5314-880e-e4a03175a323_repo=gewenyu99%2Freal-saas-app.png and /dev/null differ diff --git a/assets/app.trunk-staging.io_totally-real-saas_settings_webhooks_(3).png b/assets/app.trunk-staging.io_totally-real-saas_settings_webhooks_(3).png deleted file mode 100644 index bdc84b4..0000000 Binary files a/assets/app.trunk-staging.io_totally-real-saas_settings_webhooks_(3).png and /dev/null differ diff --git a/assets/app.trunk-staging.io_trunk-staging-org_settings_webhooks_(2).png b/assets/app.trunk-staging.io_trunk-staging-org_settings_webhooks_(2).png deleted file mode 100644 index 0e2ace2..0000000 Binary files a/assets/app.trunk-staging.io_trunk-staging-org_settings_webhooks_(2).png and /dev/null differ diff --git a/assets/app.trunk-staging.io_trunk-staging-org_settings_webhooks_(3).png b/assets/app.trunk-staging.io_trunk-staging-org_settings_webhooks_(3).png deleted file mode 100644 index 2f234b5..0000000 Binary files 
a/assets/app.trunk-staging.io_trunk-staging-org_settings_webhooks_(3).png and /dev/null differ diff --git a/assets/atlassian-bamboo.png b/assets/atlassian-bamboo.png deleted file mode 100644 index 193b3f2..0000000 Binary files a/assets/atlassian-bamboo.png and /dev/null differ diff --git a/assets/autofix-cursor-automation.png b/assets/autofix-cursor-automation.png deleted file mode 100644 index 488dc11..0000000 Binary files a/assets/autofix-cursor-automation.png and /dev/null differ diff --git a/assets/autopep8.gif b/assets/autopep8.gif deleted file mode 100644 index 561209d..0000000 Binary files a/assets/autopep8.gif and /dev/null differ diff --git a/assets/azure.png b/assets/azure.png deleted file mode 100644 index e25fd26..0000000 Binary files a/assets/azure.png and /dev/null differ diff --git a/assets/bandit.gif b/assets/bandit.gif deleted file mode 100644 index 7d653b9..0000000 Binary files a/assets/bandit.gif and /dev/null differ diff --git a/assets/batching-settings.png b/assets/batching-settings.png deleted file mode 100644 index d331bda..0000000 Binary files a/assets/batching-settings.png and /dev/null differ diff --git a/assets/bazel-dark.png b/assets/bazel-dark.png deleted file mode 100644 index 3580ba2..0000000 Binary files a/assets/bazel-dark.png and /dev/null differ diff --git a/assets/bazel.png b/assets/bazel.png deleted file mode 100644 index 06fdfe7..0000000 Binary files a/assets/bazel.png and /dev/null differ diff --git a/assets/behave.png b/assets/behave.png deleted file mode 100644 index 4fa6209..0000000 Binary files a/assets/behave.png and /dev/null differ diff --git a/assets/bitbucket.png b/assets/bitbucket.png deleted file mode 100644 index f7bb344..0000000 Binary files a/assets/bitbucket.png and /dev/null differ diff --git a/assets/black.gif b/assets/black.gif deleted file mode 100644 index af7388f..0000000 Binary files a/assets/black.gif and /dev/null differ diff --git a/assets/branch-ruleset-exclusions.png 
b/assets/branch-ruleset-exclusions.png deleted file mode 100644 index b734446..0000000 Binary files a/assets/branch-ruleset-exclusions.png and /dev/null differ diff --git a/assets/brex-logo-png_seeklogo-618025.png b/assets/brex-logo-png_seeklogo-618025.png deleted file mode 100644 index 68647cf..0000000 Binary files a/assets/brex-logo-png_seeklogo-618025.png and /dev/null differ diff --git a/assets/buildifier.gif b/assets/buildifier.gif deleted file mode 100644 index 525e94d..0000000 Binary files a/assets/buildifier.gif and /dev/null differ diff --git a/assets/buildkite.png b/assets/buildkite.png deleted file mode 100644 index 13bce18..0000000 Binary files a/assets/buildkite.png and /dev/null differ diff --git a/assets/cargo-next.png b/assets/cargo-next.png deleted file mode 100644 index 66ce30d..0000000 Binary files a/assets/cargo-next.png and /dev/null differ diff --git a/assets/cargo-nextest-logo.png b/assets/cargo-nextest-logo.png deleted file mode 100644 index 7a0c2a4..0000000 Binary files a/assets/cargo-nextest-logo.png and /dev/null differ diff --git a/assets/check b/assets/check deleted file mode 100644 index e9b4668..0000000 --- a/assets/check +++ /dev/null @@ -1,270 +0,0 @@ -Overview
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Trunk Check manages, downloads, and runs dozens of linters, formatters, static analyzers, and security tools. It's also a platform to write your own custom checks and integrate them to run as part of your repo's check suite.

-

Use it via:

- -

We currently support over 75 unique linters and we are adding new and updating old linters every sprint.

-

Hold-the-line

-

Trunk Check distinguishes new from existing issues, which allows you to introduce new linters and checks without any hassle.

-

This is traditionally a very painful process, because running a new linter or check in your CI means that you have to either fix all existing issues, which requires a huge upfront time investment, or only run the check on modified files, which adds unpredictable overhead to PRs and discourages engineers from making simple changes like typo fixes.

-

By integrating with Git, Trunk Check can hold-the-line: that is, it allows you to leave existing issues alone, and simply enforces that you never introduce new lint issues in a pull request.

-

Formatting linters behave slightly differently under hold-the-line than other tools. Since formatters apply to entire files they will be run across the entirety of any changed file and their recommendations must be applied before landing.

-

A short story

-
    -
  • -

    src/adder.js, which has 12 pre-existing issues, has a typo in a comment:

    -
    // This method adds too numbers together
    -
    -
  • -
  • -

    Engineer sees this typo and corrects it:

    -
    // This method adds two numbers together
    -
    -
  • -
  • -

    Engineer pushes this super-simple, no-code-change Pull Request (PR)

    -
  • -
  • -

    PR is checked for formatting/lint issues on CI

    -
  • -
-

Without trunk

-
    -
  • Engineer receives fail-mail and fail-slacks about their PR
  • -
  • Engineer wonders what in the world could've failed in a PR that fixes a typo in a comment
  • -
  • Engineer opens the failure notifications and starts going down the rabbit hole
  • -
  • 2 hours later, engineer finally understands how much work it would be to fix all the pre-existing
    -issues
  • -
  • Engineer abandons PR
  • -
  • Not only has a good chunk of the day been blown, but now the engineer is significantly less
    -inclined to ever do this in the future
  • -
-

With trunk

-
    -
  • trunk check CI succeeds on their PR
  • -
  • Engineer merges their PR
  • -
-

Issue severity

-

Trunk Check currently supports over 75 linters, each of which has it own issue severity classification system. We surface these as either Low, Medium, or High priority. This is not always a 1-to-1 translation of the linter's natively defined severity - many have more than 3 severity levels - but we've found that this provides the best signal for the end user.

-

By default, all issues reported by a linter are considered when evaluating if a pull request is passing. We have taken this approach because trunk is presenting a holistic view of a codebase, across all its languages and technologies, and we don't believe trunk should be in the business of making severity recommendations for individual linters.

-

If a linter believes an issue is worth reporting, we consider it worth blocking for. If that issue is not interesting for your repo - we suggest you completely disable that rule for that linter in its configuration.


- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\ No newline at end of file diff --git a/assets/check.png b/assets/check.png deleted file mode 100644 index 50ab1be..0000000 Binary files a/assets/check.png and /dev/null differ diff --git a/assets/checkov.gif b/assets/checkov.gif deleted file mode 100644 index 747779d..0000000 Binary files a/assets/checkov.gif and /dev/null differ diff --git a/assets/ci-analytics-dashboard b/assets/ci-analytics-dashboard deleted file mode 100644 index 7465fb3..0000000 Binary files a/assets/ci-analytics-dashboard and /dev/null differ diff --git a/assets/ci-analytics-workflow-hover-details b/assets/ci-analytics-workflow-hover-details deleted file mode 100644 index ca338fb..0000000 Binary files a/assets/ci-analytics-workflow-hover-details and /dev/null differ diff --git a/assets/ci-autofix-cursor-automation.png b/assets/ci-autofix-cursor-automation.png deleted file mode 100644 index 13c6cb6..0000000 Binary files a/assets/ci-autofix-cursor-automation.png and /dev/null differ diff --git a/assets/ci_debugger b/assets/ci_debugger deleted file mode 100644 index b02ffe9..0000000 --- a/assets/ci_debugger +++ /dev/null @@ -1,266 +0,0 @@ -Overview
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Trunk CI Debugger (beta invite required) is available at app.trunk.io. With a sprinkling of code you can enable live debugging of your CI actions enabling real-time diagnosis, troubleshooting, and of course debugging of your otherwise ephemeral CI job.

-

What is it?

-

Similar to any traditional debugger, the Trunk CI Debugger, operates on breakpoints configured through the trunk web app. Each breakpoint is tracked with a unique id and is configured through a set of conditional rules. For example, you can specify that breakpoint 'foo' always triggers when the exit code of its command is non-zero (or failing).

-
-

How does it work?

-

At its most basic - the trunk ci debugger wraps the execution of whatever command you give it. This allows the debugger to break on_enter before running your command and on_exit after your command completes. This wrapper connects to the Trunk Service to determine in real time based on the conditional rules whether to trigger a breakpoint or continue execution.

-

What happens a breakpoint is triggered?

-

Upon triggering, the execution of your CI run will be paused and the system will attempt to notify someone that a breakpoint has been triggered. In practice when working with a pull request for example, this can be a Slack notification to the author of the PR, or a posting of a comment to the PR thread on GitHub.

-

The notification will include a link to connect to the debugging session and provides authenticated users with direct access to the machine that is being held.

-

What can I do during a debug session?

-

Anything! When connected over a debug session, you have live access to the terminal that is running your CI job; you are connecting to the live instance that is being used to run your job.

-

Besides running any normal shell command from the session, the debugger provides a set of command line tooling to further assist your debugging session.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
commandwhat does it do
retryruns the provided command again. upon retry the exit code of the breakpoint will be overwritten.
getexitreturns the current exit code that will be returned when the debugging session terminates (by default this is the exit code of the user provided command)
setexit {value}overwrites the current exit code of the session with a user provided value. use this to change a failing execution into a passing execution (or vice versa)
download {file}downloads the provided file to your local machine
continueresumes execution; returns the current exit code, and allows CI process execution to proceed

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\ No newline at end of file diff --git a/assets/circle-ci.png b/assets/circle-ci.png deleted file mode 100644 index 29c2601..0000000 Binary files a/assets/circle-ci.png and /dev/null differ diff --git a/assets/circuitPurple_(6).png b/assets/circuitPurple_(6).png deleted file mode 100644 index 1d64e11..0000000 Binary files a/assets/circuitPurple_(6).png and /dev/null differ diff --git a/assets/claude.png b/assets/claude.png deleted file mode 100644 index ecf81ce..0000000 Binary files a/assets/claude.png and /dev/null differ diff --git a/assets/codespell.gif b/assets/codespell.gif deleted file mode 100644 index 3544e1d..0000000 Binary files a/assets/codespell.gif and /dev/null differ diff --git a/assets/connect-jira.png b/assets/connect-jira.png deleted file mode 100644 index da24f4e..0000000 Binary files a/assets/connect-jira.png and /dev/null differ diff --git a/assets/create-ticket-button-dark.png b/assets/create-ticket-button-dark.png deleted file mode 100644 index ed8cc1c..0000000 Binary files a/assets/create-ticket-button-dark.png and /dev/null differ diff --git a/assets/create-ticket-button-details-page-dark.png b/assets/create-ticket-button-details-page-dark.png deleted file mode 100644 index 4eb691a..0000000 Binary files a/assets/create-ticket-button-details-page-dark.png and /dev/null differ diff --git a/assets/create-ticket-button-details-page-light.png b/assets/create-ticket-button-details-page-light.png deleted file mode 100644 index b1d159a..0000000 Binary files a/assets/create-ticket-button-details-page-light.png and /dev/null differ diff --git a/assets/create-ticket-button-light.png b/assets/create-ticket-button-light.png deleted file mode 100644 index 7eb96ca..0000000 Binary files a/assets/create-ticket-button-light.png and /dev/null differ diff --git a/assets/create-ticket-button.png b/assets/create-ticket-button.png deleted file mode 100644 index 79ee50a..0000000 Binary files a/assets/create-ticket-button.png and /dev/null differ diff --git 
a/assets/create-ticket-kebab.png b/assets/create-ticket-kebab.png deleted file mode 100644 index 5f0d7f6..0000000 Binary files a/assets/create-ticket-kebab.png and /dev/null differ diff --git a/assets/cspell.gif b/assets/cspell.gif deleted file mode 100644 index cd13b45..0000000 Binary files a/assets/cspell.gif and /dev/null differ diff --git a/assets/cubesGreen_(3).png b/assets/cubesGreen_(3).png deleted file mode 100644 index 695ebf8..0000000 Binary files a/assets/cubesGreen_(3).png and /dev/null differ diff --git a/assets/cucumber-logo.png b/assets/cucumber-logo.png deleted file mode 100644 index aa7e96e..0000000 Binary files a/assets/cucumber-logo.png and /dev/null differ diff --git a/assets/cucumber.png b/assets/cucumber.png deleted file mode 100644 index 2952c63..0000000 Binary files a/assets/cucumber.png and /dev/null differ diff --git a/assets/cursor.png b/assets/cursor.png deleted file mode 100644 index cdbce30..0000000 Binary files a/assets/cursor.png and /dev/null differ diff --git a/assets/cylinder_yellow_medium.png b/assets/cylinder_yellow_medium.png deleted file mode 100644 index 527f553..0000000 Binary files a/assets/cylinder_yellow_medium.png and /dev/null differ diff --git a/assets/cylinder_yellow_medium.svg b/assets/cylinder_yellow_medium.svg deleted file mode 100644 index 1065278..0000000 --- a/assets/cylinder_yellow_medium.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/cypress.png b/assets/cypress.png deleted file mode 100644 index 391f9fe..0000000 Binary files a/assets/cypress.png and /dev/null differ diff --git a/assets/dart.png b/assets/dart.png deleted file mode 100644 index abaed14..0000000 Binary files a/assets/dart.png and /dev/null differ diff --git a/assets/dashboard-dark_(1).png b/assets/dashboard-dark_(1).png deleted file mode 100644 index e644ad7..0000000 Binary files a/assets/dashboard-dark_(1).png and /dev/null differ diff --git a/assets/dashboard-dark_(2).png b/assets/dashboard-dark_(2).png 
deleted file mode 100644 index 2573ac7..0000000 Binary files a/assets/dashboard-dark_(2).png and /dev/null differ diff --git a/assets/dashboard-light_(1).png b/assets/dashboard-light_(1).png deleted file mode 100644 index 1831f69..0000000 Binary files a/assets/dashboard-light_(1).png and /dev/null differ diff --git a/assets/dashboard-quarantined-dark.png b/assets/dashboard-quarantined-dark.png deleted file mode 100644 index e5abc6d..0000000 Binary files a/assets/dashboard-quarantined-dark.png and /dev/null differ diff --git a/assets/dashboard-quarantined-light.png b/assets/dashboard-quarantined-light.png deleted file mode 100644 index 1da2b7a..0000000 Binary files a/assets/dashboard-quarantined-light.png and /dev/null differ diff --git a/assets/data-uploads-dark.png b/assets/data-uploads-dark.png deleted file mode 100644 index 8ce83f8..0000000 Binary files a/assets/data-uploads-dark.png and /dev/null differ diff --git a/assets/data-uploads-light.png b/assets/data-uploads-light.png deleted file mode 100644 index 3c7ab35..0000000 Binary files a/assets/data-uploads-light.png and /dev/null differ diff --git a/assets/descript-logo-png_seeklogo-448113.png b/assets/descript-logo-png_seeklogo-448113.png deleted file mode 100644 index 8ce5d92..0000000 Binary files a/assets/descript-logo-png_seeklogo-448113.png and /dev/null differ diff --git a/assets/descript-logo-png_seeklogo-448113_(1).png b/assets/descript-logo-png_seeklogo-448113_(1).png deleted file mode 100644 index e1d068a..0000000 Binary files a/assets/descript-logo-png_seeklogo-448113_(1).png and /dev/null differ diff --git a/assets/descript-logo-png_seeklogo-448113_(2).png b/assets/descript-logo-png_seeklogo-448113_(2).png deleted file mode 100644 index fd29bd1..0000000 Binary files a/assets/descript-logo-png_seeklogo-448113_(2).png and /dev/null differ diff --git a/assets/descript.png b/assets/descript.png deleted file mode 100644 index b3e6cb3..0000000 Binary files a/assets/descript.png and /dev/null differ diff 
--git a/assets/details-code-owners-dark.png b/assets/details-code-owners-dark.png deleted file mode 100644 index 7ff4ada..0000000 Binary files a/assets/details-code-owners-dark.png and /dev/null differ diff --git a/assets/details-code-owners-light.png b/assets/details-code-owners-light.png deleted file mode 100644 index 7937f26..0000000 Binary files a/assets/details-code-owners-light.png and /dev/null differ diff --git a/assets/diamond_multi_large.png b/assets/diamond_multi_large.png deleted file mode 100644 index 30fd88c..0000000 Binary files a/assets/diamond_multi_large.png and /dev/null differ diff --git a/assets/docs-mq-restrict-push.png b/assets/docs-mq-restrict-push.png deleted file mode 100644 index ae72064..0000000 Binary files a/assets/docs-mq-restrict-push.png and /dev/null differ diff --git a/assets/drill-down-overview.png b/assets/drill-down-overview.png deleted file mode 100644 index 712c300..0000000 Binary files a/assets/drill-down-overview.png and /dev/null differ diff --git a/assets/drone.png b/assets/drone.png deleted file mode 100644 index 8e67239..0000000 Binary files a/assets/drone.png and /dev/null differ diff --git a/assets/droneci.png b/assets/droneci.png deleted file mode 100644 index 0dc06d4..0000000 Binary files a/assets/droneci.png and /dev/null differ diff --git a/assets/enable-flaky-dark.png b/assets/enable-flaky-dark.png deleted file mode 100644 index 4bd7241..0000000 Binary files a/assets/enable-flaky-dark.png and /dev/null differ diff --git a/assets/enable-flaky-light.png b/assets/enable-flaky-light.png deleted file mode 100644 index befbb2b..0000000 Binary files a/assets/enable-flaky-light.png and /dev/null differ diff --git a/assets/enable-parallel-mode b/assets/enable-parallel-mode deleted file mode 100644 index f16c557..0000000 Binary files a/assets/enable-parallel-mode and /dev/null differ diff --git a/assets/enable_test_quarantining.png b/assets/enable_test_quarantining.png deleted file mode 100644 index 65ab186..0000000 Binary 
files a/assets/enable_test_quarantining.png and /dev/null differ diff --git a/assets/example-jira-form.png b/assets/example-jira-form.png deleted file mode 100644 index a6ede5f..0000000 Binary files a/assets/example-jira-form.png and /dev/null differ diff --git a/assets/example-jira-ticket.png b/assets/example-jira-ticket.png deleted file mode 100644 index 538dbaf..0000000 Binary files a/assets/example-jira-ticket.png and /dev/null differ diff --git a/assets/example-jira-ticket_(1).png b/assets/example-jira-ticket_(1).png deleted file mode 100644 index 78fb6e7..0000000 Binary files a/assets/example-jira-ticket_(1).png and /dev/null differ diff --git a/assets/example-webhook-connector-dark.png b/assets/example-webhook-connector-dark.png deleted file mode 100644 index 397bff2..0000000 Binary files a/assets/example-webhook-connector-dark.png and /dev/null differ diff --git a/assets/example-webhook-connector-light.png b/assets/example-webhook-connector-light.png deleted file mode 100644 index ba95784..0000000 Binary files a/assets/example-webhook-connector-light.png and /dev/null differ diff --git a/assets/example-webhook-connector-slack.png b/assets/example-webhook-connector-slack.png deleted file mode 100644 index 6c4dd87..0000000 Binary files a/assets/example-webhook-connector-slack.png and /dev/null differ diff --git a/assets/example-webhook-connector-slack_(1).png b/assets/example-webhook-connector-slack_(1).png deleted file mode 100644 index ff055c1..0000000 Binary files a/assets/example-webhook-connector-slack_(1).png and /dev/null differ diff --git a/assets/example-webhook-delivery-status.png b/assets/example-webhook-delivery-status.png deleted file mode 100644 index 800d6bb..0000000 Binary files a/assets/example-webhook-delivery-status.png and /dev/null differ diff --git a/assets/example-webhook-github-permissions-dark.png b/assets/example-webhook-github-permissions-dark.png deleted file mode 100644 index b9985fd..0000000 Binary files 
a/assets/example-webhook-github-permissions-dark.png and /dev/null differ diff --git a/assets/example-webhook-github-permissions-light.png b/assets/example-webhook-github-permissions-light.png deleted file mode 100644 index 19fa0ff..0000000 Binary files a/assets/example-webhook-github-permissions-light.png and /dev/null differ diff --git a/assets/example-webhook-logs.png b/assets/example-webhook-logs.png deleted file mode 100644 index 60f168e..0000000 Binary files a/assets/example-webhook-logs.png and /dev/null differ diff --git a/assets/failure-logs-dark.png b/assets/failure-logs-dark.png deleted file mode 100644 index 20ef7b7..0000000 Binary files a/assets/failure-logs-dark.png and /dev/null differ diff --git a/assets/failure-logs-light.png b/assets/failure-logs-light.png deleted file mode 100644 index 282469a..0000000 Binary files a/assets/failure-logs-light.png and /dev/null differ diff --git a/assets/file.excalidraw.svg b/assets/file.excalidraw.svg deleted file mode 100644 index 6350f9b..0000000 --- a/assets/file.excalidraw.svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nNVa21LbOlx1MDAxNH3nK5ic19bV/dI3blx1MDAwNXpcdTAwMDFaOITpaYcxtpKYOLaxXHI0dPj3s2VC7DhcdTAwMTdIgJLSXHUwMDE5mliytSWttddeMr9XVldcdTAwMWJ5PzGN96tccvPLc8PAT93rxlx1MDAxYnv9yqRZXHUwMDEwR9BEiu9ZfJl6Rc9OnifZ+3fv3CRx2kF+XHUwMDE2x13Hi3t3t5nQ9EyUZ9DxP/i+uvq7+F1cdTAwMTkoNV7uRu3QXHUwMDE0N1x1MDAxNE3lWFKz+tW9OCrGxVpwyYhcImWPINuE8XLjQ3PLXHIzU7bYS40tdeX1mzRccvz2R9Nf23bPm/thOWwrXGLDw7xcdTAwMWZcdTAwMTZhZTHMvmzL8jTummbg51x1MDAxZDt27fq0u9L4st2JTGanj4ZX48T1grxvn4PKq3drUO33XHUwMDBivlx0QVx1MDAxY6Ip01x1MDAxMinOJabDVns/pcpBgmuhXHUwMDE1w5yRymrdXHUwMDA1tlx1MDAxMYdxalx1MDAwM/tcdTAwMDdcdTAwMWL7r1xm7cz1um2IL/LLPi73VatV9rm+ny7hXHUwMDBlY4hcIimwxEryMoqOXHTanXzQhyhcdTAwMDVRaio5Qqjsk5liRzDGUlwijjVcdTAwMTm22Fx1MDAwMJJdv0DHz3JcdTAwMWZSt2d27S3RZVx1MDAxOFZcdTAwMTcz8lx1MDAwN4t5j6JcdTAwMTJHdHDltpyh7b9Vx19cdTAwMTWDIzjMza98OPVcbmjMsTzX7HR/T52eXrL2vmh5m1lj2O/2zeTH3t2M20dfXHUwMDBlXHUwMDEwXHUwMDBizyTWdO+bPo9Om59HR7lcdTAwMWbfTdP4uvLcwadyWS5cdTAwMTPfvcM2bFx1MDAwMlx1MDAxN5RcdTAwMGLOkCzhXHUwMDEyXHUwMDA2Ube+ZmHsdUs6rFRcdTAwMDJcdTAwMWXj4cj8q1x1MDAxNORoXHUwMDFhXHUwMDA1qeaIXHUwMDAxJsmjXHUwMDE5OHkxXHUwMDE3YCBZgIF4MVx1MDAwNkriXHUwMDAwdFx1MDAwMfyYMSz4KFx1MDAwMVx1MDAwNXU0PFx1MDAwM1x1MDAxM4VcdTAwMTHw9Cn0y1M3ylx1MDAxMjdcdTAwMDW4TqAg4o5CTFx1MDAwMPnGuSepI+psg2Rg6cqegWwjXHJjrJpcdTAwMDZQiSimXHUwMDE0MTxcdTAwMDdAy6jiKD9cZm6KPC9Grn5we0HYXHUwMDFmQUCBXFxYwLVcdTAwMWbRjyjLIYb3q7nJ8iBq/4iSNIhT2GtcdTAwMTg3TntuJd3b29bCoG1B3vBgOiZcdTAwMWTBf1x1MDAxZYBcdTAwMDJcdTAwMGU79Fx1MDAwMt+vapRcdTAwMDexuEFk0t3HSFx1MDAwYoTQXHUwMDBlXCI3PFosVFg5szPMtFx1MDAwZeZcdTAwMTVcdTAwMWNlxrZaoOqZ/J6ps7qyynWdXHUwMDA1llNcdTAwMGVcdTAwMTh/vM5+PNjrfz95e3xcdTAwMTJcdTAwMWZcdTAwMDbrcSt0v3S9b8uts1xuK4drhVx1MDAwNOGYXHUwMDEzrtSr6CwlXHUwMDBlRUIwXHUwMDFiXHUwMDA2XHUwMDFh0dBcdTAwMTGdpcKqMNPFz1x1MDAxOPMhVUErRqqc4d+gsyfpXHUwMDFhuVL55e7RcW+H70T/xptnR39AZ2c+t5V/XHUwMDBlNrfiLP602Vu7+uzfdFSTPIt+2/RcYnvM5Uvrt1J0KrVcdTAwMDWnXHUwMDA0cTFcdTAwMDe1J+/SUlx1MDAwYrii2FFMI1xugijlXHUwMDE4ta2CM23rKV1Qe3kknGPMOFx1MDAw
Maq/poTPi9GnSfj6K0r4XHUwMDAzqlWX8DlDfVx1MDAwZVx0v0sxkzhOdf3qkONMK1wiXHUwMDAxR4/n+OxkuqTybetwhiHZKUCsXHUwMDFl5ThDzMFcZmFGXHUwMDE1tGFWj6vkeFx1MDAwYilcdTAwMGahxTlOkIOkZlx1MDAwMlx1MDAxMlxyZVRSVVwif0h15FxiipVAilxipMFcdTAwMGXjSspcdTAwMTkoOFx1MDAwN7svlVx1MDAxNC+i4ORhXHUwMDA1n5VcdTAwMTBcYlx1MDAxN1x1MDAxNZ8zR0JcdTAwMDCupPl6XHUwMDEw+cCU0cBcdTAwMDaHRI+hoV1cbjexe+pcYthryaDaIZDcXHSt9GjF3qWdx1vYXHJcdTAwMGVlkoBKiiEtmWB6bPIm8lx1MDAxZlx1MDAwZWp2eT9cZko42Fx1MDAwMoxrpFx1MDAwMJFMylx0QUFMXHUwMDEwiraeXHUwMDExXHUwMDAwIIG6Uo1cdTAwMDVcdTAwMTW6Wb5cdTAwMTH3ekFcdTAwMGWLf1x1MDAxMFx1MDAwN1FeX+RiNddsQuhcdTAwMTh3XGZcdTAwMDEwqWpbPXMk9omjtVr5abXkVvFl+Pnnm4m9305cdTAwMDd90TyO9/KJK9X/XHUwMDE3ci5UTk99IF6UcUXKbPBQ6ru5QenF9lx1MDAwNZfN/VxyuS2882b3prncqVx1MDAwZuDjIFhcdTAwMDXrXHUwMDFihCSqdkDxvM6l1TKe1pNOXGLtMDbH3nmTXHRcdTAwMTVcdTAwMGXA3SGcXHUwMDBixiWAgFNcXFmie+PCmVx1MDAxMvBTxrhcZsblyU5gisN4yFx1MDAxMPmd3jf3YOvr1ulOsHPs8n53+8Sdz2AgjirV41x1MDAwYlx1MDAxOVxmhqaf0XPEuCBsXHUwMDBlgzF51kttMLTSXHUwMDBlXHUwMDE2goCFUIhcdTAwMTJSO6NcdTAwMTdg6jVcdTAwMTBcdTAwMTQq+Vx1MDAwMvnLYzAwVVgzXFxNXG6v4DDmXHUwMDA16dNcdTAwMWPGRqVsT0yh+3/MYTygLnWHMWeoL+owMKayfnn4XHUwMDFhQCpcdTAwMDFV9+NVdnYuXVKVRZZbxJ7NQS1DR1VcdTAwMTZ0z1x1MDAwMemSnFAsJatU/c9tMMDnMKXtcnP7po2TXHRHhMiBIJmEqstcdTAwMTa8hI5cdTAwMWZcdTAwMTFSbUsj/FwiL+KW3F7M5uCwkpdcdTAwMGWhVDDCpYKQpnhcdTAwMGJcdTAwMDWmglJI7lxuykyovlx1MDAxNvNcdTAwMTaPNDxcdTAwMDJ2VUNqV0JLXHUwMDBlXHUwMDBlYpK3sEFB/SW4XHUwMDEwiil7oiXHg/qrvMV0vFx1MDAxN83jUH9Ob1x1MDAwMVx1MDAxNm5azlx1MDAwM78jNVZzvPrcO6LbW1x1MDAwN1x1MDAxZm+6wrvY48mH759u0NflTnqw6Fx1MDAwZVx1MDAxNFxijGlGmGS0/vKTXHUwMDExsHaQXHUwMDE5rYhcdTAwMTM9w1tMTGlzpz5cIqaXN1x1MDAxOEwoXHUwMDAx3JNKeTUocjRBXHUwMDFjeICe4Vx1MDAxOHUhVzHrXHK+tH/FMneBvjLgS8NNkkNbXHUwMDFmXGZDg6lcdTAwMDX+oCYqXHUwMDFm07hcbsz1+uzlXlx1MDAxOczBXHUwMDAy0Vx1MDAxNHO9Xbn9XHUwMDFmdUzq3yJ9 - - - - - Astate: testingpriority: normalBstate: testingpriority: normalCstate: pendingpriority: normal \ No newline at end of file diff --git a/assets/file.excalidraw_(1).svg b/assets/file.excalidraw_(1).svg deleted file 
mode 100644 index 90b98f2..0000000 --- a/assets/file.excalidraw_(1).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVaa1PbyFx1MDAxMv3Or6C4X4N2nj3T+UaAJZC7XHUwMDBlu0BCcrNFXHRZtoVlSZFFeGzlv9+WXHUwMDAwS5ZcdTAwMWZcYmxcdTAwMTKnNlRcdTAwMDWjXHUwMDE5aXpm+pw+Z6x/1tbXN7KbxN94vb7hX3tuXHUwMDE4tFP3auNVfv2bn1x1MDAwZYM4oiZR/D2ML1Ov6NnLsmT4+rff3CRxukF2XHUwMDFlx33Hi1x1MDAwN3e3+aE/8KNsSFx1MDAxZP9Hf6+v/1P8X1x1MDAxOSj1vcyNuqFf3FA0lWNxbm39ciuOioG50qik5jDqXHUwMDEwXGZ3aLzMb1Nrx1xyh37Z0i6Giv9kX8//kvxcdTAwMTS7R9s7vXct9+q2XHUwMDFjtlx1MDAxM4ThUXZcdTAwMTNcdTAwMTZhXHJjmn3ZNszSuO9/XGbaWe9hXHQq12fdlcaX3V7kXHUwMDBm8+mz0dU4cb0gu8mnwMqrd2tQ7XdNf1mlXHUwMDFjqUBqkMiRgVx1MDAxObXm9ytcdTAwMDFcdTAwMGWAsJZbzTmCrMW1XHUwMDFkh3Gax/VcdTAwMWbu5z9lZOeu1+9SeFG77NPpeOhh2efqfracRpFKXCIqrlx1MDAxOFxihWrUpedcdTAwMDfdXlZsXHUwMDE0OFZIQESKxHAsI/GL/aCNZIJasdyufPxkv13kxt/lLqTuwN/Pb4kuw7C6lFH7filcdTAwMWZyqMxcInl/5Xs5wbz/bj37qlx1MDAxOTiWhZl/nY1mXkmZv77e2Nt+q+19+upvJrcnScD8wcao3/dX01x1MDAxZnt38+fedlx1MDAxMr6/Rjg+UCFcdTAwMWO2h1x1MDAwN62TaHyUh/HdNI2vmj5cdTAwMTc/XVx1MDAxZHXOXHUwMDA2rtv+8HknOt9vXVx1MDAxZp/2mz33/lO53JdJ271DXGbtmjZMSsG0LVx1MDAxMylcZqJ+fS/C2OuXIFurXHUwMDA0PIHusXWtXHUwMDAyW3I+XHUwMDBi2Fx1MDAxNlx1MDAwMFx1MDAxNUUhXHUwMDFhXHUwMDAze/ou/SBg8+dcdTAwMDFbXHUwMDBiR1pcdTAwMGVSXHUwMDE5Llx1MDAwNEcxXHUwMDBlbGVcdTAwMWRhlJHcXG4wXHUwMDA0rEWAnaVuNEzclJAwXHUwMDA13JzRSFx1MDAwNG+jJ1FtpFx1MDAwM1x1MDAxM0BmXHUwMDEywVx1MDAxOG7Y4kBcdTAwMWVrmEDsMpO0jCqOsqPgtiggMHb1d3dcdTAwMTCEN2NJUGQvreDWl+hLNMwohtfrqU9cdTAwMWbSLIi6X6IkXHLilHacho7TgVx1MDAxYm6M3blcdTAwMTVcdTAwMDbdPNc3PJqRn47BIFx1MDAwYqi8jjpcZoJ2u1pcdTAwMDA9XG7HXHJcIj/db1K3KIRuXHUwMDEwueHxs6Ol9fPfjqjc4bqSTkM/b6XrgHORPreOIzP1q6MyLrQwXGYtlPn0XHUwMDE43KOtPa+bXHUwMDBlszPTOlHZvnv2bvPs42rXcc6EdoBZxlx1MDAxNacqrlm5xC9RyH1cdTAwMGanXHUwMDE2cnQ4lXFcdTAwMWGIpITQrFx1MDAxY6ZSyI1cdTAwMDOcadBcdTAwMTSKVjBZyFxyciN4lZ5XoZDfZYbb+mNL7YdcdTAwMTd7b9lcdTAwMTFcdTAwMWJsf7jBy/fXXHUwMDBiVdzHXHUwMDA0wsXR4UX7JIKbP3B3863i9uK8Z3+2QLiN4eP7oFx1MDA
wNcZTO97xx9+7Z1wiSZYnXHUwMDEwSFx1MDAwNaoqd7yEQLAgZjGGJlx1MDAwMIFcdTAwMTFMNSaM6Zu00vqAyqxxJNOKcVx1MDAxMs6swlxiXHUwMDBm+lx1MDAwMIDkg1IkzFx1MDAwMe1cInwxX1x1MDAxZjDtWKbANtVcdTAwMDdGXHUwMDFh4lx1MDAxMCHV4vSwgDx4YoouJlx1MDAwZt5UXG5u4kftXHUwMDFmqVxyXHUwMDFlqYV1bfDEUJchXGbuXHUwMDE4ZppcdTAwMDdcdTAwMDAu65dHXHUwMDFlQCnBSVx1MDAxYjc39/O5dFVFXHUwMDAxd4QyXHUwMDAyXHUwMDE4Q1x0tqy1XHUwMDA1xq12XGbjqLVcdTAwMTSWXHUwMDFi0LW4KvWeWY+x52NcXJJcdTAwMTVcdTAwMDGGjFlzJ1xmJpHOXHUwMDFjIJdAYSAjyrGmskj3sGeKJJzFXHUwMDE3UVx1MDAwNeJxVfBcImxQaOc3QVx1MDAwMZPxwO7PtZpgMF9cbjfJNzRfZZMzumTAdWnqXG6G8S7zeWwyh9FuUyejczVIPlx1MDAwYtTE7Fx0uI9HNd81jKLiljQhJaBcdTAwMDUrSFx1MDAxMqIxs8JcdTAwMDKllTBUdLhcdTAwMDXabj1cdTAwMTFW6Fx1MDAwZbPteDBcYjJa/8M4iLL6Olx1MDAxN1x1MDAwYrqVXHUwMDEzQs93J5KAplVtqzNHkj9xXFxcdTAwMDKWn9ZLbFx1MDAxNX+MPv/9amrvzZlJX7RO5Hv5vLXq72e5IaVY/erIXHJcdTAwMTHp5fJcXDcnvkvW2/NOQ9v6cHIhPptMe35PrDjxkaBxXHUwMDA0cpBcdTAwMDK0XHUwMDEwVrzsseYsN0SA5KSelJ1++sG5clx1MDAxMK0xKDDXP1x1MDAxNbv04IWEzfNcdTAwMDfZXHUwMDEyzkKW7oVcdTAwMTYwXHUwMDAxz/RCyt/P4naAXG5PLt9ccnd9XHUwMDE57WzurODhI1x1MDAwMXom/qhcdTAwMTAz1Fx1MDAxMpvjb/qsV9tcXFxirlx1MDAxZDL3wnAqjrTyNfwpslx1MDAxZSCoVSGhT9pcdTAwMTc8fnyqvUCqU1xiVEV/qr34oaeP2z/RXjxSXFzq9uKJob6svVx1MDAxMHz2oSNQmVVU9ptcdTAwMWY6zqfT1SyzXHUwMDFj0WHaIFx1MDAxNSgzXHUwMDA15lx1MDAxNlx1MDAxY0bSXHUwMDE4tFUolZpTZ1x1MDAxN3ZcdTAwMTjGsTSSNIJcdTAwMTF+yPdMqbZcdTAwMGU1SyBcdTAwMDI2SFx1MDAwMVmc/OZBMlJcdTAwMDRj3/j+ezzGfCCO1LxxhJSghCadXpW0Y0reXHUwMDEypVx1MDAxYkvyioRcdTAwMTZcdTAwMWGFz/NcdTAwMTdccl1cdTAwMGY4ipGI0paG0rxyaldcdTAwMGJcblxmXHUwMDA3XHJAOy81MjFcdTAwMTnUr+UuZiV80TqZ68u0XHUwMDE3olJcdTAwMWNrvIecgzJcYs2PTjtcdTAwMWZcdTAwMGV3XHUwMDEzk9re+Tlv928/87f9rcvVpj1cdTAwMDAgS2uMXHUwMDExtLy0zLWj0+bmXHUwMDAyXHL3xHxz4eq27XSmf9WiXHUwMDA0OYg7JCosqbfiL0iEWUbySlx1MDAxMj9X8VoyXn5gQEZ0tb5recxcdTAwMDfsfzv2b5k89fY7+r9M7Fx1MDAxZZzG+utcdTAwMGZ4uWHuc99cdTAwMDRcdTAwMDfXn5Kj7l433uvgwbHfP+xcdTAwMWb/Yi9NVM5cdTAwMDBrwFx1MDAxNppLkzvZxsCevkkrbVvA5q6F2Fx1MDAxMlx1MDAwNVx1MDAxOVx1MDAwNqy8XHUwMDFh9uBaQEtmpGY8LzeLXHUwMDAwe66aQeZcdTAwMTje2LGQieKSo/o3vTC
xU/FcdTAwMDGZP6y/f9Cj5Xohw/JIvapcdTAwMWKWJ1x1MDAwNfqidlx1MDAwNcxscDNp8nPBJ6B7PouuZNkmveSQJkJttGZM1dFNoFx1MDAwM6qYXHUwMDEyXHUwMDA0ivxcdTAwMDRA1Vx1MDAwMluiWdFcdTAwMGUoTjJJoZhcdTAwMGVz5mjaXHUwMDEyo4BcIpWka81k5SbMidxllrNYXHUwMDE5r1wipIWK8V2+V2n4zYN00CpJNK5Iilx1MDAxOeSVXHUwMDBlVWMgyVx1MDAxOXBcdTAwMTCSK0pcdTAwMGU7KVpcdTAwMWGZlfm0MFxuXHRcdTAwMWROtVx1MDAwM6Y6J1xuhWmG+TuwjHSdsIZP7sOvZVJmJHrROJnj5fPWqr+fzHSI9Yujb30pQ1x1MDAxNbmi5m9+zld1K8lzRiqHXHUwMDFiRaRuXHLpXHUwMDE4Nn4oI7V0NNJcdTAwMGa1XGJcdLZcdTAwMWXX8miOOZbwh1ZLPZbyJc1RKORT8/fsXHUwMDExXHUwMDE4yiksJ7VcdTAwMTaWtNhcbp7IXGKhpVxcxolMXHUwMDFkn0sgm1xupXBcIkClXGJohLI8I6rMc8dJwjqG5W/sSaRcdTAwMDJoOTO/NuvMyrv830TGzeKctfvnb7hJcpTLt9F2UFx1MDAwNlx1MDAwNu17wVpOcuNb4F+9mY+QtfslzVnDL1Ly+9r3/1x1MDAwM2+zhmUifQ== - - - - - Astate: restartingpriority: normalBstate: pendingpriority: normalCstate: pendingpriority: normalDstate: testingpriority: high \ No newline at end of file diff --git a/assets/file.excalidraw_(1)_(1).svg b/assets/file.excalidraw_(1)_(1).svg deleted file mode 100644 index 49c4121..0000000 --- a/assets/file.excalidraw_(1)_(1).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVaXXfaRlx1MDAxMH33r/Chr7G6s9+bN/xcdTAwMTHXJ6mT1Elqt+nJkUGAaiEpQlx1MDAxOOOe/PfOXG5cdTAwMDdcdCFhjMHBLbYxaFfa2Z25c+eu9M/O7m4jXHUwMDFkx17j5W7Du2m5gd9O3FHjhT1+7SVcdTAwMDM/XG6xiWbfXHUwMDA30TBpZT17aVx1MDAxYVx1MDAwZl7+/LNcdTAwMWLHTtdPL6PoymlF/clpXuD1vTBcdTAwMWRgxz/x++7uP9l7YaDEa6Vu2FxyvOyErClcdTAwMWZLgyhcdTAwMWY9jcJsXFwwoLRWXFySaVx1MDAwZn9wiOOlXlx1MDAxYps7bjDw8lx1MDAxNnuocTG8Vlx1MDAxMb+ln15ftvTB+Fx1MDAwM3nb/+NrPmzHXHUwMDBmgrN0XHUwMDFjZGZcclwinH3eNkiT6Mr73W+nPTt26XjdWUk07PZCb2Cnn5tcdTAwMTnFbstPx/Y6JD86WYNiv1x1MDAxYrtcdTAwMDBcXDtGaSG04IRxTqet9nyuuKNccuFaXHUwMDExJlx1MDAwMIRcdTAwMTQlw1x1MDAwZaIgSqxhP4Fnf3LTLt3WVVx1MDAxN+1cdTAwMGLbeVx1MDAxZle0daeT91x1MDAxOX2fLlx1MDAxM46mUlx1MDAxOFx1MDAwM1x1MDAxNIhhSk+79Dy/20uzPuBcYuBSaSNcdTAwMDRRXHUwMDFhcktcdTAwMDde5lx1MDAxMbSPSU1B5C3WgPiknUXHX7lcdTAwMWZcdTAwMTK3753YU8JhXHUwMDEwXHUwMDE0XHUwMDE3M2zfLeb3KMrjiN1cdTAwMWT5ls/Q9j8qx18xXHUwMDA2Z+Iw9W7S6dRcdTAwMGJBXHUwMDEzNdvH5lxya7Y77/bi66/Bq1x1MDAwZYO4Me337UX1ZScnu6ed8Wt6cMrOP/wmLoZ/XHUwMDFm0lt4PzvK9/HdJIlGhevefcqXZVx1MDAxOLfdSWyDXHUwMDAygVx1MDAxZVx1MDAwNy2oUtP2wFx1MDAwZq/Ka1x1MDAxNkStq1x1MDAxY1x1MDAwZTtcdTAwMDWD53A4M/9cdTAwMDJcdTAwMDSVMvVcdTAwMTBUwjCqXHJdXHUwMDFhgtWruVx1MDAwMlx1MDAwNOlcblx1MDAxMITVICjBUVJqobGvNlx1MDAwMDNcdTAwMTBcdTAwMTSEOZorXHUwMDBiXHUwMDBmwD+uXHUwMDFlg8A0ccNB7CZcdTAwMTixXHUwMDE1KFx1MDAwNOJQMY88xVx1MDAxY1nGmkCsKkWAPFx1MDAxZWozXHJzmLovPOVcdTAwMDPCM7cqXG7TM/82XHUwMDBiMTlz9JXb94PxjPuzuMW1a35cdTAwMGU/h4NcdTAwMTRteLmbeoPUXHUwMDBmu5/DOPGjXHUwMDA0XHUwMDFkjeNGSd9ccnZcdTAwMWIz5zVcdTAwMDO/a2O80cL5eMlM+Kc+XHUwMDEy4LRD32+3i1x1MDAxNNVCY1xcP/SSk2WYXHUwMDA1bej6oVx1MDAxYnxY0VZcXDvvl2mmdVxupHjpXHUwMDBlPNuKx6VZiO+FPGtcbum6XGZygVx1MDAwNCMw7JdcdTAwMDe56ERN+vGLSsKLw18v+VH/9rZ7td08XHUwMDBihGlHSsm1UFpyljPck/Isc1xmNYaB5ERSTC3VPKuYQFx1MDAxM1x1MDAxMF+Cyzw5f6dZXHUwMDA1XHUwMDFjXHUwMDE0XHUwMDA1liPnOdCsXGJH8rc3be72hoF834bu+Nq9eVx1MDAwMppdeN3zg875sVx1MDAxYVx1MDAxZPU+8qMvhvy6f3Peaq2FviW6XHUwMDEw0I+6iPFN0DeyVy2ypY1kQvXyXHUwMDE1
dLWXtpq+gVxi6ihqXHUwMDAwi1IhlSBiXHUwMDA22pa/jUD6NpKinjDllLNG/iZYRVx1MDAxMy61WpLEjaJcXEqs+n9cdTAwMTSJr1x1MDAxMqSPI/H9ZYhxQ1x1MDAxY35cdTAwMGZrlTn8gaaug8InOaZSJteCXFyBjTnQeVx1MDAxMN2H8cXJdMNcdTAwMThfmb1cdTAwMTHFXGJiXHUwMDAwKXCyOSlmXHUwMDEw58xRmkjFXHUwMDE11bhcdTAwMWP1XHUwMDEw71x1MDAxMN1cImR1iONAREnBNNEojFx1MDAxNM3FwFx1MDAxNOhcdTAwMDSVtLGYUlx1MDAxYzDhXHUwMDE4yDl+SuBWS1x1MDAxME3XgPt5XHUwMDAyp/dcdTAwMTP4RtJcdTAwMDFcIiVJ9/2wjTiZNexui2hcdTAwMTlcdTAwMTDapXBju9CO1MwuISOUXHUwMDFiQlmhRydqXHLtPPaIQ4QyWFBpMHbbXHUwMDAxy7u5yXth+36jXHUwMDE2V/dTo6SDkWpsvaw1MJ5L84JRaFx1MDAxMzHKXHUwMDEwXHUwMDAylEpkJIqd54xcbtxBelx1MDAxMPX7foqL/y7yw7S8yNlqNm066HnuXFxcdTAwMDTgpIpt5bxcdTAwMTHbK86Wavmn3Vx1MDAxY1rZl+nnv15U9t6rjfmsdT7c81x1MDAwYu5cdTAwMTT/ryRbUJLX5T2K4lx1MDAxN1x1MDAwNTt9gGxcdTAwMTlHwdnfsHdzTdzjY/WFXHUwMDFkXHUwMDA0zfdsy2VcdTAwMGJcdTAwMDftUCtbJEhcZqvCdtBcdTAwMDZ0S6fjtYyp0i1cdTAwMWF1i8RcdTAwMWFKMKjaokDXO1x1MDAxOFx1MDAxZUJcdTAwMDFFc7BjYYUmSY9qJjVRfCM5b2OipX9z2Dvg5KJcdTAwMTW0P42/nv7hvU7PPi0rLo5cXFx1MDAxOFxyz49Et7lvgt570fVGg1+e196gJLXbXHUwMDA2jFwiWUj2XHUwMDAwbVG9mNutLThDgFFMelhmMcHoLPxcdTAwMDRcdTAwMDFHMjxKXHUwMDExm1x1MDAxMrX6o+C3XnGB6Vx1MDAwMmlcdTAwMTPx+sPUxSpR+jh1cVAo2WMvY/0nU1x1MDAxN/eQS1ldPNDUjapcdTAwMGJgovZcdTAwMTaA1LZcIsSie2mYL95T2U55QcE4xGA2XHUwMDAzwEKGleRcdTAwMDUzXHUwMDBlQ4RcdTAwMDOnmmJ1p0t2rU9eMOlQhplcdTAwMDb5XHUwMDEz3yvlXHUwMDA1+lx1MDAxZYAxjlx1MDAwNYFhKHnm1Fx1MDAwNSWKXG5jXGJsZHtwy9VFfNa6XHUwMDFj9mNNP7JO6sGr8Tt6JedcdTAwMGJ5XHUwMDAwXHUwMDE0k8CMlFxuhZjRskpeYCWvbYkpKKZ+itld8NXUxZKSx979sVtUgKRcdTAwMDNcdTAwMWErOl5tXHUwMDE0I4CFXHUwMDE2VmKSo8Ig7JnLi9qYt6+5aF+nuKCy/tlcdTAwMDMkL0pccs9j9b6stzjytlNbUFx1MDAwNijt7EtLKrje6D2Rem3BXHUwMDFkXHUwMDE0+Nr6d3JXpEpeXHUwMDE4h1x1MDAwYvRcdTAwMDdow61cdTAwMDQppOBJ1pNcZucrTKHhOchcdTAwMGKqTuImXHUwMDA0+vjrJ9lcdTAwMTm9oX46ulx1MDAxOD/BvYv/umyhitRcIltcdTAwMGKmKebY5aFd7aXt1i1UXHUwMDEwh9tcIlx1MDAwMFx1MDAxMSOUmH2qSFx1MDAxMOpoQLWijKZY89DNPdNgpJM9K7SsaEGrsMrS4oc/1/B0ouUwV1x1MDAwMlx1MDAxNUKgh6u1IcVyXHUwMDBmZZVcdTAwMTXLQ+zcrFxcwTKhnrhcdTAwMDVTjEu2/K7g4ny3nXqFI4JcdTAwMDVWiowrI60uKVx0
XHUwMDE2hJamXHUwMDE0yyZkS8NYybD1XHRcdTAwMTaqXHUwMDFj++SIloIyzlSlYqGorFxml1x1MDAwNKnbYGdl5iSLvXXDsOX/qFhcdTAwMTZvXHUwMDFi5OJAoEdcdTAwMDXWYVgqK8nzu9iz2oBcdTAwMTPFXHUwMDAxKLe3Qlx1MDAwNGqIubkvJViWVFHG3urATGk9Lyrs2UNBwyRcdTAwMTGoru3OXHUwMDE511ihzVx1MDAxOfSstEp9uE+a51wiPb/iTvH/g1NeIVx1MDAwNMtcdFx1MDAwZlx1MDAxNSqTXHUwMDAy+PL3f9Xbq7fvXkt+dFx1MDAxMZ6mhydcdTAwMDE9XHUwMDFjXHUwMDBm+lue8KhhXHUwMDBls6lcZnOaMjD7iFx1MDAwN5P2XHUwMDExXHUwMDBmzrTGhGd/N5bvUC+hc0FcbsJMtVTRiELFrInlLtNcdTAwMTJHSSCGsI08Jf24bMclonlcdTAwMWTZrlxm0ZqW51x1MDAwNP4619vXnNPrcL9zd/mGXHUwMDFix2e2lJo6XHUwMDExg8Bv39WO+Vx1MDAxY1x1MDAxYte+N9pfXHUwMDFjpDt33rbA9bKo+Lbz7V8w10FSIn0= - - - - - Astate: testingpriority: normal Bstate: testingpriority: normalCstate: pendingpriority: normalDstate:pendingpriority: high \ No newline at end of file diff --git a/assets/file.excalidraw_(10).svg b/assets/file.excalidraw_(10).svg deleted file mode 100644 index 90b98f2..0000000 --- a/assets/file.excalidraw_(10).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVaa1PbyFx1MDAxMv3Or6C4X4N2nj3T+UaAJZC7XHUwMDBlu0BCcrNFXHRZtoVlSZFFeGzlv9+WXHUwMDAwS5ZcdTAwMWZcYmxcdTAwMTKnNlRcdTAwMDWjXHUwMDE5aXpm+pw+Z6x/1tbXN7KbxN94vb7hX3tuXHUwMDE4tFP3auNVfv2bn1x1MDAwZYM4oiZR/D2ML1Ov6NnLsmT4+rff3CRxukF2XHUwMDFlx33Hi1x1MDAwN3e3+aE/8KNsSFx1MDAxZP9Hf6+v/1P8X1x1MDAxOSj1vcyNuqFf3FA0lWNxbm39ciuOioG50qik5jDqXHUwMDEwXGZ3aLzMb1Nrx1xyh37Z0i6Giv9kX8//kvxcdTAwMTS7R9s7vXct9+q2XHUwMDFjtlx1MDAxM4ThUXZcdTAwMTNcdTAwMTZhXHJjmn3ZNszSuO9/XGbaWe9hXHQq12fdlcaX3V7kXHUwMDBm8+mz0dU4cb0gu8mnwMqrd2tQ7XdNf1mlXHUwMDFjqUBqkMiRgVx1MDAxObXm9ytcdTAwMDFcdTAwMGWAsJZbzTmCrMW1XHUwMDFkh3Gax/VcdTAwMWbu5z9lZOeu1+9SeFG77NPpeOhh2efqfracRpFKXCIqrlx1MDAxOFxihWrUpedcdTAwMDfdXlZsXHUwMDE0OFZIQESKxHAsI/GL/aCNZIJasdyufPxkv13kxt/lLqTuwN/Pb4kuw7C6lFH7filcdTAwMWZyqMxcInl/5Xs5wbz/bj37qlx1MDAxOTiWhZl/nY1mXkmZv77e2Nt+q+19+upvJrcnScD8wcao3/dX01x1MDAxZnt38+fedlx1MDAxMr6/Rjg+UCFcdTAwMWO2h1x1MDAwN62TaHyUh/HdNI2vmj5cdTAwMTc/XVx1MDAxZHXOXHUw
MDA2rtv+8HknOt9vXVx1MDAxZp/2mz33/lO53JdJ271DXGbtmjZMSsG0LVx1MDAxMylcZqJ+fS/C2OuXIFurXHUwMDA0PIHusXWtXHUwMDAyW3I+XHUwMDBi2Fx1MDAxNlx1MDAwMFx1MDAxNUUhXHUwMDFhXHUwMDAze/ou/SBg8+dcdTAwMDFbXHUwMDBiR1pcdTAwMGVSXHUwMDE5Llx1MDAwNEcxXHUwMDBlbGVcdTAwMWRhlJHcXG4wXHUwMDA0rEWAnaVuNEzclJAwXHUwMDA13JzRSFx1MDAwNG+jJ1FtpFx1MDAwM1x1MDAxM0BmXHUwMDEywVx1MDAxOG7Y4kBcdTAwMWVrmEDsMpO0jCqOsqPgtiggMHb1d3dcdTAwMTCEN2NJUGQvreDWl+hLNMwohtfrqU9cdTAwMWbSLIi6X6IkXHLilHacho7TgVx1MDAxYm6M3blcdTAwMTVcdTAwMDbdPNc3PJqRn47BIFx1MDAwYqi8jjpcZoJ2u1pcdTAwMDA9XG7HXHJcIj/db1K3KIRuXHUwMDEwueHxs6Ol9fPfjqjc4bqSTkM/b6XrgHORPreOIzP1q6MyLrQwXGYtlPn0XHUwMDE43KOtPa+bXHUwMDBlszPTOlHZvnv2bvPs42rXcc6EdoBZxlx1MDAxNacqrlm5xC9RyH1cdTAwMGanXHUwMDE2cnQ4lXFcdTAwMWGIpITQrFx1MDAxY6ZSyI1cdTAwMDOcadBcdTAwMTSKVjBZyFxyciN4lZ5XoZDfZYbb+mNL7YdcdTAwMTd7b9lcdTAwMTFcdTAwMWJsf7jBy/fXXHUwMDBiVdzHXHUwMDA0wsXR4UX7JIKbP3B3863i9uK8Z3+2QLiN4eP7oFx1MDAwNcZTO97xx9+7Z1wiSZYnXHUwMDEwSFx1MDAwNaoqd7yEQLAgZjGGJlx1MDAwMIFcdTAwMTFMNSaM6Zu00vqAyqxxJNOKcVx1MDAxMs6swlxiXHUwMDBm+lx1MDAwMIDkg1IkzFx1MDAwMe1cInwxX1x1MDAxZjDtWKbANtVcdTAwMDdGXHUwMDFh4lx1MDAxMCHV4vSwgDx4YoouJlx1MDAwZt5UXG5u4kftXHUwMDFmqVxyXHUwMDFlqYV1bfDEUJchXGbuXHUwMDE4ZppcdTAwMDdcdTAwMDAu65dHXHUwMDFlQCnBSVx1MDAxYjc39/O5dFVFXHUwMDAxd4QyXHUwMDAyXHUwMDE4Q1x0tqy1XHUwMDA1xq12XGbjqLVcdTAwMTSWXHUwMDFi0LW4KvWeWY+x52NcXJJcdTAwMTVcdTAwMDGGjFlzJ1xmJpHOXHUwMDFjIJdAYSAjyrGmskj3sGeKJJzFXHUwMDE3UVx1MDAwNeJxVfBcImxQaOc3QVx1MDAwMZPxwO7PtZpgMF9cbjfJNzRfZZMzumTAdWnqXG6G8S7zeWwyh9FuUyejczVIPlx1MDAwYtTE7Fx0uI9HNd81jKLiljQhJaBcdTAwMDUrSFx1MDAxMqIxs8JcdTAwMDKllTBUdLhcdTAwMDXabj1cdTAwMTFW6Fx1MDAwZbPteDBcYjJa/8M4iLL6Olx1MDAxN1x1MDAwYrqVXHUwMDEzQs93J5KAplVtqzNHkj9xXFxcdTAwMDKWn9ZLbFx1MDAxNX+MPv/9amrvzZlJX7RO5Hv5vLXq72e5IaVY/erIXHJcdTAwMTHp5fJcXDcnvkvW2/NOQ9v6cHIhPptMe35PrDjxkaBxXHUwMDA0cpBcdTAwMDK0XHUwMDEwVrzsseYsN0SA5KSelJ1++sG5clx1MDAxMK0xKDDXP1x1MDAxNbv04IWEzfNcdTAwMDfZXHUwMDEyzkKW7oVcdTAwMTYwXHUwMDAxz/RCyt/P4naAXG5PLt9ccnd9XHUwMDE57WzurODhI1x1MDAwMXom/qhcdTAwMTAz1Fx1MDAxMpvjb/qsV9tcXFxirlx1MDAxZDL3wnAqjrTyNfwpslx1
MDAxZSCoVSGhT9pcdTAwMTc8fnyqvUCqU1xiVEV/qr34oaeP2z/RXjxSXFzq9uKJob6svVx1MDAxMHz2oSNQmVVU9ptcdTAwMWY6zqfT1SyzXHUwMDFj0WHaIFx1MDAxNSgzXHUwMDA15lx1MDAxNlx1MDAxY0bSXHUwMDE4tFUolZpTZ1x1MDAxN3ZcdTAwMTjGsTSSNIJcdTAwMTF+yPdMqbZcdTAwMGU1SyBcdTAwMDI2SFx1MDAwMVmc/OZBMlJcdTAwMDRj3/j+ezzGfCCO1LxxhJSghCadXpW0Y0reXHUwMDEypVx1MDAxYkvyioRcdTAwMTZcdTAwMWGFz/NcdTAwMTdccl1cdTAwMGY4ipGI0paG0rxyaldcdTAwMGJcblxmXHUwMDA3XHJAOy81MjFcdTAwMTnUr+UuZiV80TqZ68u0XHUwMDE3olJcdTAwMWNrvIecgzJcYs2PTjtcdTAwMWZcdTAwMGV3XHUwMDEzk9re+Tlv928/87f9rcvVpj1cdTAwMDAgS2uMXHUwMDExtLy0zLWj0+bmXHUwMDAyXHL3xHxz4eq27XSmf9WiXHUwMDA0OYg7JCosqbfiL0iEWUbySlx1MDAxMj9X8VoyXn5gQEZ0tb5recxcdTAwMDfsfzv2b5k89fY7+r9M7Fx1MDAxZZzG+utcdTAwMGZ4uWHuc99cdTAwMDRcdTAwMDfXn5Kj7l433uvgwbHfP+xcdTAwMWb/Yi9NVM5cdTAwMDBrwFx1MDAxNppLkzvZxsCevkkrbVvA5q6F2Fx1MDAxMlx1MDAwNVx1MDAxOVx1MDAwNqy8XHUwMDFh9uBaQEtmpGY8LzeLXHUwMDAwe66aQeZcdTAwMTje2LGQieKSo/o3vTCxU/FcdTAwMDGZP6y/f9Cj5Xohw/JIvapcdTAwMWKWJ1x1MDAwNfqidlx1MDAwNcxscDNp8nPBJ6B7PouuZNkmveSQJkJttGZM1dFNoFx1MDAwM6qYXHUwMDEyXHUwMDA0ivxcdTAwMDRA1Vx1MDAwMluiWdFcdTAwMGUoTjJJoZhcdTAwMGVz5mjaXHUwMDEyo4BcIpWka81k5SbMidxllrNYXHUwMDE5r1wipIWK8V2+V2n4zYN00CpJNK5Iilx1MDAxOeSVXHUwMDBlVWMgyVx1MDAxOXBcdTAwMTCSK0pcdTAwMGU7KVpcdTAwMWGZlfm0MFxuXHRcdTAwMWROtVx1MDAwM6Y6J1xuhWmG+TuwjHSdsIZP7sOvZVJmJHrROJnj5fPWqr+fzHSI9Yujb30pQ1x1MDAxNbmi5m9+zld1K8lzRiqHXHUwMDFiRaRuXHLpXHUwMDE4Nn4oI7V0NNJcdTAwMGa1XGJcdLZcdTAwMWXX8miOOZbwh1ZLPZbyJc1RKORT8/fsXHUwMDExXHUwMDE4yiksJ7VcdTAwMTaWtNhcbp7IXGKhpVxcxolMXHUwMDFkn0sgm1xupXBcIkClXGJohLI8I6rMc8dJwjqG5W/sSaRcdTAwMDJoOTO/NuvMyrv830TGzeKctfvnb7hJcpTLt9F2UFx1MDAwNlx1MDAwNu17wVpOcuNb4F+9mY+QtfslzVnDL1Ly+9r3/1x1MDAwM2+zhmUifQ== - - - - - Astate: restartingpriority: normalBstate: pendingpriority: normalCstate: pendingpriority: normalDstate: testingpriority: high \ No newline at end of file diff --git a/assets/file.excalidraw_(11).svg b/assets/file.excalidraw_(11).svg deleted file mode 100644 index 6350f9b..0000000 --- a/assets/file.excalidraw_(11).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nNVa21LbOlx1MDAxNH3nK5ic19bV/dI3blx1MDAwNXpcdTAwMDFaOITpaYcxtpKYOLaxXHI0dPj3s2VC7DhcdTAwMTdIgJLSXHUwMDE5mliytSWttddeMr9XVldcdTAwMWJ5PzGN96tccvPLc8PAT93rxlx1MDAxYnv9yqRZXHUwMDEwR9BEiu9ZfJl6Rc9OnifZ+3fv3CRx2kF+XHUwMDE2x13Hi3t3t5nQ9EyUZ9DxP/i+uvq7+F1cdTAwMTkoNV7uRu3QXHUwMDE0N1x1MDAxNE3lWFKz+tW9OCrGxVpwyYhcImWPINuE8XLjQ3PLXHIzU7bYS40tdeX1mzRccvz2R9Nf23bPm/thOWwrXGLDw7xcdTAwMWZcdTAwMTZhZTHMvmzL8jTummbg51x1MDAxZDt27fq0u9L4st2JTGanj4ZX48T1grxvn4PKq3drUO33XHUwMDBivlx0QVx1MDAxY6Ip01x1MDAxMinOJabDVns/pcpBgmuhXHUwMDE1w5yRymrdXHUwMDA1tlx1MDAxMYdxalx1MDAwM/tcdTAwMDdcdTAwMWL7r1xm7cz1um2IL/LLPi73VatV9rm+ny7hXHUwMDBlY4hcIimwxEryMoqOXHTanXzQhyhcdTAwMDVRaio5Qqjsk5liRzDGUlwijjVcdTAwMTm22Fx1MDAwMJJdv0DHz3JcdTAwMWZSt2d27S3RZVx1MDAxOFZcdTAwMTcz8lx1MDAwN4t5j6JcdTAwMTJHdHDltpyh7b9Vx19cdTAwMTWDIzjMza98OPVcbmjMsTzX7HR/T52eXrL2vmh5m1lj2O/2zeTH3t2M20dfXHUwMDBlXHUwMDEwXHUwMDBizyTWdO+bPo9Om59HR7lcdTAwMWbfTdP4uvLcwadyWS5cdTAwMTPfvcM2bFx1MDAwMlx1MDAxN5RcdTAwMGLOkCzhXHUwMDEyXHUwMDA2Ube+ZmHsdUs6rFRcdTAwMDJcdTAwMWXj4cj8q1x1MDAxNORoXHUwMDFhXHUwMDA1qeaIXHUwMDAxJsmjXHUwMDE5OHkxXHUwMDE3YCBZgIF4MVx1MDAwNkriXHUwMDAwdFx1MDAwMfyYMSz4KFx1MDAwMVx1MDAwNXU0PFx1MDAwM1x1MDAxM4VcdTAwMTHw9Cn0y1M3ylx1MDAxMjdcdTAwMDW4TqAg4o5CTFx1MDAwMPnGuSepI+psg2Rg6cqegWwjXHJjrJpcdTAwMDZQiSimXHUwMDE0MTxcdTAwMDdAy6jiKD9cZm6KPC9Grn5we0HYXHUwMDFmQUCBXFxYwLVcdTAwMWbRjyjLIYb3q7nJ8iBq/4iSNIhT2GtcdTAwMTg3TntuJd3b29bCoG1B3vBgOiZcdTAwMWTBf1x1MDAxZYBcdTAwMDJcdTAwMGU79Fx1MDAwMt+vapRcdTAwMDexuEFk0t3HSFx1MDAwYoTQXHUwMDBlXCI3PFosVFg5szPMtFx1MDAwZeZcdTAwMTVcdTAwMWNlxrZaoOqZ/J6ps7qyynWdXHUwMDA1llNcdTAwMGVcdTAwMTh/vM5+PNjrfz95e3xcdTAwMTJcdTAwMWZcdTAwMDbrcSt0v3S9b8uts1xuK4drhVx1MDAwNOGYXHUwMDEzrtSr6CwlXHUwMDBlRUIwXHUwMDFiXHUwMDA2XHUwMDFh0dBcdTAwMTGdpcKqMNPFz1x1MDAxOPMhVUErRqqc4d+gsyfpXHUwMDFhuVL55e7RcW+H70T/xptnR39AZ2c+t5V/XHUwMDBlNrfiLP602Vu7+uzfdFSTPIt+2/RcYnvM5Uvrt1J0KrVcdTAwMDWnXHUwMDA0cTFcdTAwMDe1J+/SUlx1MDAwYrii2FFMI1xugijlXHUwMDE4ta2CM23rKV1Qe3kknGPMOFx1MDAw
Maq/poTPi9GnSfj6K0r4XHUwMDAzqlWX8DlDfVx1MDAwZVx0v0sxkzhOdf3qkONMK1wiXHUwMDAxR4/n+OxkuqTybetwhiHZKUCsXHUwMDFl5ThDzMFcZmFGXHUwMDE1tGFWj6vkeFx1MDAwYilcdTAwMGahxTlOkIOkZlx1MDAwMlx1MDAxMlxyZVRSVVwif0h15FxiipVAilxipMFcdTAwMGXjSspcdTAwMTkoOFx1MDAwN7svlVx1MDAxNC+i4ORhXHUwMDA1n5VcdTAwMTBcYlx1MDAxN1x1MDAxNZ8zR0JcdTAwMDCupPl6XHUwMDEw+cCU0cBcdTAwMDaHRI+hoV1cbjexe+pcYthryaDaIZDcXHSt9GjF3qWdx1vYXHJcdTAwMGVlkoBKiiEtmWB6bPIm8lx1MDAxZlx1MDAwZWp2eT9cZko42Fx1MDAwMoxrpFx1MDAwMJFMylx0QUFMXHUwMDEwiraeXHUwMDExXHUwMDAwIIG6Uo1cdTAwMDVcdTAwMTW6Wb5cdTAwMTH3ekFcdTAwMGWLf1x1MDAxMFx1MDAwN1FeX+RiNddsQuhcdTAwMTh3XGZcdTAwMDEwqWpbPXMk9omjtVr5abXkVvFl+Pnnm4m9305cdTAwMDd90TyO9/KJK9X/XHUwMDE3ci5UTk99IF6UcUXKbPBQ6ru5QenF9lx1MDAwNZfN/VxyuS2882b3prncqVx1MDAwZuDjIFhcdTAwMDXrXHUwMDFihCSqdkDxvM6l1TKe1pNOXGLtMDbH3nmTXHRcdTAwMTVcdTAwMGXA3SGcXHUwMDBixiWAgFNcXFmie+PCmVx1MDAxMvBTxrhcZsblyU5gisN4yFx1MDAxMPmd3jf3YOvr1ulOsHPs8n53+8Sdz2AgjirV41x1MDAwYlx1MDAxOVxmhqaf0XPEuCBsXHUwMDBlgzF51kttMLTSXHUwMDBlXHUwMDE2goCFUIhcdTAwMTJSO6NcdTAwMTdg6jVcdTAwMTBcdTAwMTQq+Vx1MDAwMvnLYzAwVVgzXFxNXG6v4DDmXHUwMDA16dNcdTAwMWPGRqVsT0yh+3/MYTygLnWHMWeoL+owMKayfnn4XHUwMDFhQCpcdTAwMDFV9+NVdnYuXVKVRZZbxJ7NQS1DR1VcdTAwMTZ0z1x1MDAwMemSnFAsJatU/c9tMMDnMKXtcnP7po2TXHRHhMiBIJmEqstcdTAwMTa8hI5cdTAwMWZcdTAwMTFSbUsj/FwiL+KW3F7M5uCwkpdcdTAwMGWhVDDCpYKQpnhcdTAwMGJcdTAwMDWmglJI7lxuykyovlx1MDAxNvNcdTAwMTaPNDxcdTAwMDJ2VUNqV0JLXHUwMDBlXHUwMDBlYpK3sEFB/SW4XHUwMDEwiil7oiXHg/qrvMV0vFx1MDAxN83jUH9Ob1x1MDAwMVx1MDAxNm5azlx1MDAwM78jNVZzvPrcO6LbW1x1MDAwN1x1MDAxZm+6wrvY48mH759u0NflTnqw6Fx1MDAwZVx1MDAxNFxijGlGmGS0/vKTXHUwMDExsHaQXHUwMDE5rYhcdTAwMTM9w1tMTGlzpz5cIqaXN1x1MDAxOEwoXHUwMDAx3JNKeTUocjRBXHUwMDFjeICe4Vx1MDAxOHUhVzHrXHK+tH/FMneBvjLgS8NNkkNbXHUwMDFmXGZDg6lcdTAwMDX+oCYqXHUwMDFm07hcbsz1+uzlXlx1MDAxOczBXHUwMDAy0Vx1MDAxNHO9Xbn9XHUwMDFmdUzq3yJ9 - - - - - Astate: testingpriority: normalBstate: testingpriority: normalCstate: pendingpriority: normal \ No newline at end of file diff --git a/assets/file.excalidraw_(12).svg b/assets/file.excalidraw_(12).svg deleted 
file mode 100644 index 49c4121..0000000 --- a/assets/file.excalidraw_(12).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVaXXfaRlx1MDAxMH33r/Chr7G6s9+bN/xcdTAwMTHXJ6mT1Elqt+nJkUGAaiEpQlx1MDAxOOOe/PfOXG5cdTAwMDdcdCFhjMHBLbYxaFfa2Z25c+eu9M/O7m4jXHUwMDFkx17j5W7Du2m5gd9O3FHjhT1+7SVcdTAwMDM/XG6xiWbfXHUwMDA30TBpZT17aVx1MDAxYVx1MDAwZl7+/LNcdTAwMWLHTtdPL6PoymlF/clpXuD1vTBcdTAwMWRgxz/x++7uP9l7YaDEa6Vu2FxyvOyErClcdTAwMWZLgyhcdTAwMWY9jcJsXFwwoLRWXFySaVx1MDAwZn9wiOOlXlx1MDAxYps7bjDw8lx1MDAxNnuocTG8Vlx1MDAxMb+ln15ftvTB+Fx1MDAwM3nb/+NrPmzHXHUwMDBmgrN0XHUwMDFjZGZcclwinH3eNkiT6Mr73W+nPTt26XjdWUk07PZCb2Cnn5tcdTAwMTnFbstPx/Y6JD86WYNiv1x1MDAxYrtcdTAwMDBcXDtGaSG04IRxTqet9nyuuKNccuFaXHUwMDExJlx1MDAwMIRcdTAwMTQlw1x1MDAwZaIgSqxhP4Fnf3LTLt3WVVx1MDAxN+1cdTAwMGLbeVx1MDAxZle0daeT91x1MDAxOX2fLlx1MDAxM46mUlx1MDAxOFx1MDAwM1x1MDAxNIhhSk+79Dy/20uzPuBcYuBSaSNcdTAwMDRRXHUwMDFhcktcdTAwMDde5lx1MDAxMbSPSU1B5C3WgPiknUXHX7lcdTAwMWZcdTAwMTK3753YU8JhXHUwMDEwXHUwMDE0XHUwMDE3M2zfLeb3KMrjiN1cdTAwMWT5ls/Q9j8qx18xXHUwMDA2Z+Iw9W7S6dRcdTAwMGJBXHUwMDEzNdvH5lxya7Y77/bi66/Bq1x1MDAwZYO4Me337UX1ZScnu6ed8Wt6cMrOP/wmLoZ/XHUwMDFm0lt4PzvK9/HdJIlGhevefcqXZVx1MDAxOLfdSWyDXHUwMDAygVx1MDAxZVx1MDAwNy2oUtP2wFx1MDAwZq/Ka1x1MDAxNkStq1x1MDAxY1x1MDAwZTtcdTAwMDWD53A4M/9cdTAwMDJcdTAwMDSVMvVcdTAwMTBUwjCqXHJdXHUwMDFhgtWruVx1MDAwMlx1MDAwNOlcblx1MDAxMITVICjBUVJqobGvNlx1MDAwMDNcdTAwMTBcdTAwMTSEOZorXHUwMDBiXHUwMDBmwD+uXHUwMDFlg8A0ccNB7CZcdTAwMTixXHUwMDE1KFx1MDAwNOJQMY88xVx1MDAxY1nGmkCsKkWAPFx1MDAxZWozXHJzmLovPOVcdTAwMDPCM7cqXG7TM/82XHUwMDBiMTlz9JXb94PxjPuzuMW1a35cdTAwMGU/h4NcdTAwMTRteLmbeoPUXHUwMDBmu5/DOPGjXHUwMDA0XHUwMDFkjeNGSd9ccnZcdTAwMWIz5zVcdTAwMDO/a2O80cL5eMlM+Kc+XHUwMDEy4LRD32+3i1x1MDAxNNVCY1xcP/SSk2WYXHUwMDA1bej6oVx1MDAxYnxY0VZcXDvvl2mmdVxupHjpXHUwMDBlPNuKx6VZiO+FPGtcbum6XGZygVx1MDAwNCMw7JdcdTAwMDe56ERN+vGLSsKLw18v+VH/9rZ7td08XHUwMDBihGlHSsm1UFpyljPck/Isc1xmNYaB5ERSTC3VPKuYQFx1MDAxM1x1MDAxMF+Cyzw5f6dZXHUwMDA1XHUwMDFjXHUwMDE0XHUwMDA1liPnOdCsXGJH8rc3b
e72hoF834bu+Nq9eVx1MDAwMppdeN3zg875sVx1MDAxYVx1MDAxZPU+8qMvhvy6f3Peaq2FviW6XHUwMDEw0I+6iPFN0DeyVy2ypY1kQvXyXHUwMDE1dLWXtpq+gVxi6ihqXHUwMDAwi1IhlSBiXHUwMDA22pa/jUD6NpKinjDllLNG/iZYRVx1MDAxMy61WpLEjaJcXEqs+n9cdTAwMTSJr1x1MDAxMqSPI/H9ZYhxQ1x1MDAxY35cdTAwMGZrlTn8gaaug8InOaZSJteCXFyBjTnQeVx1MDAxMN2H8cXJdMNcdTAwMThfmb1cdTAwMTHFXGJiXHUwMDAwKXCyOSlmXHUwMDEw58xRmkjFXHUwMDE11bhcdTAwMWP1XHUwMDEw71x1MDAxMN1cImR1iONAREnBNNEojFx1MDAxNM3FwFx1MDAxNOhcdTAwMDSVtLGYUlx1MDAxYzDhXHUwMDE4yDl+SuBWS1x1MDAxME3XgPt5XHUwMDAyp/dcdTAwMTP4RtJcdTAwMDFcIiVJ9/2wjTiZNexui2hcdTAwMTlcdTAwMTDapXBju9CO1MwuISOUXHUwMDFiQlmhRydqXHLtPPaIQ4QyWFBpMHbbXHUwMDAxy7u5yXth+36jXHUwMDE2V/dTo6SDkWpsvaw1MJ5L84JRaFx1MDAxMzHKXHUwMDEwXHUwMDAylEpkJIqd54xcbtxBelx1MDAxMPX7foqL/y7yw7S8yNlqNm066HnuXFxcdTAwMDTgpIpt5bxcdTAwMTHbK86Wavmn3Vx1MDAxY1rZl+nnv15U9t6rjfmsdT7c81x1MDAwYu5cdTAwMTT/ryRbUJLX5T2K4lx1MDAxN1x1MDAwNTt9gGxcdTAwMTlHwdnfsHdzTdzjY/WFXHUwMDFkXHUwMDA0zfdsy2VcdTAwMGJcdTAwMDftUCtbJEhcZqvCdtBcdTAwMDZ0S6fjtYyp0i1cdTAwMWF1i8RcdTAwMWFKMKjaokDXO1x1MDAxOFx1MDAxZUJcdTAwMDFFc7BjYYUmSY9qJjVRfCM5b2OipX9z2Dvg5KJcdTAwMTW0P42/nv7hvU7PPi0rLo5cXFx1MDAxOFxyz49Et7lvgt570fVGg1+e196gJLXbXHUwMDA2jFwiWUj2XHUwMDAwbVG9mNutLThDgFFMelhmMcHoLPxcdTAwMDRcdTAwMDFHMjxKXHUwMDExm1x1MDAxMrX6o+C3XnGB6Vx1MDAwMmlcdTAwMTPx+sPUxSpR+jh1cVAo2WMvY/0nU1x1MDAxN/eQS1ldPNDUjapcdTAwMGJgovZcdTAwMTaA1LZcIsSie2mYL95T2U55QcE4xGA2XHUwMDAzwEKGleRcdTAwMDUzXHUwMDBlQ4RcdTAwMDOnmmJ1p0t2rU9eMOlQhplcdTAwMDb5XHUwMDEz3yvlXHUwMDA1+lx1MDAxZYAxjlx1MDAwNYFhKHnm1Fx1MDAwNSWKXG5jXGJsZHtwy9VFfNa6XHUwMDFj9mNNP7JO6sGr8Tt6JedcdTAwMGJ5XHUwMDAwXHUwMDE0k8CMlFxuhZjRskpeYCWvbYkpKKZ+itld8NXUxZKSx979sVtUgKRcdTAwMDNcdTAwMWErOl5tXHUwMDE0I4CFXHUwMDE2VmKSo8Ig7JnLi9qYt6+5aF+nuKCy/tlcdTAwMDMkL0pccs9j9b6stzjytlNbUFx1MDAwNijt7EtLKrje6D2Rem3BXHUwMDFkXHUwMDE0+Nr6d3JXpEpeXHUwMDE4h1x1MDAwYvRcdTAwMDdow61cdTAwMDQppOBJ1pNcZucrTKHhOchcdTAwMGKqTuImXHUwMDA0+vjrJ9lcdTAwMTm9oX46ulx1MDAxOD/BvYv/umyhitRcIltcdTAwMGKmKebY5aFd7aXt1i1UXHUwMDEwh9tcIlx1MDAwMFx1MDAxMSOUmH2qSFx1MDAxMOpoQLWijKZY89DNPdNgpJM9K7SsaEGrsMrS4oc/1/B0o
uUwV1x1MDAwMlx1MDAxNUKgh6u1IcVyXHUwMDBmZZVcdTAwMTXLQ+zcrFxcwTKhnrhcdTAwMDVTjEu2/K7g4ny3nXqFI4JcdTAwMDVWiowrI60uKVx0XHUwMDE2hJamXHUwMDE0yyZkS8NYybD1XHRcdTAwMTaqXHUwMDFj++SIloIyzlSlYqGorFxml1x1MDAwNKnbYGdl5iSLvXXDsOX/qFhcdTAwMTZvXHUwMDFi5OJAoEdcdTAwMDXWYVgqK8nzu9iz2oBcdTAwMTPFXHUwMDAxKLe3Qlx1MDAwNGqIubkvJViWVFHG3urATGk9Lyrs2UNBwyRcdTAwMTGoru3OXHUwMDE511ihzVx1MDAxOfSstEp9uE+a51wiPb/iTvH/g1NeIVx1MDAwNMtcdFx1MDAwZlx1MDAxNSqTXHUwMDAy+PL3f9Xbq7fvXkt+dFx1MDAxMZ6mhydcdTAwMDE9XHUwMDFjXHUwMDBm+lue8KhhXHUwMDBls6lcZnOaMjD7iFx1MDAwN5P2XHUwMDExXHUwMDBmzrTGhGd/N5bvUC+hc0FcbsJMtVTRiELFrInlLtNcdTAwMTJHSSCGsI08Jf24bMclonlcdTAwMWTZrlxm0ZqW51x1MDAwNP4619vXnNPrcL9zd/mGXHUwMDFix2e2lJo6XHUwMDExg8Bv39WO+Vx1MDAxY1x1MDAxYte+N9pfXHUwMDFjpDt33rbA9bKo+Lbz7V8w10FSIn0= - - - - - Astate: testingpriority: normal Bstate: testingpriority: normalCstate: pendingpriority: normalDstate:pendingpriority: high \ No newline at end of file diff --git a/assets/file.excalidraw_(13).svg b/assets/file.excalidraw_(13).svg deleted file mode 100644 index 90b98f2..0000000 --- a/assets/file.excalidraw_(13).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVaa1PbyFx1MDAxMv3Or6C4X4N2nj3T+UaAJZC7XHUwMDBlu0BCcrNFXHRZtoVlSZFFeGzlv9+WXHUwMDAwS5ZcdTAwMWZcYmxcdTAwMTKnNlRcdTAwMDWjXHUwMDE5aXpm+pw+Z6x/1tbXN7KbxN94vb7hX3tuXHUwMDE4tFP3auNVfv2bn1x1MDAwZYM4oiZR/D2ML1Ov6NnLsmT4+rff3CRxukF2XHUwMDFlx33Hi1x1MDAwN3e3+aE/8KNsSFx1MDAxZP9Hf6+v/1P8X1x1MDAxOSj1vcyNuqFf3FA0lWNxbm39ciuOioG50qik5jDqXHUwMDEwXGZ3aLzMb1Nrx1xyh37Z0i6Giv9kX8//kvxcdTAwMTS7R9s7vXct9+q2XHUwMDFjtlx1MDAxM4ThUXZcdTAwMTNcdTAwMTZhXHJjmn3ZNszSuO9/XGbaWe9hXHQq12fdlcaX3V7kXHUwMDBm8+mz0dU4cb0gu8mnwMqrd2tQ7XdNf1mlXHUwMDFjqUBqkMiRgVx1MDAxObXm9ytcdTAwMDFcdTAwMGWAsJZbzTmCrMW1XHUwMDFkh3Gax/VcdTAwMWbu5z9lZOeu1+9SeFG77NPpeOhh2efqfracRpFKXCIqrlx1MDAxOFxihWrUpedcdTAwMDfdXlZsXHUwMDE0OFZIQESKxHAsI/GL/aCNZIJasdyufPxkv13kxt/lLqTuwN/Pb4kuw7C6lFH7filcdTAwMWZyqMxcInl/5Xs5wbz/bj37qlx1MDAxOTiWhZl/nY1mXkmZv77e2Nt+q+19+upvJrcnScD8wcao3/dX01x1MDAxZnt38+fedlx1MDAxMr6/Rjg+UCFcdTAwMWO2h1x1MDAwN62TaHyUh/HdNI2vmj5cdTAwMTc/XVx1MDAxZHXOXHUwMDA2rtv+8HknOt9vXVx1MDAxZp/2mz33/lO53JdJ271DXGbtmjZMSsG0LVx1MDAxMylcZqJ+fS/C2OuXIFurXHUwMDA0PIHusXWtXHUwMDAyW3I+XHUwMDBi2Fx1MDAxNlx1MDAwMFx1MDAxNUUhXHUwMDFhXHUwMDAze/ou/SBg8+dcdTAwMDFbXHUwMDBiR1pcdTAwMGVSXHUwMDE5Llx1MDAwNEcxXHUwMDBlbGVcdTAwMWRhlJHcXG4wXHUwMDA0rEWAnaVuNEzclJAwXHUwMDA13JzRSFx1MDAwNG+jJ1FtpFx1MDAwM1x1MDAxM0BmXHUwMDEywVx1MDAxOG7Y4kBcdTAwMWVrmEDsMpO0jCqOsqPgtiggMHb1d3dcdTAwMTCEN2NJUGQvreDWl+hLNMwohtfrqU9cdTAwMWbSLIi6X6IkXHLilHacho7TgVx1MDAxYm6M3blcdTAwMTVcdTAwMDbdPNc3PJqRn47BIFx1MDAwYqi8jjpcZoJ2u1pcdTAwMDA9XG7HXHJcIj/db1K3KIRuXHUwMDEwueHxs6Ol9fPfjqjc4bqSTkM/b6XrgHORPreOIzP1q6MyLrQwXGYtlPn0XHUwMDE43KOtPa+bXHUwMDBlszPTOlHZvnv2bvPs42rXcc6EdoBZxlx1MDAxNacqrlm5xC9RyH1cdTAwMGanXHUwMDE2cnQ4lXFcdTAwMWGIpITQrFx1MDAxY6ZSyI1cdTAwMDOcadBcdTAwMTSKVjBZyFxyciN4lZ5XoZDfZYbb+mNL7YdcdTAwMTd7b9lcdTAwMTFcdTAwMWJsf7jBy/fXXHUwMDBiVdzHXHUwMDA0wsXR4UX7JIKbP3B3863i9uK8Z3+2QLiN4eP7oFx1MDAwNcZTO97xx9+7Z1wiSZYnXHUwMDEwSFx1MDAwNaoqd7yEQLAgZjGGJlx1MDAwMIFcdTAwMTFMNSaM6Zu00vqAyqxxJNOKcVx1MDAxMs6swlxi
XHUwMDBm+lx1MDAwMIDkg1IkzFx1MDAwMe1cInwxX1x1MDAxZjDtWKbANtVcdTAwMDdGXHUwMDFh4lx1MDAxMCHV4vSwgDx4YoouJlx1MDAwZt5UXG5u4kftXHUwMDFmqVxyXHUwMDFlqYV1bfDEUJchXGbuXHUwMDE4ZppcdTAwMDdcdTAwMDAu65dHXHUwMDFlQCnBSVx1MDAxYjc39/O5dFVFXHUwMDAxd4QyXHUwMDAyXHUwMDE4Q1x0tqy1XHUwMDA1xq12XGbjqLVcdTAwMTSWXHUwMDFi0LW4KvWeWY+x52NcXJJcdTAwMTVcdTAwMDGGjFlzJ1xmJpHOXHUwMDFjIJdAYSAjyrGmskj3sGeKJJzFXHUwMDE3UVx1MDAwNeJxVfBcImxQaOc3QVx1MDAwMZPxwO7PtZpgMF9cbjfJNzRfZZMzumTAdWnqXG6G8S7zeWwyh9FuUyejczVIPlx1MDAwYtTE7Fx0uI9HNd81jKLiljQhJaBcdTAwMDUrSFx1MDAxMqIxs8JcdTAwMDKllTBUdLhcdTAwMDXabj1cdTAwMTFW6Fx1MDAwZbPteDBcYjJa/8M4iLL6Olx1MDAxN1x1MDAwYrqVXHUwMDEzQs93J5KAplVtqzNHkj9xXFxcdTAwMDKWn9ZLbFx1MDAxNX+MPv/9amrvzZlJX7RO5Hv5vLXq72e5IaVY/erIXHJcdTAwMTHp5fJcXDcnvkvW2/NOQ9v6cHIhPptMe35PrDjxkaBxXHUwMDA0cpBcdTAwMDK0XHUwMDEwVrzsseYsN0SA5KSelJ1++sG5clx1MDAxMK0xKDDXP1x1MDAxNbv04IWEzfNcdTAwMDfZXHUwMDEyzkKW7oVcdTAwMTYwXHUwMDAxz/RCyt/P4naAXG5PLt9ccnd9XHUwMDE57WzurODhI1x1MDAwMXom/qhcdTAwMTAz1Fx1MDAxMpvjb/qsV9tcXFxirlx1MDAxZDL3wnAqjrTyNfwpslx1MDAxZSCoVSGhT9pcdTAwMTc8fnyqvUCqU1xiVEV/qr34oaeP2z/RXjxSXFzq9uKJob6svVx1MDAxMHz2oSNQmVVU9ptcdTAwMWY6zqfT1SyzXHUwMDFj0WHaIFx1MDAxNSgzXHUwMDA15lx1MDAxNlx1MDAxY0bSXHUwMDE4tFUolZpTZ1x1MDAxN3ZcdTAwMTjGsTSSNIJcdTAwMTF+yPdMqbZcdTAwMGU1SyBcdTAwMDI2SFx1MDAwMVmc/OZBMlJcdTAwMDRj3/j+ezzGfCCO1LxxhJSghCadXpW0Y0reXHUwMDEypVx1MDAxYkvyioRcdTAwMTZcdTAwMWGFz/NcdTAwMTdccl1cdTAwMGY4ipGI0paG0rxyaldcdTAwMGJcblxmXHUwMDA3XHJAOy81MjFcdTAwMTnUr+UuZiV80TqZ68u0XHUwMDE3olJcdTAwMWNrvIecgzJcYs2PTjtcdTAwMWZcdTAwMGV3XHUwMDEzk9re+Tlv928/87f9rcvVpj1cdTAwMDAgS2uMXHUwMDExtLy0zLWj0+bmXHUwMDAyXHL3xHxz4eq27XSmf9WiXHUwMDA0OYg7JCosqbfiL0iEWUbySlx1MDAxMj9X8VoyXn5gQEZ0tb5recxcdTAwMDfsfzv2b5k89fY7+r9M7Fx1MDAxZZzG+utcdTAwMGZ4uWHuc99cdTAwMDRcdTAwMDfXn5Kj7l433uvgwbHfP+xcdTAwMWb/Yi9NVM5cdTAwMDBrwFx1MDAxNppLkzvZxsCevkkrbVvA5q6F2Fx1MDAxMlx1MDAwNVx1MDAxOVx1MDAwNqy8XHUwMDFh9uBaQEtmpGY8LzeLXHUwMDAwe66aQeZcdTAwMTje2LGQieKSo/o3vTCxU/FcdTAwMDGZP6y/f9Cj5Xohw/JIvapcdTAwMWKWJ1x1MDAwNfqidlx1MDAwNcxscDNp8nPBJ6B7PouuZNkmveSQJkJttGZM1dFNoFx1MDAw
M6qYXHUwMDEyXHUwMDA0ivxcdTAwMDRA1Vx1MDAwMluiWdFcdTAwMGUoTjJJoZhcdTAwMGVz5mjaXHUwMDEyo4BcIpWka81k5SbMidxllrNYXHUwMDE5r1wipIWK8V2+V2n4zYN00CpJNK5Iilx1MDAxOeSVXHUwMDBlVWMgyVx1MDAxOXBcdTAwMTCSK0pcdTAwMGU7KVpcdTAwMWGZlfm0MFxuXHRcdTAwMWROtVx1MDAwM6Y6J1xuhWmG+TuwjHSdsIZP7sOvZVJmJHrROJnj5fPWqr+fzHSI9Yujb30pQ1x1MDAxNbmi5m9+zld1K8lzRiqHXHUwMDFiRaRuXHLpXHUwMDE4Nn4oI7V0NNJcdTAwMGa1XGJcdLZcdTAwMWXX8miOOZbwh1ZLPZbyJc1RKORT8/fsXHUwMDExXHUwMDE4yiksJ7VcdTAwMTaWtNhcbp7IXGKhpVxcxolMXHUwMDFkn0sgm1xupXBcIkClXGJohLI8I6rMc8dJwjqG5W/sSaRcdTAwMDJoOTO/NuvMyrv830TGzeKctfvnb7hJcpTLt9F2UFx1MDAwNlx1MDAwNu17wVpOcuNb4F+9mY+QtfslzVnDL1Ly+9r3/1x1MDAwM2+zhmUifQ== - - - - - Astate: restartingpriority: normalBstate: pendingpriority: normalCstate: pendingpriority: normalDstate: testingpriority: high \ No newline at end of file diff --git a/assets/file.excalidraw_(2).svg b/assets/file.excalidraw_(2).svg deleted file mode 100644 index 56270f4..0000000 --- a/assets/file.excalidraw_(2).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nN1aW1fTSlx1MDAxNH7nV7B6XnWc+8U3UOSiKFxignJ0uUI6trFpXHUwMDEyk2BcdTAwMDFcdTAwMTf//eyE2lxcmpZcdTAwMTZbKadcXNrOffZ8+/v2TvJrbX29lV5FtvV8vWUvXcf32rEzaD3Jyn/aOPHCXHUwMDAwqmj+PVx0L2I3b9lN0yh5/uyZXHUwMDEzRajjpedh2ENu2L/tZn3bt0GaQMN/4fv6+q/8f2mi2LqpXHUwMDEzdHybd8irirm4rFx1MDAxN75ccoN8WoqZUpTD36iFl7yE6VLbhupvjp/YoiYralx1MDAxZJx5vW1xvfum/THccd+fXsVcdTAwMDH+WMz6zfP9o/TKz1eVhLD5oi5J47BnT7122oVaUiuf1CtcdTAwMGUvOt3AJkmlT1x1MDAxODmul15lZVx1MDAxOI9Kb03wfL0ouYRvXGZcdTAwMTPEuWCEilF51pNcdTAwMTKOjIT+XHUwMDFhbGCMZqK2pFx1MDAxN6FcdTAwMWbG2ZL+sca69luxqHPH7XVgZUF7cpvBcKPSXGLEJTZMXHUwMDE143et1+mm2dkwgbRcdTAwMTBcXOpSbWJz8ysljVx1MDAxMZhcdTAwMTXHl01cdTAwMTnttnMgfClsXHUwMDFlO327m/VcYi58v2y4oD00XFyl4jyr2Cphqlx1MDAxOOpcImo7t2dPXHUwMDE0XHUwMDExXG5zeFx1MDAxMVGY3feCXn04P3R7XHUwMDA1XFzy0psn94ApY3xcIk650HCAXHUwMDFjz1xmU4I/bIfOpW82XHUwMDBlIzpcYt5HXf+Hd1x1MDAxZpjSv1x1MDAwN1OukDBKXG5Zx6mhiMB5VCBSXHUwMDAzKLHZz1SAprFcdTAwMTMkkVx1MDAxM8Opj4OU
XHUwMDEwhvAtXGZx/lwi41glRCOpmWKSXHUwMDE4eEnF64hcdTAwMTWGalwiKdV/jtjf2CnQw4YlN5OBPOpT9C6hL7WXxc5LWDk4d8/cjuB7+4cnXHUwMDE39il75fW7rDVqd/Okedjbznb77PpcdTAwMTJ36WvZa5++2t/fP/z+2q3O8nt+J47DQWnc4afJ3lx1MDAwN4yhjOaL8r7K/kuOJ1xineR4QmudXHUwMDAxUs/seM3GXFxtx9PgX4xqXHUwMDBlQlx1MDAwMVx1MDAxNtek4n1cZlx1MDAxOJpcdTAwMTEs4Vx1MDAxZChcXFx1MDAxOa6W5oScIThuRpiRXHUwMDFj3pv0glFEjdScY0KhhZZ1XHUwMDE3NFoyTWhcdDV/XTSYNFRgM1x1MDAwN2yLVYVBeuRdZ2dEKDJAeUxJzI1cdTAwMTKE80qrV07f868qZ57jXHUwMDFibLzxOUhtknpBp1Wp2/C9Tob3llx1MDAwYnuwccVcdTAwMTVSXHUwMDBmXCK1UYO+126XRcqFXHRcdTAwMWQvsPHuLOJcdTAwMTLGXsdcdTAwMGJcdTAwMWP/eMp6wCZ2Z0SsqMT4505is9rMXHUwMDA11Fx1MDAxZqhpXHTGdTVlXHUwMDE4U2NoYdC7nPqCc/+r89S+ZZdq75K8ky5cdTAwMGJ/rrZTXHUwMDAzXHUwMDAw0ZiOMkSUkauvo6ChXHUwMDAyQlKBXHUwMDFml5Bublx1MDAwNZ3tkzdX/a1TdVx1MDAxOWF83Ll693pWId08+P6dXHUwMDFm7L75XHUwMDE4XHUwMDA1akdccjyr9914cULKXGaZh5HuJ6Ryos9cdTAwMTHDNVx1MDAxNVrp2UPYZmuuttOBgiktiJZKNimpRIB68kiUVCpQ0eznQZVU4ZLV/7aSbi5bSe9cdTAwMTCWupI2rWfpSsqxqJeOvFpiOmd8XHUwMDFjn+xF/vZXonr750fH+vjNp4/do9X2anBcdTAwMDTEXGZ+vGkpZKRUgKZcdTAwMTbbelxmarp/XHUwMDE2bjFzXHUwMDAw42yRl/RtcN7rhN6sanpGdn98ekE2Nt5FXHUwMDBlP0526EnqJ1x1MDAwYlNTriVlS1dTPfF6XHUwMDEwYZxqhs1cdTAwMWNcdTAwMTeEmq250n6nsESQSWlwskY1fVR5KVx1MDAwMSVT2V5cdTAwMWXwauZDJ6Yvli2nd4hLXU6b1rN0OVx1MDAxNUrVS3+7tYKclGeR8sxevXPcbu9t41x1MDAxZlHn+PVAuNHhh83dT6vt1ZpcdTAwMTJEXGbhY3rKXHUwMDE1RlQ+XG5BZZpcdTAwMThGXHJ+XFyCOui8IzvXhzS+erlx/vPD7lx1MDAxZVx0T/b+gqAuK+2dOu7yr0svPVx1MDAwMFBi4o1LgrVcdTAwMTBSkjnS6ebTX22qXHUwMDEwXHUwMDEyKVB3Q5TmUupcbl1cYvBkLiCszSVZmeVxXHUwMDA2J7ngwUxcZktcdMR1XHUwMDFm+SdSKYL5Q97MhLiVwP9cdTAwMDeT/5efg4HjLVH+71DDuvw3rWdcdTAwMTHyf8snXHJcdTAwMGUtJyo/RIdUak3p7P48nZBX058xRlx1MDAxYzY6dodXUIU0yW5AiUxxS4nPon1cdTAwMTlS+eZcdTAwMDdcdTAwMTHAN5FmXHUwMDE0V56SXHUwMDE4Oa+SXHUwMDEyg1x1MDAwYlx1MDAxNaZaoN7Tu/V+kbloybROnG56QTtzgMrChs/bzOJUude7XHUwMDE32SqfYqSUwlx1MDAxNGONpTZYXHUwMDE28pVcdTAwMTnMibK9Qpssvlx1MDAxYtuyXHLady9lenhfWlxuRoJSXHUwMDAzRyqN4lx1MDAwNlwiTjW2XHUwMDEyoDGiXHUwMDA1XHUwMDA3jc1cdTAwMDJBXCLHT8F3kvRF2O97KVx1MDAxOPwg9IK0btjcglx1MDAx
Ypm3d60zduqwpXJdnVx1MDAxNqJsxGo0V3xaLzwn/zL6/OVJY+unNVxc52VlSFx1MDAxN1x1MDAwM6yV3+emMIXrhaNb5YYx+J3nXHUwMDAy/9TQbzVcdTAwMTmMQI7CKpdcdTAwMDBy/uJcdTAwMDZpwYSGXHUwMDEwYLn8XHUwMDA10Vx1MDAwNcrcvsxSXHUwMDA1h1x1MDAwMY2S6oNWQ1x1MDAwNoO4hFx0ztgqXHUwMDEy2Ly3ppZJYFx1MDAxMuzLqWRcdTAwMThDWFm6Q1xc0Fx1MDAwNkZmaP/7cdj0K/5cdTAwMTVcdTAwMGVTXHUwMDFhWFxmXHUwMDBiKlxyZYRg2rRcdTAwMTitsdKa/C84rFx1MDAwMdt5eVx1MDAwMetFsVx1MDAxONP1wlx1MDAxMYtcdTAwMTFcdTAwMDNhmJnjgdDpieZqslx1MDAxOJaoKVxuXHUwMDEzXHUwMDFhKSaZMWzJLFx1MDAwNnSERJ5KNdJcdTAwMThcdTAwMDOS5ZQ0PFx1MDAxMkpAzSHfI2ZcdTAwMDFp1OKJbM6H1ZZKZIpxkrFcdTAwMThkz1x1MDAxY1x1MDAxOHacO3h2bV3pe1x1MDAxM9n0h4AqRGa0YUJcdTAwMTJcdTAwMDFpuoJcZliML0YhaVx1MDAxOP2/XHUwMDEwWVx1MDAxM7zzilx1MDAxMrInUdnacNiWXHUwMDEzRUcpIGx0XHUwMDA0gHSvPUzCi721fnp2sDnd/9aGlszIyObQv1m7+Vx1MDAwZmpOplx1MDAwMyJ9 - - - - - AtestingBtestingCtestingDwaiting \ No newline at end of file diff --git a/assets/file.excalidraw_(3).svg b/assets/file.excalidraw_(3).svg deleted file mode 100644 index e29c367..0000000 --- a/assets/file.excalidraw_(3).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nN2Z2VLbSFx1MDAxNIbveVxuynOb6fS+5I4twclcdTAwMDBcdTAwMTlcYsmEqSlKltq2bFnSSFwitknx7nMkjCUvXHUwMDAyQzBxXHUwMDA2qijU3VIv+r/zn25939rebmTj2DbebDfsyHVcdTAwMDLfS5xh41Ve/s0mqVx1MDAxZoVQRYvrNLpK3KJlN8vi9M3r105cdTAwMWOjjp+1oqiP3Ghwe5tccuzAhllcblxy/4br7e3vxd9KR4l1MyfsXHUwMDA0trihqKr0Reh86XFcdTAwMTRcdTAwMTb9SqaN5FjqaVx1MDAwMz/dh+4y60Ft21x0UlvW5EVcclx1MDAxMtJg6GTsbHy22/zrXVx1MDAxZuOjJi17bftBcJaNg2JUaVx1MDAwNJMv69Isifr2i+9l3btcdTAwMTWolNfdlURXnW5o03z2ZFpcdTAwMWHFjutn47xcZuNp6e1cdTAwMTK82S5LRnDFsUFYaGGYXHUwMDEy04r8VrhGVMyNYy9cbqIkXHUwMDFmx2/E5r/lSFqO2+/AcEKvbGONdW27bDOczE5JiiSjmlSe37V+p5vlU2dcdTAwMThRKoWkXHUwMDAyXHUwMDE3P6xcdTAwMWODLVaeUGVcYuOaljPJe46bXiGCf8r1TpyBbea3hFdBUF200Jss2kxFK684qOipfNRV7Dm3750oXCJcdTAwMTTmhDPFS2VcdTAwMDR+2J9/XFxcdTAwMTC5/VIqRenNqydIlFT6mZMoY1QyxplcXFmi159cdTAwMWT39LSjXHUwMDEy/DE9vr48t0e65264RLlCfF6enHGk2EzxXHUwMDEzJJolTpjGTlx1MDAwMi98UaaEKcSUNlhSbvJcdTAwMWa1qNZ72kzUSlx055JcdTAwMGLGn0Gtd7oplcMmJTf1XCKe3lPeXVFeZkfl1Cs6yS76XHUwMDA3X5vxXHUwMDBluehcdTAwMDTjt9fH78PR+15j2u5m8t89hGBuhFLPRcjMOKtwUF5cdTAwMDdcdTAwMDehWFx1MDAxYS7pI1x1MDAwMvjyWW80XHUwMDFkXHUwMDAyMyS4JkIqpmdjasGJMohcdTAwMGJKsDDSQJDna8OFXG5cdTAwMDTvXVx1MDAxOKrmuJzCXHUwMDAyXHUwMDFjXHUwMDFi4JZzRjCMs+K8d5FdcKU5NVj/OCtPjuyP1m05qijMzvzrgkuNXHUwMDE4TEZcYlx1MDAwZa9cdTAwMDZjMtPmrTPwg/HMXHUwMDFiL+Rcciu805gp2lx0/E6u8oZcdTAwMGJcdTAwMDO3yVxmXHUwMDAwmVx1MDAwZvnTtMHA97yqfbjQj+OHNmmuXHUwMDEy9qPE7/ihXHUwMDEzfFpcdTAwMWNcdTAwMDbM31x1MDAxZU7DXTUlaDmpzWtcdTAwMGJcdTAwMWZ6ur3RisPPXHUwMDEzTCBcdTAwMGbQIJfV/e0q7l+Tj6Ms+vfgwv3a41x1MDAxN0fDXHUwMDBmg80mWFKNMDGCLbD7Mlx1MDAxZYdpXHKvyyqnOVx1MDAxOIbIylx1MDAxNSuX6VdwtfHl6SltjVx1MDAwZa6/xO6Q78T48NNcdTAwMTHeRFfT9a4mJFx1MDAwNEnFK9nvQ0wsn/VmM6HzLVx1MDAwMthcdTAwMDQxgFx1MDAwN6RXXHUwMDBipsY4XHUwMDAxZ8dC5tysz9RcdTAwMTTSXHUwMDE4cyUxrDjRNa6msZGCMC3BYGUlIbmDhcMjOGNC/jgsP93WXHUwMDAwekVcdMFGiNVtbXdNtvZAtJ+3td2XtDVZb2tGKUkkfVx1MDAwNMIn/MDbb120902P9YLkQ3h+otubjbAmXHUwMDFjLVx1MDAxZSu8iKdJxJaeLSzUTFx1MDAwMIVdgmbSPFx1
MDAwN58vaGZfvVx1MDAwZqp3zsPDcfr7O9+z7k7S/mNcdTAwMDPNjLLaIzaIjNRcdTAwMTCi8OpbtOWz3mxcdTAwMTJcdTAwMTRGXHUwMDAyg/JcdGiPa81miVx1MDAwMDNcdTAwMDObg41cdTAwMTOlRoHHrM/MOOKGMKJcdTAwMTQ3RV6nXHUwMDE3XHUwMDExXHUwMDAxN1x1MDAxM5BbSNi5gLdSiVx1MDAxN91MaYq1pOz/4GaYMSkoZ5RcdTAwMGJcXFx1MDAxMeFcdTAwMDN2trcmO3sgys/b2d5cdTAwMGLaXHUwMDE5w6ZcdTAwMTZiSUFNsG9fXHUwMDFkYvHZfLLpn1x1MDAwN+1B0sVEmrej42FrsyE2RiCtpZpJXHUwMDAyX8zRXHUwMDA0XHUwMDAyvWPwrntOXCJr29ydRFxualx1MDAxONfq13K50Fx0SSZps3t1KS9sT3yMu7uHm+hyWtRcdTAwMDJcdTAwMDK5MjdcZlc2dVx1MDAwZlx1MDAwMbJ81lx1MDAxYlxyXGKBdUZYSbBcdTAwMTWDmVx1MDAwNotYsDlcYrFK5Mc5uUjZ2nChXHUwMDA2wVx1MDAwYlx1MDAxN1x1MDAwNlx1MDAxYk5vz0SX+lx1MDAxY8eaKiqJXHUwMDAydMkynyNaYPFzPzM9j88pZKiUXHUwMDA0tqnM4Ef43P6afO6B8D/vc/vP63N1XHUwMDA089o0lUJwVYJcdTAwMTK5OsBmeOmT93vj8Pjky3nzspuoxN3wc0iAXHUwMDA15bt6tfBcdTAwMTGBafCW23OYXHUwMDFm+yR8L7ZKXCKJXHUwMDE1pMDEwL5ALtvAscWDSFxmoIAt65/6yYAxWrH/p1FKV8ay5WRut1x1MDAwNs3AtrN7wMyiuI7KmeHOIzjX5UpcdTAwMThScYvh1sSdXHUwMDFiTlx1MDAxY59lsHLTXHUwMDA0XHUwMDAyXpXvTaZfdt/45tvh7v3i2ZrgnYNki4zkZuvmP188jNQifQ== - - - - - ABCDbatch \ No newline at end of file diff --git a/assets/file.excalidraw_(4).svg b/assets/file.excalidraw_(4).svg deleted file mode 100644 index bf89fcf..0000000 --- a/assets/file.excalidraw_(4).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nNVZW1PiSFx1MDAxNH73V1Ds62ymr+lu37yN4o6XWtxRd2trKyRcckRCkkmCXGJT/vc9iUhcdTAwMTIwguJcdTAwMDWkypLT3elzTn/f+U7aX1u1Wj1cdTAwMTmFur5dq+s72/JcXCeyhvUvqf1WR7FcdTAwMWL4MESy73EwiOxsZjdJwnj761crXGaNjpu0gqBn2EH/YZn2dF/7SVxmXHUwMDEz/4Hvtdqv7Hdho0jbieV3PJ0tyIbyvSSfNZ5cdTAwMDZ+ti1mkivCmTCnM9x4XHUwMDFmtku0XHUwMDAzw23Li3U+kprq7jm1jmPT7TR7XHUwMDA3e4GkQexfJPmubdfzmsnIy7yKXHUwMDAzXGI+XHUwMDFmi5Mo6OlL10m6j1x1MDAxOSjYq1ZFwaDT9XWcRo+n1iC0bDdcdTAwMTmlNoSm1odcdTAwMTRs13LLXHUwMDFkfONIXHUwMDE5kmNukjxcdTAwMTHpSiqwISlXVPBcdTAwMTlf9lx1MDAwMi+IUl9+wzr95N60LLvXXHUwMDAxl3wnn6OVtnU7nzOcRGhcdTAwMTJpKE5xYduudjvdJPVaXCKDUElkcTTWWdqZSSnjhOdcdTAwMTlKd1xmXHUwMDFiTlx1MDAwNoB/81xcR1ZfN9JcdTAwMTX+wPOKXHTznUnCSlx1MDAwM6104KCApfxRg9CxXHUwMDFlzlx1MDAxY1x1MDAwYsxcdTAwMDVimFx1MDAxMkpzzzzX780+zlx1MDAwYuxeXHUwMDBlk8x6/+VcdTAwMTXwxFxcVeJcdTAwMTOZijBaPOJF+LTj31x1MDAwZvb6XFw5x1x1MDAwM28gbz323+HZxZrjk1KDcFGCQrqSYW5cdTAwMDBG0apcdTAwMDBNXCLLj0MrgmOfXHUwMDA3KcbUwIgzU1xujtJcdTAwMWb8XHUwMDA0WKvnPEJcdTAwMTZcdTAwMGJcdTAwMTiDXGJWh+wjeHL40Inlvlx1MDAxYcnTNfnqXHUwMDAy/Fx1MDAxMn2XR17Ayl9H7ilqXFx1xseX1zt7zqFtypN2fTrvfvLXMzRBXHUwMDE4m1xmv1x1MDAxNU1Kflx1MDAxNlx1MDAxOYJEXHUwMDE1Q1xiwENcdELx8lx1MDAxNfzpqNebIUJcdTAwMWFYmmlJXCKIIyHLPGHKXHUwMDEwpoKqSVx0x5TRdyNcdTAwMGJcdTAwMDGiMvDDXHUwMDE0SCrJiryckoUxg1wiSaXicDBcdTAwMTRcdTAwMTe9eeBcbsZYKkbh1D6xvr9cdTAwMTi4uVeBnzTdcUZMbjBmXG54lFBcdTAwMDJcdTAwMTSWlSZ9s/quNyqdeVx1MDAwNnBI8k69ZNrx3I6fXHUwMDE1b/BcXEclXG4kLrRQ01x0fddxiipiwz6W6+uosUzxXHUwMDBmXCK34/qWdzHvXHUwMDA2JEBcdTAwMWZN651RqMQtK9bpaFx1MDAxYS9+vcpcdTAwMTFcXN2FXHUwMDExkzJcdTAwMDVVNVx1MDAwN8RcIlx1MDAwZVPe+K5cdTAwMTW9uohCK9BnSjeG4fV6c9iUgFx1MDAxNrMsZlx1MDAxYqZyWCHBhaJmXHUwMDBl9E2QOXP8/c/98Vx1MDAxZtZlKFx1MDAxYuPmuX97jnfJOspcXCGvs1x1MDAxNMFcdTAwMTJcdTAwMGLBVWHGXCKKPFx1MDAxZPVaU0RcdTAwMTBlYHhfXHUwMDAwXHUwMDFkw9BcdTAwMTSSMk8+TuU4bMSkXHUwMDAw9VI0fT+S82RZqHLw8sNcdKFcZq1OlVxyXHUwMDE1ud13XHUwMDEyuVx1MDAwNbV/VuR2P1LkmJy1Tlx1MDAxOazgI1x1MDAxOEb5jEVcZlx1MDAwZfvDg/jk5Mcp
XHUwMDFhXHUwMDFl8fGpRDFyhuvNYOhcdI3N1jhBXHUwMDEwcJ/IzZK4m9Epu1x1MDAxOUSt7/ZA028tp3foN1svkzioYohcdTAwMTap8i5cdTAwMTKnKt/ksDBcdKKMvOAu7umo15sgUlx1MDAxYVx1MDAwMD1GQV84k5KWefJxXHUwMDFhR1xmXHQ6y6RSXG6BTL1K41x1MDAxOFx1MDAxYye4LOXqXFx5vca9XHUwMDE0t2+qcXvvpHFcdTAwMGJK/6zG7b2txlWxl1eTl2DChGTLt6fU29/f2d8lrVx1MDAxYp/8XGKHw79v5dXVenOXXHUwMDEzaYCAkLmLSiqRoUzATior70ZZzlxmgZXinDGWtqV5rqeMnb9Lh2ZaXHUwMDAwv7n83D5cdTAwMTSqxKpcdTAwMWMlqGR9hpQtK7G7XHUwMDE1xPR0O3mGlklcdTAwMTBWcbLk7ixcdTAwMDFntlxcioRYrtBoisrbXHUwMDE0gYSAbLPl/2VwdKZY8nPM28HNtX1cdTAwMTbvjEaYXHUwMDFjvylcdTAwMTVcdTAwMWQr7uo35Vwi8MAgjGA512lcdTAwMTLDJFx1MDAxYtFppjfXhHD+abcpzzFWgnesiOelusGtSYtZt8KwmcAjp65BaK4zYXL+mPqtq4e7z2d/a1x1MDAxMkOKQ53Fer91/z9cdTAwMWKtjL4ifQ== - - - - - ABCbatch \ No newline at end of file diff --git a/assets/file.excalidraw_(5).svg b/assets/file.excalidraw_(5).svg deleted file mode 100644 index 078c2af..0000000 --- a/assets/file.excalidraw_(5).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVba1PqyFx1MDAxNv1+flx1MDAwNeX9Omb6/ThV94OIz/GNgjBzy1xu0EAkJDFcdTAwMDRBpvzv08lR8pAoepVcdTAwMTPmlFx1MDAxNCXdafq11l57d2/+/lYqbVx1MDAwNFx1MDAwZp7a+F7aUNO2aVtcdTAwMWTfnGz8XHUwMDE2lt8rf2S5jq5C0eeRO/bb0ZP9IPBG33//3fQ8o2dcdTAwMDUt11x1MDAxZFx1MDAxOG13+KOZstVQOcFIP/in/lxcKv1cdTAwMWS9JzryVTswnZ6tolx1MDAwNlFV3Fx1MDAxN4Q8W3riOlG/QjJcZjlcdTAwMTdi/oA1qujuXHUwMDAy1dG1XdNcdTAwMWWpuCYs2jhrXHUwMDAy6pS7NcvyJrv+iTs5Plx1MDAwZVx1MDAxYXGvXcu2q8GDXHUwMDFkjWrk6snHdaPAd1x1MDAwN6pudYL+81xuJMrzWvnuuNd31CicPZyXup7ZtoKHsFxmgHnpjyX4XopLpvpcdTAwMTNF2OCEXHUwMDAxiTmdV0RNXHUwMDA1MGhmXHUwMDE4267t+uEw/lx1MDAwM1X4XHUwMDE3XHUwMDBmpGW2XHUwMDA3PT1cdTAwMWGnXHUwMDEzP6Okaqtu/MzkaXKMXHUwMDAxg2BMU132ldXrXHUwMDA3Ub/SXHUwMDEwlFx1MDAxMiZcdTAwMTK1I1x1MDAxNa05xFx1MDAwMEvKpYjnXHUwMDEwdupcdTAwMWR0ou3/X7zSvjlUXHUwMDA3YVx1MDAxM2ds28nlcjpPy5WqaIVcdTAwMTU7XHQkxV819jrmj1x1MDAxZIdcdTAwMWNSXHUwMDBlXGJmXFzCeINsy1x1MDAxOWS/znbbg1x1MDAxOCRR6eNvXHUwMDFmXHUwMDAwJ1x1MDAxM7nYXHUwMDA0SFxigSRaXHUwMDFhm5Vcd
TAwMDN2t6Va25dcdTAwMDd01m7MaFx1MDAxYrj0oeDYpNJAQlx1MDAwMobS0ESIXHUwMDFhLFvxXHUwMDAxhFx1MDAwNr7pjDzT15v+XHUwMDEypVx1MDAxMFx1MDAxMlx1MDAwM6e7iDH6su5cdKFcYjCMoECJnfswQp+xXHUwMDEyo1x1MDAwNT+VPOZcdTAwMDN33iZunUBboKbxVFx1MDAxM+C4sFx1MDAxYXs7XHUwMDAxw+Oad+DdNcvkXG7V1Mb8ucen/1bGitQ4k9ZcdTAwMWHkXHUwMDExglxujFx0Z5wtTYjFcy40IbSZNiRcIphzQiDR+EvTglx1MDAwM1x1MDAwMyOi4ce/kFx1MDAxNtiAUEpJtXCmOpqT4yUpXHUwMDA0XHUwMDBibbZI7M1cdTAwMWFY7XhUrlx1MDAxM1StmYrYnSrdNYeW/ZDazVxiuHpccrc2UkVbttVcdTAwMGLxu9HWQ1V+XG7agaX9oPlcdTAwMDNDq9NJikFb92NajvJcdTAwMGaWseKub/Usx7QvX1x1MDAwZUPPWO3PXHKYkdilljlSYW0kvlx1MDAxZlx1MDAxNyuIYbb0mZySXHUwMDAzwClLLN9b5HSxRbvty+3KcHOn11x1MDAxML0qqFecYpOTa3JcdTAwMDJI10isJGeAcMnIWmnV1cybglx1MDAwYnJcclx1MDAwZrc3j4+7dTa57NtcdTAwMDXUKk7y6MCwxEBgsTxcdTAwMWRcdTAwMTbPudh0oMKQXHUwMDA06JegTGLM0qxYiVZcdTAwMTGDYq1VXHUwMDE4XHRcdTAwMDb1W2LPX1ErXHUwMDFk8HHEOPlcdTAwMDRcdTAwMGZuXdSq/EVq9YZcdTAwMTXPqlV5hWqFQK5acUi4kOhcdTAwMWRq1bCPq+NpZVx1MDAxOMxcdTAwMDRcdTAwMWWUT4/qlsPvi01P7ZNcdTAwMTmQXHUwMDEwiuH66JXeXHUwMDE3SULn81x1MDAxM1x1MDAxY8lcdTAwMTVcblx1MDAxNq5unt57fzhcdTAwMTVVv51OXHUwMDBmVbM23Fx1MDAxOVx1MDAxNlCwNFx1MDAxY/IogSBcdTAwMTSQSEGXP29YPOtCc0JCalxiKVx1MDAwNVx1MDAxMVx1MDAxModcdTAwMDFcdTAwMGL+XHSShVxmwfVcdTAwMTAkQ1x1MDAwNCBMsHhJkFx1MDAwNcTg2sEmhOFfKMLa/lwizXrDlmc1a3uVmoVxtvSZoFCvPtXeVsKXf4ugXHUwMDA3o1p9bLauJoPB1LSb1mDSnJSLTVBcYpAwXHUwMDEwQmydoiyIXHUwMDA0QVx1MDAxMkm6Xqo1a0z3zmeV09bJ/fC62lx1MDAxZVx1MDAxYzWa125cdTAwMTFVS+Q6clx1MDAxMGizXGI55sur1uJZXHUwMDE3nFx1MDAxNJxcdTAwMWJcZnLJMUZcXGtcdTAwMDBPM2MlskVccqr9Mlx0XHUwMDA0ITyi53KyRSmVXGJ+XHUwMDA2M9ZFtipfJFtvWPOsbFU+V7byyIlzPUqol1x1MDAxZFx1MDAxM8rfIViXM/OqUVx1MDAxZe/sNXeapt/xT0f1nlVsblJcdTAwMDK1LMFcdTAwMDW3q1x1MDAxMlx1MDAxOCCrXHUwMDE3n0xKSlxmrqMlSlx0ia5b45V+lZNcdTAwMTIx+DlqtS6cbJlBu5/DS1t1g1dYXHUwMDE5uF5cdTAwMWUlU8PN8i/T5c887iCCMaS5uPxxx+XDXHUwMDE53Nt2W719ub03XHUwMDEwste88lx1MDAwYn5cdTAwMWFJXHUwMDExMdIpXHUwMDA1YTtcdTAwMDLZXHUwMDBip61cdTAwMTCJXHUwMDBlTJtHXHUwMDFk0yXC7TUg4cfRXHRp7r2udim4RidcXP5el9SOxIn44/jWcWqzXHUwMDFhb93cn6GLgqNTXHUwMDAzXHUwMDA1ZC9UI4Aya
FAo06d0xVxia7SnicPkqPW6PFx1MDAxYfnnx7Pe5uW91zm/xcFccuuoh+NcdTAwMDJGNTLXceJcdTAwMWElUNB3ZKUtnnOhXHTBoLacjFx1MDAwM4yxYFxigzQrKGBcdTAwMDaCRFx1MDAxMPy1iVx1MDAwZYSHiVx1MDAwZVx1MDAxY3FcdTAwMWFRYCn3iWJcdTAwMWR08mRi1lx1MDAxYVju/8992vmikOZcckOedal2VulOwfyTuFCxMFx1MDAxNWT5wFx1MDAwNl/xu3L5ttHtn7mOeXHpV8v1VrFcdFx1MDAxYSY7cCzCO9W1USztaUmcspxroVgqMOHsvFx1MDAwMyxFQfmOMW94dV9AxYI0N1lVXHUwMDEy/Vx1MDAwNyVe/lx1MDAxOG7xpIvNXGJccnw9Ryo1wH6WZMGPSJaOQSQnv1R23u5cdTAwMTcp1lx1MDAxYoY8q1i7q1Qs8crdXHUwMDExktrTSlx1MDAxZfm8xc+bra2bputV1KhcdTAwMWRUx9OLs1x1MDAwYrXZKzY/w4RcdTAwMDfB1irEQpQxPbQ1S3eYXHUwMDAwYlx1MDAxZE3VSeuwur/XtIBz3jqtXHUwMDE0ULBcdTAwMTDKXHUwMDE1LFx1MDAwNLRcdTAwMTfH8Xsujlx1MDAxNs+60IxcYtNcdTAwMWQgR1x1MDAxNFx1MDAwMFx1MDAxZG5hXG7TXHUwMDE5eqtRLGrwULGEfkeSLZlPXHUwMDBlXHRcdTAwMGWTIzj/hYKsvS+SrDcseVay9lYoWVx1MDAxONJs6VxcsnSArT15+Y5cdTAwMTTam+Z5fffCuVWn+4cnt1x1MDAwM0Cu7P5msVx0XHUwMDFhpTvIXHUwMDA1OXpF1izAuXZ/8ZpFWXfX7a1cdTAwMGXtzpg4qo/OhceHbOuhiKLF8jnBXHUwMDExXHUwMDA0XHUwMDEwcbA8J1x1MDAxNs+64JxcdTAwMTDAYFx1MDAxOOhcdTAwMTdcdK9cdTAwMDVIilx1MDAxOKtcbrO0fyCFXHUwMDA0YdbVsj+CglrkIODwM35tsS6itf9VovW6Lc+K1v7nilbuXHRcYsx1KCEnjFx1MDAwMUTYO35LfnaDdqaN4GbrqCFrXHUwMDE3N91ru3dUbG5SglxmTLKpXHUwMDBlXHUwMDA0USPr3Fx1MDAxNSPVgVwiJlx1MDAwNYaJm+9/PSP/XHUwMDFkqVx1MDAwZXlcZmS5t2ZcdTAwMDJoYGhcXCyvjT7bo7XDs8321UDLXCKG++RkdFlw/iFp0JepRlx1MDAxOGu94pSL17zFLlx1MDAxMG1cdTAwMDA+zkBEpIG1xlx1MDAwMSGplFxmLKJg4pb/+UheO5KCyZ/KQMKots4rY+CZcjqW0yvtmpY99lWporygX/pvXHT+5ayWl0tccmQptpJcdTAwMWZueegkR+u8YXpeNdCrPPfh9bZanaelilx1MDAwN7Vxb6lJ+XWgfXuyXHUwMDAyIeFUXHUwMDE0XHUwMDE0PH57/Fx1MDAwN3FcdIdFIn0= - - - - - ABCDbatchEFGHbatchPending Failure Depth = 1 \ No newline at end of file diff --git a/assets/file.excalidraw_(6).svg b/assets/file.excalidraw_(6).svg deleted file mode 100644 index 6c7b9f3..0000000 --- a/assets/file.excalidraw_(6).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVbWXPaSFx1MDAxN333r3B5XuOe3pe8ecF74j1cdTAwMWXnqymXQFx1MDAwMjRcYolcdTAwMTHCmJnKf/+uZIxcdTAwMTZAlmOwcUauXCLQi3q755x7uzv/rq2vb0SjnrPxeX3DeWhYnmuH1nDjU5x+74R9N/Ahiya/+8EgbCQl21HU63/+/Xer10MtN6pcdTAwMDdBXHUwMDA3NYLuYzXHc7qOXHUwMDFm9aHg/+D3+vq/yWemodBpRJbf8pykQpKVtkWoKqZ+XHL8pF2imFx1MDAxNFRhyicl3P4utFx1MDAxNzk2ZDctr++kOXHSxtfuXHUwMDA12Vx1MDAwZfZ3nCPnXHUwMDAwe3JveN2Wx2mzTdfzLqORl3SrXHUwMDFmwOjTvH5cdTAwMTRcdTAwMDZcdTAwMWTnxrWj9tNcdTAwMTRk0ufVXG6DQavtO/14+GSSXHUwMDFh9KyGXHUwMDFijeI0jCepj3PweT1NeYBfklx1MDAxMSRccuFYUjHJiKtuXHUwMDEyTFx1MDAxMMZGSZHNeuzPTuBcdTAwMDVh3J/fiFx1MDAxM/+lPapbjU5cdTAwMGK65duTMlFo+f2eXHUwMDE1wjKl5YbjkUpNXHUwMDEwL7TRdtxWO4JMJjEyUqtM804y+URcdTAwMTLCmNQ6XHUwMDFkTNxo79BOXGbhz3TKQ6vrXHUwMDFjxlX8gedl5823x/P2ZDCpybBxyo90VHH5WsbU0lx1MDAxNlx1MDAwNj3berRcYqIo5kJzXHUwMDAyXHUwMDFkk5N8z/U7xea9oNFJjWgt09ZcdTAwMGKtl+m51mswzCqmL7BeaV2en/7VNp7fXHUwMDBlTy+ilpLnrb9X3HpcdTAwMTVBWlNNpoxXIK20wa803d9cdTAwMWPjNJzmtNnCMiNtNGdqltlO2yvD2lx1MDAxOPiQ72Svkzpp7YyRRc5DXG7NjEXcXHUwMDBm6ZXdXHR27k9cdTAwMWUu5V2d7J+esKONSblcdTAwMWbjb/PBoIxShrFFgSHXzyxcdTAwMGXwPFx1MDAxOEBcdTAwMGYwIUTQyiiYPeaVRoHhXHUwMDFjODQhn4JJjol8IVgopXFmkFwixiisMCOzqJxOYUJwxYUhRr9cdTAwMWVcdTAwMTK5jFx1MDAxN3C1MiB9hrzAPNNeXHUwMDA1fnTp/lx1MDAxMy9cdTAwMDbFudQ9q+t6o9xiJnZcdTAwMGJT2LVcXH8jl7rlua3YgjdcdTAwMWHQWyfMXHUwMDE5d+SChzQp0HVtO6tcdTAwMDJccmhcbl7nhIdVyDtcYt2W61ve1cyewLidg6eVXCIos1Z1q+/EuXG6LsWm43lurz9Tplx1MDAxOCfF1Cd8MjBXzjIz+Fx1MDAxYzzN5fbFMLDOvoyk7Zw6zePhl1x1MDAwZVtteFx1MDAwMjBcdTAwMTBjwlx1MDAxNIHJJTJcIoeWXCIqXHUwMDFkXHUwMDAwXHUwMDEzI+VcbtVsNkzDzHCsXHUwMDA04nP0aTprXHUwMDAySVwisZEs9Vx1MDAxOVx1MDAxNqhSdFkqZXdcdTAwMGVcdTAwMDbfdHNzXHUwMDEwPjyYi+PG4f63U5pRqU+zX/tYeX//puebPq53T2+PXHUwMDA3XHUwMDBmR9/6/d1avpWn9q0wXGaGL1M/XHLSY5atfmZcdTAwMWW4XGZcdTAwMDBLSqwrg2v2TK42uLhAXHUwMDEwp1x1MDAxOcwlNVRxnYOYVFxi89j3giBCS51Zi5+AWqlcdTAwMDBcdTAwMTKGXGJcYqBcdTAwMTGEKKJnoW5aXHUwMDAxKfiEXHUwMDA0VPk/pIBbS5K/Z2ShKH9bb6Z9RvFi6lx1MDAxMzw5hFx1MDAwM0rg6lx1MDAwMdpc
dTAwMWbmYuBcdTAwMWPcj05Ojq5ObprmoPaX3FxcbXhcdTAwMDJcdTAwMDUhUdxa4OAsUvrMvoJRpEHLpc9cdTAwMTK2bs5cYs5+RvpcYlx1MDAxNpqB+0zTuflcYtqn/nH656pNuC+3d3S4X/tjUFx1MDAxM1W17/C8eYRH3fqRjb+bmnV3eebX3GraV/pefevhr7X2yET7XHUwMDFk3bpcdTAwMTV/XHUwMDBisrW3OE1cdTAwMTWMiyx8l6Gpc/dVaFx1MDAxY8RT8JMqo3b2XHUwMDEyrTZqhYq3VbjUmHONM5hIRNUgTaikXHUwMDE4PNpyUa1cdTAwMDDiclHlSDBcdTAwMTBVRrUkLN7mqVx1MDAxNFgyJqHX/6XAcntJsvqM4lx1MDAxNGV1+81kXHUwMDE1vKy5ukpcYsFcdTAwMWNjTNI2n8No48xqs+udnebR7flX+/5cdTAwMGW3vnlcdTAwMTerjVFCJEF6VlhpXHUwMDEwVrosrKRN43Berq112qT1+mK0lTFcdTAwMDZ4wFx1MDAxZkxaL5h3fdbYbV9cdTAwMDanp5JcdTAwMWadPCh5M6osgXvS+LXuTV905PfWjXNf+25fL0RcdTAwMDJcdTAwMTWDIFx1MDAwNzRIZ6H2tpuqXHUwMDA0XHUwMDE2U1xuobWqXGaw2ZO54lx1MDAwMNNcdTAwMTKBi1xuY1UwWixYXHUwMDBlZqCCXHUwMDA09LFCaFlcdTAwMDFu5SookFx1MDAwMFx1MDAxMTRYc65cdTAwMTK3uYpcblx1MDAwMkNcdTAwMWHQQaj2XjKYWCrRmVx1MDAxM6hly+DukmTwXHUwMDE5eSjK4O6bySAnc1VcdTAwMTB0gWOqaDpLz2HUZ/R2cHRcdTAwMWV+XHQ6yrlsX9z25Oh2tTGqKWhOflx1MDAwZjWpyGGq5TObq1JxYptyXHUwMDE1tHG9ubBcYpNcdTAwMWIpwS35YLurd3ff+c1Vy1xme6PoXHUwMDE4e/w6uHJ23yDCrCCDylx1MDAxMIGzUFtKJEjnIUzHXGZrOK6ugrPncrVcdTAwMTEmXGKSmGHDtVx1MDAxMlpIk1x1MDAwN1x1MDAxYfig4IJyI1x1MDAxZWNB+Vx1MDAxYbSViyBFWlx1MDAxOVx1MDAwM3JLgdZcdTAwMTjP3HwoXHUwMDE1QcpcdTAwMThmmi/A9XyFXGLibDC6bFx1MDAxMdxZklxiPiNcdTAwMGZFXHUwMDEx3FmsXGI+0sNcZnyaufgkIFx1MDAwMGCVxlQ//C8/XHJaTYAyhUyCXGJVXHUwMDE0QY3G/uJcdTAwMTKvb2FUaHqCRq5cdTAwMTBcdTAwMTbFSHRcZkuK4U8y/F5cdTAwMTHhUnZu+pFcdTAwMTVG265vu36rWMXx7TQn0+Xx9cUqh1x1MDAxOFx08lx1MDAxYoO4/1x1MDAxOGFcdTAwMGWONeVCY0M5XHUwMDAzYs6Ualm9eDhTs+BZ/Wgn6HbdXGJcdTAwMDZ8XHUwMDE2uH5U7GUygq1cdTAwMThpbceamnVcdTAwMThDNq9cYsle/Ma8o5F+W0+tNvkx+f7np5mlN6dcZitJzdhU+oa17L8v5lx1MDAwZjX3+FSDx1x1MDAxNl9cdTAwMDCtvo1U7u6sJn1ogiCKXHUwMDA1l1VoxWDGpzjEaK5cYlxiXHUwMDE4XHUwMDA2jSeFfi2OSIREXHUwMDE0tJpreFxiIyRtKSVcdTAwMTRcdTAwMTF7XCJaXGJcdTAwMGVBuSTZy3RPvKKUgpiXsKVcXFx1MDAwYn1cdTAwMDWv/Iy3Oo9X5rFHuT5n2GNcdTAwMTMjwlxmpUQxQVxyjTf5p9lcdTAwMDNJKVx0x8AyMJ9AMVNjr8Ro5fvHeUajOnYkjWSaSSWpZrM6RZXU8VU3QSnouvrQXHUwMDFjN9fk42dz2tpcdTAwMTfFeCWXhsFcImRsp9U9ppO7O3wy
PKGHQ/+c3dp7dWmZ+mpTnlx1MDAxMlxm4YJjMlx1MDAxYTssitPc7Y1F01x1MDAxY0Yyf9U+XHJeXGaiwF6zdlx1MDAwZTRNXHUwMDAyrPfaN3hfZyn/slx1MDAwZlx1MDAwNO/iUsfPZmaVXHUwMDE3XHUwMDA154xkXHUwMDE34YxjKWTkXHUwMDA11yvLj+5XXHUwMDEyzkaBnk3fMFx1MDAxMWSpgU/ah/TaP1x1MDAwNl9cdPOZ/19FXHUwMDAwzVx1MDAxYlxmcvpcdTAwMGJjeFx1MDAwMU5cdTAwMDBcdTAwMDFth8iQgVx1MDAwZsA5zrjeJWHNL8JcdTAwMTY5msiY0sJUf26cQ+Kb+1pQVv2ooPx4cyVpglx1MDAxMMOQoFx1MDAxONxdiCBZZrBcdGFohFxyl0aD26s44fM3Ml9LXHUwMDFjXHUwMDA0XCJYxeFcdTAwMDHPW+Y2p7JEXHUwMDAyXHUwMDFkhYDBkKQzmVx1MDAwYqJPfGJcYsFcdTAwMTQ+Vo1PfuZ0+qV8Un5cdTAwMWGXXHUwMDBmdDC4s5xhzGNOXHUwMDExTNFMsV+bUObaWfxsTpvYPJ5ZXHUwMDFit7Bh9XqXXHUwMDExLPVkZcDkXFx7vD2dXHUwMDBlc+PedYbb5aBYXHUwMDFiT3dMXHUwMDE0TmKDP9Z+/Fx1MDAxZlmxpdwifQ== - - - - - mainABDC \ No newline at end of file diff --git a/assets/file.excalidraw_(7).svg b/assets/file.excalidraw_(7).svg deleted file mode 100644 index 9820549..0000000 --- a/assets/file.excalidraw_(7).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOWYW3PaOFx1MDAxNMff+Vx1MDAxNFxm+1x1MDAxYVxc3W3ljSSlkO6mTdKWdHc6XHUwMDE5YVx1MDAwYlAwtsc2t3Ty3Vc2XHUwMDA020CAZEKWdt2ZXHUwMDA2XHUwMDFmST7S0fn9dflZKpcr8TSQleNyRU5s4SonXHUwMDE048pRYlx1MDAxZskwUr6ni1D6XHUwMDFl+cPQTmv24jiIjt+9XHUwMDEzQWB0Vdz2/b5h+4NZM+nKgfTiSFf8R7+Xyz/T/3OOQmnHwuu6Mm2QXHUwMDE2LfnK2y58L/Vcbk3ETEChtaigojPtLJaOLu1cYjeSWUliqnz/Plwit63xl+uWXHUwMDE43oXtZvWqWW9kPjvKda/jqZv2KfL10LOyKFx1MDAwZf2+bCkn7lx1MDAxNfo0sz/VKvSH3Z4no2TscGH1XHUwMDAzYat4mthcdTAwMDBYWGdcdTAwMDE4LmeWSeFtWnib+T31XT9M/P5cdTAwMDFl8i/z3Fx1MDAxNna/q917zqJOXHUwMDFjXG4vXG5EqOdcIqs3flx1MDAxY1x1MDAxMTdMurD2pOr2Ym0mJPMo07hahGPELcJcdTAwMTZcdTAwMDWJm6DppPP7I1x1MDAwYmYoXHUwMDA2spm08Iaum4+I58wjUihoJ1x1MDAwNe+XUyWfLoWUieUkXHUwMDFiRW6KScuciFx1MDAwZjdn/MNl25zaft2pduqVRb2Ho/WfnTVcdTAwMWWdfFx1MDAxY9Sb51x1MDAxN77/7bx2O/3rtNa671x1MDAxNL08+lx1MDAxN2Hoj3Pfnf/Kxj9cZlx1MDAxYzHLRGhCRjhgiFx1MDAxMphFzVVefzlcdTAwMDaub/ez5C3lOvw8ZPBTyFiMXCJimpjsjEw0uJ18fN9cdTAwMWJ5oPd13GDihMJcdTAwMDY+bGRcdTAwMTAwX
HUwMDAwJczK5XPSkJO9wcO4UXC3XHUwMDAxXHUwMDFmrFx1MDAxM8FEXHUwMDFhoMPEZ9RcdTAwMDffXHUwMDA27Ob08tO4XHUwMDExg1x1MDAxNuq1aj34XHUwMDA2+Gz87qTFKFN3jeb91U2r/vf1UF2R6i+I5dMrXHUwMDE5R9CimCG0M5f0buxcXFb/XGb51d09bZjwU713e37YXFyahSVmv8tcdTAwMTlcdTAwMDZcdTAwMDYmeDckIbAsamG9lzhMJr9i7vsnXHUwMDEzR47vL1x1MDAxOP983jlcdTAwMWK9b/2G7Mz8P2c5o5xoanRHdsZms0RcdTAwMWQkNpBcdTAwMWF4iVx1MDAxYkJcZro3dKBpaC1aXHUwMDAzXHUwMDBlNTijy/AgQjiFjL7CepZl72NKoLnl4WmkXjM7cyFcdTAwMTVhfKI8R3ndYsfmZ5nmXHUwMDBlx4lk0L49jPJcdTAwMTOahEdcdTAwMDRJXHUwMDE3V0YmPWe7x827sZzHKjZcdTAwMTCzOOKMWYzoXHI7llVzpVx1MDAxN8RcdTAwMDCY8OwxV3rliig+9Vx1MDAwN1x1MDAwM1x1MDAxNevQfvaVXHUwMDE3L4cwjVUtQbcnxcr86lHly5ZcdTAwMTlcdTAwMGaSL1x1MDAxNqUx+1XOx02/LH7/OFpbu1xuMiqSXHUwMDA3aUq2tYEsn+zJo9nSaZ61K+X/vp50MVxugGkyuLt0bVbyg5QuTlxmktvTvI10oVxcQmdr/Vx1MDAxNlx1MDAxOdNcblx1MDAwNk2CX2FcdTAwMDfwy6nY5p1kXlP0uSp5LIS1slhcXFx1MDAxZmV0xMxcdTAwMWNzbyRu834gRjnHXHUwMDE0IH2eJYii313fXiBv1VTf1lx1MDAxMpE8L9K6wjZ1p9NccoSIYczN3W9cdTAwMWTW3+JcdTAwMWO01plcdTAwMDajXHUwMDE4I4AhWrp32KfgaYElXHUwMDE4U772qINWZI5yvVx1MDAxOVx1MDAwMDCnyXs66WxcdTAwMTO0rGfPXHUwMDEwtI7vxdfqPtVUULDWxUC508LcpWmq41erXHUwMDE0TDVXdZNsrdi6qzIsJHKsbOEuKlxmlOPkz/G29iOUJ8NdNn9cdTAwMTU/VF3lXHT3y2o39Ihl43GOoJGbpbaIZFKa2K3XhZBifcwlXGLuzuD6q6CDZlx1MDAxMGs9XHUwMDAzayGEXHUwMDAw71x1MDAxMUPMXHLOXHUwMDAxeuLGYVx1MDAxNUOoT61cdTAwMDCw/1x1MDAxNYdcdTAwMDPNzp5Q3HJHvYxisSf/XHSNXHUwMDE46UVcdTAwMTHjZ9zDr79cdTAwMDQ6aFx1MDAxYS3TMFx1MDAwMYCcXHUwMDFl9JJcYlx0xaZlWnDvN/KHw+LZnkDccim9XGbi2Vx1MDAwYikszfe7XHUwMDE1XHUwMDExXHUwMDA017GO5OJQoadOOfNwZNGrjJRcdTAwMWOfbM6j0pzuhCSZnuNcdTAwMWVKXHUwMDBm/1x1MDAwMkf/nKUifQ== - - - - - AmainD \ No newline at end of file diff --git a/assets/file.excalidraw_(8).svg b/assets/file.excalidraw_(8).svg deleted file mode 100644 index 49c4121..0000000 --- a/assets/file.excalidraw_(8).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - 
eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVaXXfaRlx1MDAxMH33r/Chr7G6s9+bN/xcdTAwMTHXJ6mT1Elqt+nJkUGAaiEpQlx1MDAxOOOe/PfOXG5cdTAwMDdcdCFhjMHBLbYxaFfa2Z25c+eu9M/O7m4jXHUwMDFkx17j5W7Du2m5gd9O3FHjhT1+7SVcdTAwMDM/XG6xiWbfXHUwMDA30TBpZT17aVx1MDAxYVx1MDAwZl7+/LNcdTAwMWLHTtdPL6PoymlF/clpXuD1vTBcdTAwMWRgxz/x++7uP9l7YaDEa6Vu2FxyvOyErClcdTAwMWZLgyhcdTAwMWY9jcJsXFwwoLRWXFySaVx1MDAwZn9wiOOlXlx1MDAxYps7bjDw8lx1MDAxNnuocTG8Vlx1MDAxMb+ln15ftvTB+Fx1MDAwM3nb/+NrPmzHXHUwMDBmgrN0XHUwMDFjZGZcclwinH3eNkiT6Mr73W+nPTt26XjdWUk07PZCb2Cnn5tcdTAwMTnFbstPx/Y6JD86WYNiv1x1MDAxYrtcdTAwMDBcXDtGaSG04IRxTqet9nyuuKNccuFaXHUwMDExJlx1MDAwMIRcdTAwMTQlw1x1MDAwZaIgSqxhP4Fnf3LTLt3WVVx1MDAxN+1cdTAwMGLbeVx1MDAxZle0daeT91x1MDAxOX2fLlx1MDAxM46mUlx1MDAxOFx1MDAwM1x1MDAxNIhhSk+79Dy/20uzPuBcYuBSaSNcdTAwMDRRXHUwMDFhcktcdTAwMDde5lx1MDAxMbSPSU1B5C3WgPiknUXHX7lcdTAwMWZcdTAwMTK3753YU8JhXHUwMDEwXHUwMDE0XHUwMDE3M2zfLeb3KMrjiN1cdTAwMWT5ls/Q9j8qx18xXHUwMDA2Z+Iw9W7S6dRcdTAwMGJBXHUwMDEzNdvH5lxya7Y77/bi66/Bq1x1MDAwZYO4Me337UX1ZScnu6ed8Wt6cMrOP/wmLoZ/XHUwMDFm0lt4PzvK9/HdJIlGhevefcqXZVx1MDAxOLfdSWyDXHUwMDAygVx1MDAxZVx1MDAwNy2oUtP2wFx1MDAwZq/Ka1x1MDAxNkStq1x1MDAxY1x1MDAwZTtcdTAwMDWD53A4M/9cdTAwMDJcdTAwMDSVMvVcdTAwMTBUwjCqXHJdXHUwMDFhgtWruVx1MDAwMlx1MDAwNOlcblx1MDAxMITVICjBUVJqobGvNlx1MDAwMDNcdTAwMTBcdTAwMTSEOZorXHUwMDBiXHUwMDBmwD+uXHUwMDFlg8A0ccNB7CZcdTAwMTixXHUwMDE1KFx1MDAwNOJQMY88xVx1MDAxY1nGmkCsKkWAPFx1MDAxZWozXHJzmLovPOVcdTAwMDPCM7cqXG7TM/82XHUwMDBiMTlz9JXb94PxjPuzuMW1a35cdTAwMGU/h4NcdTAwMTRteLmbeoPUXHUwMDBmu5/DOPGjXHUwMDA0XHUwMDFkjeNGSd9ccnZcdTAwMWIz5zVcdTAwMDO/a2O80cL5eMlM+Kc+XHUwMDEy4LRD32+3i1x1MDAxNNVCY1xcP/SSk2WYXHUwMDA1bej6oVx1MDAxYnxY0VZcXDvvl2mmdVxupHjpXHUwMDBlPNuKx6VZiO+FPGtcbum6XGZygVx1MDAwNCMw7JdcdTAwMDe56ERN+vGLSsKLw18v+VH/9rZ7td08XHUwMDBihGlHSsm1UFpyljPck/Isc1xmNYaB5ERSTC3VPKuYQFx1MDAxM1x1MDAxMF+Cyzw5f6dZXHUwMDA1XHUwMDFjXHUwMDE0XHUwMDA1liPnOdCsXGJH8rc3be72hoF834bu+Nq9eVx1MDAwMppdeN3zg875sVx1MDAxYVx1MDAxZPU+8qMvhvy6f3Peaq2FviW6XHUwMDEw0I+6iPFN0DeyVy2ypY1kQvXyXHUwMDE1
dLWXtpq+gVxi6ihqXHUwMDAwi1IhlSBiXHUwMDA22pa/jUD6NpKinjDllLNG/iZYRVx1MDAxMy61WpLEjaJcXEqs+n9cdTAwMTSJr1x1MDAxMqSPI/H9ZYhxQ1x1MDAxY35cdTAwMGZrlTn8gaaug8InOaZSJteCXFyBjTnQeVx1MDAxMN2H8cXJdMNcdTAwMThfmb1cdTAwMTHFXGJiXHUwMDAwKXCyOSlmXHUwMDEw58xRmkjFXHUwMDE11bhcdTAwMWP1XHUwMDEw71x1MDAxMN1cImR1iONAREnBNNEojFx1MDAxNM3FwFx1MDAxNOhcdTAwMDSVtLGYUlx1MDAxYzDhXHUwMDE4yDl+SuBWS1x1MDAxME3XgPt5XHUwMDAyp/dcdTAwMTP4RtJcdTAwMDFcIiVJ9/2wjTiZNexui2hcdTAwMTlcdTAwMTDapXBju9CO1MwuISOUXHUwMDFiQlmhRydqXHLtPPaIQ4QyWFBpMHbbXHUwMDAxy7u5yXth+36jXHUwMDE2V/dTo6SDkWpsvaw1MJ5L84JRaFx1MDAxMzHKXHUwMDEwXHUwMDAylEpkJIqd54xcbtxBelx1MDAxMPX7foqL/y7yw7S8yNlqNm066HnuXFxcdTAwMDTgpIpt5bxcdTAwMTHbK86Wavmn3Vx1MDAxY1rZl+nnv15U9t6rjfmsdT7c81x1MDAwYu5cdTAwMTT/ryRbUJLX5T2K4lx1MDAxN1x1MDAwNTt9gGxcdTAwMTlHwdnfsHdzTdzjY/WFXHUwMDFkXHUwMDA0zfdsy2VcdTAwMGJcdTAwMDftUCtbJEhcZqvCdtBcdTAwMDZ0S6fjtYyp0i1cdTAwMWF1i8RcdTAwMWFKMKjaokDXO1x1MDAxOFx1MDAxZUJcdTAwMDFFc7BjYYUmSY9qJjVRfCM5b2OipX9z2Dvg5KJcdTAwMTW0P42/nv7hvU7PPi0rLo5cXFx1MDAxOFxyz49Et7lvgt570fVGg1+e196gJLXbXHUwMDA2jFwiWUj2XHUwMDAwbVG9mNutLThDgFFMelhmMcHoLPxcdTAwMDRcdTAwMDFHMjxKXHUwMDExm1x1MDAxMrX6o+C3XnGB6Vx1MDAwMmlcdTAwMTPx+sPUxSpR+jh1cVAo2WMvY/0nU1x1MDAxN/eQS1ldPNDUjapcdTAwMGJgovZcdTAwMTaA1LZcIsSie2mYL95T2U55QcE4xGA2XHUwMDAzwEKGleRcdTAwMDUzXHUwMDBlQ4RcdTAwMDOnmmJ1p0t2rU9eMOlQhplcdTAwMDb5XHUwMDEz3yvlXHUwMDA1+lx1MDAxZYAxjlx1MDAwNYFhKHnm1Fx1MDAwNSWKXG5jXGJsZHtwy9VFfNa6XHUwMDFj9mNNP7JO6sGr8Tt6JedcdTAwMGJ5XHUwMDAwXHUwMDE0k8CMlFxuhZjRskpeYCWvbYkpKKZ+itld8NXUxZKSx979sVtUgKRcdTAwMDNcdTAwMWErOl5tXHUwMDE0I4CFXHUwMDE2VmKSo8Ig7JnLi9qYt6+5aF+nuKCy/tlcdTAwMDMkL0pccs9j9b6stzjytlNbUFx1MDAwNijt7EtLKrje6D2Rem3BXHUwMDFkXHUwMDE0+Nr6d3JXpEpeXHUwMDE4h1x1MDAwYvRcdTAwMDdow61cdTAwMDQppOBJ1pNcZucrTKHhOchcdTAwMGKqTuImXHUwMDA0+vjrJ9lcdTAwMTm9oX46ulx1MDAxOD/BvYv/umyhitRcIltcdTAwMGKmKebY5aFd7aXt1i1UXHUwMDEwh9tcIlx1MDAwMFx1MDAxMSOUmH2qSFx1MDAxMOpoQLWijKZY89DNPdNgpJM9K7SsaEGrsMrS4oc/1/B0ouUwV1x1MDAwMlx1MDAxNUKgh6u1IcVyXHUwMDBmZZVcdTAwMTXLQ+zcrFxcwTKhnrhcdTAwMDVTjEu2/K7g4ny3nXqFI4JcdTAwMDVWiowrI60uKVx0
XHUwMDE2hJamXHUwMDE0yyZkS8NYybD1XHRcdTAwMTaqXHUwMDFj++SIloIyzlSlYqGorFxml1x1MDAwNKnbYGdl5iSLvXXDsOX/qFhcdTAwMTZvXHUwMDFi5OJAoEdcdTAwMDXWYVgqK8nzu9iz2oBcdTAwMTPFXHUwMDAxKLe3Qlx1MDAwNGqIubkvJViWVFHG3urATGk9Lyrs2UNBwyRcdTAwMTGoru3OXHUwMDE511ihzVx1MDAxOfSstEp9uE+a51wiPb/iTvH/g1NeIVx1MDAwNMtcdFx1MDAwZlx1MDAxNSqTXHUwMDAy+PL3f9Xbq7fvXkt+dFx1MDAxMZ6mhydcdTAwMDE9XHUwMDFjXHUwMDBm+lue8KhhXHUwMDBls6lcZnOaMjD7iFx1MDAwN5P2XHUwMDExXHUwMDBmzrTGhGd/N5bvUC+hc0FcbsJMtVTRiELFrInlLtNcdTAwMTJHSSCGsI08Jf24bMclonlcdTAwMWTZrlxm0ZqW51x1MDAwNP4619vXnNPrcL9zd/mGXHUwMDFix2e2lJo6XHUwMDExg8Bv39WO+Vx1MDAxY1x1MDAxYte+N9pfXHUwMDFjpDt33rbA9bKo+Lbz7V8w10FSIn0= - - - - - Astate: testingpriority: normal Bstate: testingpriority: normalCstate: pendingpriority: normalDstate:pendingpriority: high \ No newline at end of file diff --git a/assets/file.excalidraw_(9).svg b/assets/file.excalidraw_(9).svg deleted file mode 100644 index 49c4121..0000000 --- a/assets/file.excalidraw_(9).svg +++ /dev/null @@ -1,21 +0,0 @@ - - - eyJ2ZXJzaW9uIjoiMSIsImVuY29kaW5nIjoiYnN0cmluZyIsImNvbXByZXNzZWQiOnRydWUsImVuY29kZWQiOiJ4nOVaXXfaRlx1MDAxMH33r/Chr7G6s9+bN/xcdTAwMTHXJ6mT1Elqt+nJkUGAaiEpQlx1MDAxOOOe/PfOXG5cdTAwMDdcdCFhjMHBLbYxaFfa2Z25c+eu9M/O7m4jXHUwMDFkx17j5W7Du2m5gd9O3FHjhT1+7SVcdTAwMDM/XG6xiWbfXHUwMDA30TBpZT17aVx1MDAxYVx1MDAwZl7+/LNcdTAwMWLHTtdPL6PoymlF/clpXuD1vTBcdTAwMWRgxz/x++7uP9l7YaDEa6Vu2FxyvOyErClcdTAwMWZLgyhcdTAwMWY9jcJsXFwwoLRWXFySaVx1MDAwZn9wiOOlXlx1MDAxYps7bjDw8lx1MDAxNnuocTG8Vlx1MDAxMb+ln15ftvTB+Fx1MDAwM3nb/+NrPmzHXHUwMDBmgrN0XHUwMDFjZGZcclwinH3eNkiT6Mr73W+nPTt26XjdWUk07PZCb2Cnn5tcdTAwMTnFbstPx/Y6JD86WYNiv1x1MDAxYrtcdTAwMDBcXDtGaSG04IRxTqet9nyuuKNccuFaXHUwMDExJlx1MDAwMIRcdTAwMTQlw1x1MDAwZaIgSqxhP4Fnf3LTLt3WVVx1MDAxN+1cdTAwMGLbeVx1MDAxZle0daeT91x1MDAxOX2fLlx1MDAxM46mUlx1MDAxOFx1MDAwM1x1MDAxNIhhSk+79Dy/20uzPuBcYuBSaSNcdTAwMDRRXHUwMDFhcktcdTAwMDde5lx1MDAxMbSPSU1B5C3WgPiknUXHX7lcdTAwMWZcdTAwMTK3753YU8JhXHUwMDEwXHUwMDE0XHUwMDE3M2zfLeb3KMrjiN1cdTAwMWT5ls/Q9j8qx18xXHUwMDA2Z+Iw9W7S6dRcdTAwMGJBXHUwMDEzNdvH5lxya7Y77/bi66/
Bq1x1MDAwZYO4Me337UX1ZScnu6ed8Wt6cMrOP/wmLoZ/XHUwMDFm0lt4PzvK9/HdJIlGhevefcqXZVx1MDAxOLfdSWyDXHUwMDAygVx1MDAxZVx1MDAwNy2oUtP2wFx1MDAwZq/Ka1x1MDAxNkStq1x1MDAxY1x1MDAwZTtcdTAwMDWD53A4M/9cdTAwMDJcdTAwMDSVMvVcdTAwMTBUwjCqXHJdXHUwMDFhgtWruVx1MDAwMlx1MDAwNOlcblx1MDAxMITVICjBUVJqobGvNlx1MDAwMDNcdTAwMTBcdTAwMTSEOZorXHUwMDBiXHUwMDBmwD+uXHUwMDFlg8A0ccNB7CZcdTAwMTixXHUwMDE1KFx1MDAwNOJQMY88xVx1MDAxY1nGmkCsKkWAPFx1MDAxZWozXHJzmLovPOVcdTAwMDPCM7cqXG7TM/82XHUwMDBiMTlz9JXb94PxjPuzuMW1a35cdTAwMGU/h4NcdTAwMTRteLmbeoPUXHUwMDBmu5/DOPGjXHUwMDA0XHUwMDFkjeNGSd9ccnZcdTAwMWIz5zVcdTAwMDO/a2O80cL5eMlM+Kc+XHUwMDEy4LRD32+3i1x1MDAxNNVCY1xcP/SSk2WYXHUwMDA1bej6oVx1MDAxYnxY0VZcXDvvl2mmdVxupHjpXHUwMDBlPNuKx6VZiO+FPGtcbum6XGZygVx1MDAwNCMw7JdcdTAwMDe56ERN+vGLSsKLw18v+VH/9rZ7td08XHUwMDBihGlHSsm1UFpyljPck/Isc1xmNYaB5ERSTC3VPKuYQFx1MDAxM1x1MDAxMF+Cyzw5f6dZXHUwMDA1XHUwMDFjXHUwMDE0XHUwMDA1liPnOdCsXGJH8rc3be72hoF834bu+Nq9eVx1MDAwMppdeN3zg875sVx1MDAxYVx1MDAxZPU+8qMvhvy6f3Peaq2FviW6XHUwMDEw0I+6iPFN0DeyVy2ypY1kQvXyXHUwMDE1dLWXtpq+gVxi6ihqXHUwMDAwi1IhlSBiXHUwMDA22pa/jUD6NpKinjDllLNG/iZYRVx1MDAxMy61WpLEjaJcXEqs+n9cdTAwMTSJr1x1MDAxMqSPI/H9ZYhxQ1x1MDAxY35cdTAwMGZrlTn8gaaug8InOaZSJteCXFyBjTnQeVx1MDAxMN2H8cXJdMNcdTAwMThfmb1cdTAwMTHFXGJiXHUwMDAwKXCyOSlmXHUwMDEw58xRmkjFXHUwMDE11bhcdTAwMWP1XHUwMDEw71x1MDAxMN1cImR1iONAREnBNNEojFx1MDAxNM3FwFx1MDAxNOhcdTAwMDSVtLGYUlx1MDAxYzDhXHUwMDE4yDl+SuBWS1x1MDAxME3XgPt5XHUwMDAyp/dcdTAwMTP4RtJcdTAwMDFcIiVJ9/2wjTiZNexui2hcdTAwMTlcdTAwMTDapXBju9CO1MwuISOUXHUwMDFiQlmhRydqXHLtPPaIQ4QyWFBpMHbbXHUwMDAxy7u5yXth+36jXHUwMDE2V/dTo6SDkWpsvaw1MJ5L84JRaFx1MDAxMzHKXHUwMDEwXHUwMDAylEpkJIqd54xcbtxBelx1MDAxMPX7foqL/y7yw7S8yNlqNm066HnuXFxcdTAwMDTgpIpt5bxcdTAwMTHbK86Wavmn3Vx1MDAxY1rZl+nnv15U9t6rjfmsdT7c81x1MDAwYu5cdTAwMTT/ryRbUJLX5T2K4lx1MDAxN1x1MDAwNTt9gGxcdTAwMTlHwdnfsHdzTdzjY/WFXHUwMDFkXHUwMDA0zfdsy2VcdTAwMGJcdTAwMDftUCtbJEhcZqvCdtBcdTAwMDZ0S6fjtYyp0i1cdTAwMWF1i8RcdTAwMWFKMKjaokDXO1x1MDAxOFx1MDAxZUJcdTAwMDFFc7BjYYUmSY9qJjVRfCM5b2OipX9z2Dvg5KJcdTAwMTW0P42/nv7hvU7PPi0rLo5cXFx1MDAxOFxyz49Et7lvgt570fVGg1+
e196gJLXbXHUwMDA2jFwiWUj2XHUwMDAwbVG9mNutLThDgFFMelhmMcHoLPxcdTAwMDRcdTAwMDFHMjxKXHUwMDExm1x1MDAxMrX6o+C3XnGB6Vx1MDAwMmlcdTAwMTPx+sPUxSpR+jh1cVAo2WMvY/0nU1x1MDAxN/eQS1ldPNDUjapcdTAwMGJgovZcdTAwMTaA1LZcIsSie2mYL95T2U55QcE4xGA2XHUwMDAzwEKGleRcdTAwMDUzXHUwMDBlQ4RcdTAwMDOnmmJ1p0t2rU9eMOlQhplcdTAwMDb5XHUwMDEz3yvlXHUwMDA1+lx1MDAxZYAxjlx1MDAwNYFhKHnm1Fx1MDAwNSWKXG5jXGJsZHtwy9VFfNa6XHUwMDFj9mNNP7JO6sGr8Tt6JedcdTAwMGJ5XHUwMDAwXHUwMDE0k8CMlFxuhZjRskpeYCWvbYkpKKZ+itld8NXUxZKSx979sVtUgKRcdTAwMDNcdTAwMWErOl5tXHUwMDE0I4CFXHUwMDE2VmKSo8Ig7JnLi9qYt6+5aF+nuKCy/tlcdTAwMDMkL0pccs9j9b6stzjytlNbUFx1MDAwNijt7EtLKrje6D2Rem3BXHUwMDFkXHUwMDE0+Nr6d3JXpEpeXHUwMDE4h1x1MDAwYvRcdTAwMDdow61cdTAwMDQppOBJ1pNcZucrTKHhOchcdTAwMGKqTuImXHUwMDA0+vjrJ9lcdTAwMTm9oX46ulx1MDAxOD/BvYv/umyhitRcIltcdTAwMGKmKebY5aFd7aXt1i1UXHUwMDEwh9tcIlx1MDAwMFx1MDAxMSOUmH2qSFx1MDAxMOpoQLWijKZY89DNPdNgpJM9K7SsaEGrsMrS4oc/1/B0ouUwV1x1MDAwMlx1MDAxNUKgh6u1IcVyXHUwMDBmZZVcdTAwMTXLQ+zcrFxcwTKhnrhcdTAwMDVTjEu2/K7g4ny3nXqFI4JcdTAwMDVWiowrI60uKVx0XHUwMDE2hJamXHUwMDE0yyZkS8NYybD1XHRcdTAwMTaqXHUwMDFj++SIloIyzlSlYqGorFxml1x1MDAwNKnbYGdl5iSLvXXDsOX/qFhcdTAwMTZvXHUwMDFi5OJAoEdcdTAwMDXWYVgqK8nzu9iz2oBcdTAwMTPFXHUwMDAxKLe3Qlx1MDAwNGqIubkvJViWVFHG3urATGk9Lyrs2UNBwyRcdTAwMTGoru3OXHUwMDE511ihzVx1MDAxOfSstEp9uE+a51wiPb/iTvH/g1NeIVx1MDAwNMtcdFx1MDAwZlx1MDAxNSqTXHUwMDAy+PL3f9Xbq7fvXkt+dFx1MDAxMZ6mhydcdTAwMDE9XHUwMDFjXHUwMDBm+lue8KhhXHUwMDBls6lcZnOaMjD7iFx1MDAwN5P2XHUwMDExXHUwMDBmzrTGhGd/N5bvUC+hc0FcbsJMtVTRiELFrInlLtNcdTAwMTJHSSCGsI08Jf24bMclonlcdTAwMWTZrlxm0ZqW51x1MDAwNP4619vXnNPrcL9zd/mGXHUwMDFix2e2lJo6XHUwMDExg8Bv39WO+Vx1MDAxY1x1MDAxYte+N9pfXHUwMDFjpDt33rbA9bKo+Lbz7V8w10FSIn0= - - - - - Astate: testingpriority: normal Bstate: testingpriority: normalCstate: pendingpriority: normalDstate:pendingpriority: high \ No newline at end of file diff --git a/assets/flake8.gif b/assets/flake8.gif deleted file mode 100644 index 890c89f..0000000 Binary files a/assets/flake8.gif and /dev/null differ diff --git a/assets/flaky-fullscreen.png 
b/assets/flaky-fullscreen.png deleted file mode 100644 index 3c95b11..0000000 Binary files a/assets/flaky-fullscreen.png and /dev/null differ diff --git a/assets/flaky-tests-failure-details-dark.png b/assets/flaky-tests-failure-details-dark.png deleted file mode 100644 index fa151ff..0000000 Binary files a/assets/flaky-tests-failure-details-dark.png and /dev/null differ diff --git a/assets/flaky-tests-failure-details-light.png b/assets/flaky-tests-failure-details-light.png deleted file mode 100644 index 25be67e..0000000 Binary files a/assets/flaky-tests-failure-details-light.png and /dev/null differ diff --git a/assets/flaky-tests-list-dark.png b/assets/flaky-tests-list-dark.png deleted file mode 100644 index 9dd2913..0000000 Binary files a/assets/flaky-tests-list-dark.png and /dev/null differ diff --git a/assets/flaky-tests-list-light.png b/assets/flaky-tests-list-light.png deleted file mode 100644 index 2556309..0000000 Binary files a/assets/flaky-tests-list-light.png and /dev/null differ diff --git a/assets/flaky-tests-overview-table-v2-dark.png b/assets/flaky-tests-overview-table-v2-dark.png deleted file mode 100644 index 31e8ed1..0000000 Binary files a/assets/flaky-tests-overview-table-v2-dark.png and /dev/null differ diff --git a/assets/flaky-tests-overview-table-v2-light.png b/assets/flaky-tests-overview-table-v2-light.png deleted file mode 100644 index 9624baa..0000000 Binary files a/assets/flaky-tests-overview-table-v2-light.png and /dev/null differ diff --git a/assets/gemini.png b/assets/gemini.png deleted file mode 100644 index 239ea6a..0000000 Binary files a/assets/gemini.png and /dev/null differ diff --git a/assets/github-actions-secrets b/assets/github-actions-secrets deleted file mode 100644 index 23ec849..0000000 Binary files a/assets/github-actions-secrets and /dev/null differ diff --git a/assets/github-comment-dark.png b/assets/github-comment-dark.png deleted file mode 100644 index ca9a7eb..0000000 Binary files a/assets/github-comment-dark.png and 
/dev/null differ diff --git a/assets/github-comment-dark_(1).png b/assets/github-comment-dark_(1).png deleted file mode 100644 index d7e9113..0000000 Binary files a/assets/github-comment-dark_(1).png and /dev/null differ diff --git a/assets/github-comment-light.png b/assets/github-comment-light.png deleted file mode 100644 index 5bbf080..0000000 Binary files a/assets/github-comment-light.png and /dev/null differ diff --git a/assets/github-comment-light_(1).png b/assets/github-comment-light_(1).png deleted file mode 100644 index 6f11ddd..0000000 Binary files a/assets/github-comment-light_(1).png and /dev/null differ diff --git a/assets/github.png b/assets/github.png deleted file mode 100644 index 39bf172..0000000 Binary files a/assets/github.png and /dev/null differ diff --git a/assets/github_(1).png b/assets/github_(1).png deleted file mode 100644 index 5e5e35d..0000000 Binary files a/assets/github_(1).png and /dev/null differ diff --git a/assets/github_copilot.png b/assets/github_copilot.png deleted file mode 100644 index 65e87e6..0000000 Binary files a/assets/github_copilot.png and /dev/null differ diff --git a/assets/gitlab-logo.png b/assets/gitlab-logo.png deleted file mode 100644 index 767898b..0000000 Binary files a/assets/gitlab-logo.png and /dev/null differ diff --git a/assets/gitlab.png b/assets/gitlab.png deleted file mode 100644 index 159f50c..0000000 Binary files a/assets/gitlab.png and /dev/null differ diff --git a/assets/gitlab_(1).png b/assets/gitlab_(1).png deleted file mode 100644 index 441b51e..0000000 Binary files a/assets/gitlab_(1).png and /dev/null differ diff --git a/assets/gitleaks.gif b/assets/gitleaks.gif deleted file mode 100644 index 7946ce5..0000000 Binary files a/assets/gitleaks.gif and /dev/null differ diff --git a/assets/go-junit-report-logo.png b/assets/go-junit-report-logo.png deleted file mode 100644 index 2197b24..0000000 Binary files a/assets/go-junit-report-logo.png and /dev/null differ diff --git a/assets/go-junit-report.png 
b/assets/go-junit-report.png deleted file mode 100644 index e3c2d18..0000000 Binary files a/assets/go-junit-report.png and /dev/null differ diff --git a/assets/google-cloud-build.png b/assets/google-cloud-build.png deleted file mode 100644 index 5077f65..0000000 Binary files a/assets/google-cloud-build.png and /dev/null differ diff --git a/assets/googletest.png b/assets/googletest.png deleted file mode 100644 index 62c643f..0000000 Binary files a/assets/googletest.png and /dev/null differ diff --git a/assets/gotestsum.png b/assets/gotestsum.png deleted file mode 100644 index 18e9ebe..0000000 Binary files a/assets/gotestsum.png and /dev/null differ diff --git a/assets/gradle.png b/assets/gradle.png deleted file mode 100644 index 9aef59e..0000000 Binary files a/assets/gradle.png and /dev/null differ diff --git a/assets/idQDcTgM6U_1761333212059.png b/assets/idQDcTgM6U_1761333212059.png deleted file mode 100644 index 3c291c3..0000000 Binary files a/assets/idQDcTgM6U_1761333212059.png and /dev/null differ diff --git a/assets/image-2.png b/assets/image-2.png deleted file mode 100644 index b41f129..0000000 Binary files a/assets/image-2.png and /dev/null differ diff --git a/assets/image-3.png b/assets/image-3.png deleted file mode 100644 index 3bd2180..0000000 Binary files a/assets/image-3.png and /dev/null differ diff --git a/assets/image-4.png b/assets/image-4.png deleted file mode 100644 index 95c61d5..0000000 Binary files a/assets/image-4.png and /dev/null differ diff --git a/assets/image-5.png b/assets/image-5.png deleted file mode 100644 index 31d14d3..0000000 Binary files a/assets/image-5.png and /dev/null differ diff --git a/assets/image.png b/assets/image.png deleted file mode 100644 index 6e6e757..0000000 Binary files a/assets/image.png and /dev/null differ diff --git a/assets/image_(1).png b/assets/image_(1).png deleted file mode 100644 index 766dde7..0000000 Binary files a/assets/image_(1).png and /dev/null differ diff --git a/assets/image_(1)_(1).png 
b/assets/image_(1)_(1).png deleted file mode 100644 index 26ed4e5..0000000 Binary files a/assets/image_(1)_(1).png and /dev/null differ diff --git a/assets/image_(1)_(1)_(1).png b/assets/image_(1)_(1)_(1).png deleted file mode 100644 index 2520759..0000000 Binary files a/assets/image_(1)_(1)_(1).png and /dev/null differ diff --git a/assets/image_(1)_(1)_(1)_(1).png b/assets/image_(1)_(1)_(1)_(1).png deleted file mode 100644 index b9f2ff5..0000000 Binary files a/assets/image_(1)_(1)_(1)_(1).png and /dev/null differ diff --git a/assets/image_(1)_(1)_(1)_(1)_(1).png b/assets/image_(1)_(1)_(1)_(1)_(1).png deleted file mode 100644 index 1dad00c..0000000 Binary files a/assets/image_(1)_(1)_(1)_(1)_(1).png and /dev/null differ diff --git a/assets/image_(10).png b/assets/image_(10).png deleted file mode 100644 index 300b6b5..0000000 Binary files a/assets/image_(10).png and /dev/null differ diff --git a/assets/image_(11).png b/assets/image_(11).png deleted file mode 100644 index 55b3f47..0000000 Binary files a/assets/image_(11).png and /dev/null differ diff --git a/assets/image_(12).png b/assets/image_(12).png deleted file mode 100644 index 2cbaec3..0000000 Binary files a/assets/image_(12).png and /dev/null differ diff --git a/assets/image_(13).png b/assets/image_(13).png deleted file mode 100644 index c4e88ab..0000000 Binary files a/assets/image_(13).png and /dev/null differ diff --git a/assets/image_(14).png b/assets/image_(14).png deleted file mode 100644 index 57ae7fd..0000000 Binary files a/assets/image_(14).png and /dev/null differ diff --git a/assets/image_(15).png b/assets/image_(15).png deleted file mode 100644 index 16b95a8..0000000 Binary files a/assets/image_(15).png and /dev/null differ diff --git a/assets/image_(16).png b/assets/image_(16).png deleted file mode 100644 index dbd8b50..0000000 Binary files a/assets/image_(16).png and /dev/null differ diff --git a/assets/image_(17).png b/assets/image_(17).png deleted file mode 100644 index 6040926..0000000 Binary 
files a/assets/image_(17).png and /dev/null differ diff --git a/assets/image_(18).png b/assets/image_(18).png deleted file mode 100644 index 6dac4b8..0000000 Binary files a/assets/image_(18).png and /dev/null differ diff --git a/assets/image_(19).png b/assets/image_(19).png deleted file mode 100644 index fb5c9d6..0000000 Binary files a/assets/image_(19).png and /dev/null differ diff --git a/assets/image_(2).png b/assets/image_(2).png deleted file mode 100644 index b44dd3e..0000000 Binary files a/assets/image_(2).png and /dev/null differ diff --git a/assets/image_(2)_(1).png b/assets/image_(2)_(1).png deleted file mode 100644 index a0cbb0d..0000000 Binary files a/assets/image_(2)_(1).png and /dev/null differ diff --git a/assets/image_(2)_(1)_(1).png b/assets/image_(2)_(1)_(1).png deleted file mode 100644 index a0cbb0d..0000000 Binary files a/assets/image_(2)_(1)_(1).png and /dev/null differ diff --git a/assets/image_(2)_(2).png b/assets/image_(2)_(2).png deleted file mode 100644 index 070db44..0000000 Binary files a/assets/image_(2)_(2).png and /dev/null differ diff --git a/assets/image_(20).png b/assets/image_(20).png deleted file mode 100644 index 9e8061f..0000000 Binary files a/assets/image_(20).png and /dev/null differ diff --git a/assets/image_(21).png b/assets/image_(21).png deleted file mode 100644 index b9f2ff5..0000000 Binary files a/assets/image_(21).png and /dev/null differ diff --git a/assets/image_(22).png b/assets/image_(22).png deleted file mode 100644 index 1dad00c..0000000 Binary files a/assets/image_(22).png and /dev/null differ diff --git a/assets/image_(23).png b/assets/image_(23).png deleted file mode 100644 index 070db44..0000000 Binary files a/assets/image_(23).png and /dev/null differ diff --git a/assets/image_(24).png b/assets/image_(24).png deleted file mode 100644 index e1bdbe4..0000000 Binary files a/assets/image_(24).png and /dev/null differ diff --git a/assets/image_(25).png b/assets/image_(25).png deleted file mode 100644 index 
ad6b257..0000000 Binary files a/assets/image_(25).png and /dev/null differ diff --git a/assets/image_(26).png b/assets/image_(26).png deleted file mode 100644 index fbad772..0000000 Binary files a/assets/image_(26).png and /dev/null differ diff --git a/assets/image_(27).png b/assets/image_(27).png deleted file mode 100644 index 87bf998..0000000 Binary files a/assets/image_(27).png and /dev/null differ diff --git a/assets/image_(28).png b/assets/image_(28).png deleted file mode 100644 index 5e20a26..0000000 Binary files a/assets/image_(28).png and /dev/null differ diff --git a/assets/image_(29).png b/assets/image_(29).png deleted file mode 100644 index b20755f..0000000 Binary files a/assets/image_(29).png and /dev/null differ diff --git a/assets/image_(3).png b/assets/image_(3).png deleted file mode 100644 index 26ed4e5..0000000 Binary files a/assets/image_(3).png and /dev/null differ diff --git a/assets/image_(3)_(1).png b/assets/image_(3)_(1).png deleted file mode 100644 index beb7392..0000000 Binary files a/assets/image_(3)_(1).png and /dev/null differ diff --git a/assets/image_(3)_(1)_(1).png b/assets/image_(3)_(1)_(1).png deleted file mode 100644 index beb7392..0000000 Binary files a/assets/image_(3)_(1)_(1).png and /dev/null differ diff --git a/assets/image_(30).png b/assets/image_(30).png deleted file mode 100644 index cafd52e..0000000 Binary files a/assets/image_(30).png and /dev/null differ diff --git a/assets/image_(31).png b/assets/image_(31).png deleted file mode 100644 index 70de42f..0000000 Binary files a/assets/image_(31).png and /dev/null differ diff --git a/assets/image_(32).png b/assets/image_(32).png deleted file mode 100644 index 3e702ed..0000000 Binary files a/assets/image_(32).png and /dev/null differ diff --git a/assets/image_(33).png b/assets/image_(33).png deleted file mode 100644 index 9b059e2..0000000 Binary files a/assets/image_(33).png and /dev/null differ diff --git a/assets/image_(34).png b/assets/image_(34).png deleted file mode 
100644 index f465dfc..0000000 Binary files a/assets/image_(34).png and /dev/null differ diff --git a/assets/image_(35).png b/assets/image_(35).png deleted file mode 100644 index 51a253d..0000000 Binary files a/assets/image_(35).png and /dev/null differ diff --git a/assets/image_(36).png b/assets/image_(36).png deleted file mode 100644 index 7bfdd27..0000000 Binary files a/assets/image_(36).png and /dev/null differ diff --git a/assets/image_(37).png b/assets/image_(37).png deleted file mode 100644 index 4599f0e..0000000 Binary files a/assets/image_(37).png and /dev/null differ diff --git a/assets/image_(38).png b/assets/image_(38).png deleted file mode 100644 index b642833..0000000 Binary files a/assets/image_(38).png and /dev/null differ diff --git a/assets/image_(39).png b/assets/image_(39).png deleted file mode 100644 index 2749ed4..0000000 Binary files a/assets/image_(39).png and /dev/null differ diff --git a/assets/image_(4).png b/assets/image_(4).png deleted file mode 100644 index 3371635..0000000 Binary files a/assets/image_(4).png and /dev/null differ diff --git a/assets/image_(40).png b/assets/image_(40).png deleted file mode 100644 index 4fc4471..0000000 Binary files a/assets/image_(40).png and /dev/null differ diff --git a/assets/image_(41).png b/assets/image_(41).png deleted file mode 100644 index b2c1f40..0000000 Binary files a/assets/image_(41).png and /dev/null differ diff --git a/assets/image_(42).png b/assets/image_(42).png deleted file mode 100644 index 90c5a89..0000000 Binary files a/assets/image_(42).png and /dev/null differ diff --git a/assets/image_(43).png b/assets/image_(43).png deleted file mode 100644 index 4b93564..0000000 Binary files a/assets/image_(43).png and /dev/null differ diff --git a/assets/image_(44).png b/assets/image_(44).png deleted file mode 100644 index adf748f..0000000 Binary files a/assets/image_(44).png and /dev/null differ diff --git a/assets/image_(45).png b/assets/image_(45).png deleted file mode 100644 index 
44bd25f..0000000 Binary files a/assets/image_(45).png and /dev/null differ diff --git a/assets/image_(46).png b/assets/image_(46).png deleted file mode 100644 index fa74940..0000000 Binary files a/assets/image_(46).png and /dev/null differ diff --git a/assets/image_(47).png b/assets/image_(47).png deleted file mode 100644 index 545f2dd..0000000 Binary files a/assets/image_(47).png and /dev/null differ diff --git a/assets/image_(48).png b/assets/image_(48).png deleted file mode 100644 index 4e63917..0000000 Binary files a/assets/image_(48).png and /dev/null differ diff --git a/assets/image_(49).png b/assets/image_(49).png deleted file mode 100644 index 441035b..0000000 Binary files a/assets/image_(49).png and /dev/null differ diff --git a/assets/image_(5).png b/assets/image_(5).png deleted file mode 100644 index c9b6e57..0000000 Binary files a/assets/image_(5).png and /dev/null differ diff --git a/assets/image_(50).png b/assets/image_(50).png deleted file mode 100644 index a1eb06e..0000000 Binary files a/assets/image_(50).png and /dev/null differ diff --git a/assets/image_(51).png b/assets/image_(51).png deleted file mode 100644 index 0fcfcb1..0000000 Binary files a/assets/image_(51).png and /dev/null differ diff --git a/assets/image_(6).png b/assets/image_(6).png deleted file mode 100644 index 2520759..0000000 Binary files a/assets/image_(6).png and /dev/null differ diff --git a/assets/image_(7).png b/assets/image_(7).png deleted file mode 100644 index 51fdb13..0000000 Binary files a/assets/image_(7).png and /dev/null differ diff --git a/assets/image_(8).png b/assets/image_(8).png deleted file mode 100644 index b2ac6b9..0000000 Binary files a/assets/image_(8).png and /dev/null differ diff --git a/assets/image_(9).png b/assets/image_(9).png deleted file mode 100644 index 6a896ce..0000000 Binary files a/assets/image_(9).png and /dev/null differ diff --git a/assets/images.png b/assets/images.png deleted file mode 100644 index b344667..0000000 Binary files 
a/assets/images.png and /dev/null differ diff --git a/assets/investigate-flaky-tests-setting.png b/assets/investigate-flaky-tests-setting.png deleted file mode 100644 index dd283b6..0000000 Binary files a/assets/investigate-flaky-tests-setting.png and /dev/null differ diff --git a/assets/investigate-flaky-tests.png b/assets/investigate-flaky-tests.png deleted file mode 100644 index b3baac4..0000000 Binary files a/assets/investigate-flaky-tests.png and /dev/null differ diff --git a/assets/isort.gif b/assets/isort.gif deleted file mode 100644 index dfdb096..0000000 Binary files a/assets/isort.gif and /dev/null differ diff --git a/assets/jasmine-logo.png b/assets/jasmine-logo.png deleted file mode 100644 index fd93d4b..0000000 Binary files a/assets/jasmine-logo.png and /dev/null differ diff --git a/assets/jasmine.png b/assets/jasmine.png deleted file mode 100644 index d41c90e..0000000 Binary files a/assets/jasmine.png and /dev/null differ diff --git a/assets/jenkins-padded.png b/assets/jenkins-padded.png deleted file mode 100644 index ab6d31f..0000000 Binary files a/assets/jenkins-padded.png and /dev/null differ diff --git a/assets/jenkins.png b/assets/jenkins.png deleted file mode 100644 index f23f9fe..0000000 Binary files a/assets/jenkins.png and /dev/null differ diff --git a/assets/jenkins_(1).png b/assets/jenkins_(1).png deleted file mode 100644 index c0a3cee..0000000 Binary files a/assets/jenkins_(1).png and /dev/null differ diff --git a/assets/jest.png b/assets/jest.png deleted file mode 100644 index 4839928..0000000 Binary files a/assets/jest.png and /dev/null differ diff --git a/assets/jira-connect-form-dark.png b/assets/jira-connect-form-dark.png deleted file mode 100644 index 26e80bd..0000000 Binary files a/assets/jira-connect-form-dark.png and /dev/null differ diff --git a/assets/jira-connect-form-light.png b/assets/jira-connect-form-light.png deleted file mode 100644 index 596f139..0000000 Binary files a/assets/jira-connect-form-light.png and /dev/null 
differ diff --git a/assets/jira-ticket-creation-dark.png b/assets/jira-ticket-creation-dark.png deleted file mode 100644 index 0adeb36..0000000 Binary files a/assets/jira-ticket-creation-dark.png and /dev/null differ diff --git a/assets/jira-ticket-creation-light.png b/assets/jira-ticket-creation-light.png deleted file mode 100644 index 4293699..0000000 Binary files a/assets/jira-ticket-creation-light.png and /dev/null differ diff --git a/assets/jira.png b/assets/jira.png deleted file mode 100644 index 26107c8..0000000 Binary files a/assets/jira.png and /dev/null differ diff --git a/assets/job-details-view.png b/assets/job-details-view.png deleted file mode 100644 index 5b34a25..0000000 Binary files a/assets/job-details-view.png and /dev/null differ diff --git a/assets/karma.png b/assets/karma.png deleted file mode 100644 index ac3cf50..0000000 Binary files a/assets/karma.png and /dev/null differ diff --git a/assets/key-metrics-dark.png b/assets/key-metrics-dark.png deleted file mode 100644 index 1a5e04c..0000000 Binary files a/assets/key-metrics-dark.png and /dev/null differ diff --git a/assets/key-metrics-light.png b/assets/key-metrics-light.png deleted file mode 100644 index e7e1d4c..0000000 Binary files a/assets/key-metrics-light.png and /dev/null differ diff --git a/assets/kotest.png b/assets/kotest.png deleted file mode 100644 index 1efc492..0000000 Binary files a/assets/kotest.png and /dev/null differ diff --git a/assets/labelled-flaky-dark.png b/assets/labelled-flaky-dark.png deleted file mode 100644 index 8cd4649..0000000 Binary files a/assets/labelled-flaky-dark.png and /dev/null differ diff --git a/assets/labelled-flaky-light.png b/assets/labelled-flaky-light.png deleted file mode 100644 index f3d6f63..0000000 Binary files a/assets/labelled-flaky-light.png and /dev/null differ diff --git a/assets/labelled_as_flaky.png b/assets/labelled_as_flaky.png deleted file mode 100644 index 2819924..0000000 Binary files a/assets/labelled_as_flaky.png and /dev/null 
differ diff --git a/assets/labelled_as_flaky_(1).png b/assets/labelled_as_flaky_(1).png deleted file mode 100644 index 57d03dc..0000000 Binary files a/assets/labelled_as_flaky_(1).png and /dev/null differ diff --git a/assets/linear-integration-dark.png b/assets/linear-integration-dark.png deleted file mode 100644 index 41813a6..0000000 Binary files a/assets/linear-integration-dark.png and /dev/null differ diff --git a/assets/linear-integration-light.png b/assets/linear-integration-light.png deleted file mode 100644 index 5b4191f..0000000 Binary files a/assets/linear-integration-light.png and /dev/null differ diff --git a/assets/linear-ticket-creation-dark.png b/assets/linear-ticket-creation-dark.png deleted file mode 100644 index 38305f6..0000000 Binary files a/assets/linear-ticket-creation-dark.png and /dev/null differ diff --git a/assets/linear-ticket-creation-light.png b/assets/linear-ticket-creation-light.png deleted file mode 100644 index 438e217..0000000 Binary files a/assets/linear-ticket-creation-light.png and /dev/null differ diff --git a/assets/linear-v2.png b/assets/linear-v2.png deleted file mode 100644 index 3a341cc..0000000 Binary files a/assets/linear-v2.png and /dev/null differ diff --git a/assets/linear.png b/assets/linear.png deleted file mode 100644 index bc889b3..0000000 Binary files a/assets/linear.png and /dev/null differ diff --git a/assets/logo-with-wordmark.svg b/assets/logo-with-wordmark.svg deleted file mode 100644 index fc7972f..0000000 --- a/assets/logo-with-wordmark.svg +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/assets/manual-test-mark.png b/assets/manual-test-mark.png deleted file mode 100644 index bb1ed7d..0000000 Binary files a/assets/manual-test-mark.png and /dev/null differ diff --git a/assets/manual-ticket-creation-dark.png b/assets/manual-ticket-creation-dark.png deleted file mode 100644 index ad25dd6..0000000 Binary files a/assets/manual-ticket-creation-dark.png 
and /dev/null differ diff --git a/assets/manual-ticket-creation-light.png b/assets/manual-ticket-creation-light.png deleted file mode 100644 index e44ff93..0000000 Binary files a/assets/manual-ticket-creation-light.png and /dev/null differ diff --git a/assets/maven.png b/assets/maven.png deleted file mode 100644 index 3c08e4f..0000000 Binary files a/assets/maven.png and /dev/null differ diff --git a/assets/merge b/assets/merge deleted file mode 100644 index bbccadb..0000000 --- a/assets/merge +++ /dev/null @@ -1,237 +0,0 @@ -Overview
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Trunk Merge is a service that enables your repository to adhere to The “Not Rocket Science Rule Of Software Engineering”: Automatically maintain a repository of code that always passes all the tests.

-

How It Works

-

Trunk Merge adds an additional test pass before merging pull requests. For example, a typical developer workflow for authoring a feature and merging the code to a repository might look like this:

-
    -
  1. Create a feature branch from the main branch
  2. -
  3. Author a Change
  4. -
  5. Open a Pull Request
  6. -
  7. Tests are Run
  8. -
  9. Code Review
  10. -
  11. When tests & code review pass, Author merges request
  12. -
-

In a repository with many contributors, the state of the main branch will have advanced significantly after step 1. Because of this, the results of the tests run in step 4 are out of date. Merge solves for this by adding another test pass to ensure no broken code lands on your main branch. A developer workflow with Merge integrated might look like this:

-
    -
  1. Create a feature branch from the main branch
  2. -
  3. Author a Change
  4. -
  5. Open a Pull Request
  6. -
  7. Tests are Run
  8. -
  9. Code Review
  10. -
  11. When tests & code review pass, Author submits pull request to Merge
  12. -
  13. Tests are run on a branch consisting of head of main + the pull request changes
  14. -
  15. If the tests pass, the pull request is merged automatically
  16. -
-

Demo

-

Watch this 5 minute demo to see how it works in practice

-

What’s Next

Take 5 minutes and set up Merge in your repository

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\ No newline at end of file diff --git a/assets/merge-add-repo.png b/assets/merge-add-repo.png deleted file mode 100644 index 12f6b05..0000000 Binary files a/assets/merge-add-repo.png and /dev/null differ diff --git a/assets/merge-add-repo_(1).png b/assets/merge-add-repo_(1).png deleted file mode 100644 index 450a53a..0000000 Binary files a/assets/merge-add-repo_(1).png and /dev/null differ diff --git a/assets/merge-anti-flake-protection.png b/assets/merge-anti-flake-protection.png deleted file mode 100644 index 92d085e..0000000 Binary files a/assets/merge-anti-flake-protection.png and /dev/null differ diff --git a/assets/merge-details.png b/assets/merge-details.png deleted file mode 100644 index bb417af..0000000 Binary files a/assets/merge-details.png and /dev/null differ diff --git a/assets/merge-details_(1).png b/assets/merge-details_(1).png deleted file mode 100644 index 627bff6..0000000 Binary files a/assets/merge-details_(1).png and /dev/null differ diff --git a/assets/merge-failure-retry.png b/assets/merge-failure-retry.png deleted file mode 100644 index 477e607..0000000 Binary files a/assets/merge-failure-retry.png and /dev/null differ diff --git a/assets/merge-failures.png b/assets/merge-failures.png deleted file mode 100644 index a3bbf12..0000000 Binary files a/assets/merge-failures.png and /dev/null differ diff --git a/assets/merge-github-classic-branch-rules.png b/assets/merge-github-classic-branch-rules.png deleted file mode 100644 index c35af90..0000000 Binary files a/assets/merge-github-classic-branch-rules.png and /dev/null differ diff --git a/assets/merge-github-comment.png b/assets/merge-github-comment.png deleted file mode 100644 index babb872..0000000 Binary files a/assets/merge-github-comment.png and /dev/null differ diff --git a/assets/merge-github-comment_(1).png b/assets/merge-github-comment_(1).png deleted file mode 100644 index cbbd004..0000000 Binary files a/assets/merge-github-comment_(1).png and /dev/null differ diff --git 
a/assets/merge-github-ruleset-prs.png b/assets/merge-github-ruleset-prs.png deleted file mode 100644 index 7f0d76d..0000000 Binary files a/assets/merge-github-ruleset-prs.png and /dev/null differ diff --git a/assets/merge-github-ruleset-push.png b/assets/merge-github-ruleset-push.png deleted file mode 100644 index 5050b8a..0000000 Binary files a/assets/merge-github-ruleset-push.png and /dev/null differ diff --git a/assets/merge-graph.png b/assets/merge-graph.png deleted file mode 100644 index 9336f33..0000000 Binary files a/assets/merge-graph.png and /dev/null differ diff --git a/assets/merge-health.png b/assets/merge-health.png deleted file mode 100644 index 156152b..0000000 Binary files a/assets/merge-health.png and /dev/null differ diff --git a/assets/merge-pending-failure-depth-setting.png b/assets/merge-pending-failure-depth-setting.png deleted file mode 100644 index 39218a3..0000000 Binary files a/assets/merge-pending-failure-depth-setting.png and /dev/null differ diff --git a/assets/merge-pr-details-action.png b/assets/merge-pr-details-action.png deleted file mode 100644 index b520548..0000000 Binary files a/assets/merge-pr-details-action.png and /dev/null differ diff --git a/assets/merge-pr-details-action_(1).png b/assets/merge-pr-details-action_(1).png deleted file mode 100644 index b520548..0000000 Binary files a/assets/merge-pr-details-action_(1).png and /dev/null differ diff --git a/assets/merge-queue-screen.png b/assets/merge-queue-screen.png deleted file mode 100644 index c32a48b..0000000 Binary files a/assets/merge-queue-screen.png and /dev/null differ diff --git a/assets/merge-settings-tab.png b/assets/merge-settings-tab.png deleted file mode 100644 index c4b8c71..0000000 Binary files a/assets/merge-settings-tab.png and /dev/null differ diff --git a/assets/merge-slack-connect.png b/assets/merge-slack-connect.png deleted file mode 100644 index 8ed1c60..0000000 Binary files a/assets/merge-slack-connect.png and /dev/null differ diff --git 
a/assets/merge-slack-settings.png b/assets/merge-slack-settings.png deleted file mode 100644 index 404d5d9..0000000 Binary files a/assets/merge-slack-settings.png and /dev/null differ diff --git a/assets/merge-square.svg b/assets/merge-square.svg deleted file mode 100644 index 9dd8ed6..0000000 --- a/assets/merge-square.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/mergeapi.json b/assets/mergeapi.json deleted file mode 100644 index 5720af0..0000000 --- a/assets/mergeapi.json +++ /dev/null @@ -1,975 +0,0 @@ -{ - "openapi": "3.0.3", - "info": { - "title": "Trunk Merge API", - "version": "0.0.1" - }, - "servers": [ - { - "url": "https://api.trunk.io/v1", - "description": "Server description" - } - ], - "components": { - "securitySchemes": { - "x-api-token": { - "name": "x-api-token", - "type": "apiKey", - "in": "header", - "description": "Trunk API token - see https://docs.trunk.io/apis#authentication" - } - }, - "schemas": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "example": "github.com", - "description": "The host of the repository. 
Currently only supports 'github.com'", - "enum": [ - "github.com" - ] - }, - "owner": { - "type": "string", - "example": "trunk-io", - "description": "The owner of the repository" - }, - "name": { - "type": "string", - "example": "trunk", - "description": "The name of the repository" - } - }, - "example": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - } - }, - "pr": { - "type": "object", - "properties": { - "number": { - "type": "number", - "example": 1, - "description": "The PR number" - } - }, - "example": { - "number": 1 - } - }, - "prWithSha": { - "type": "object", - "properties": { - "number": { - "type": "number", - "example": 1, - "description": "The PR number" - }, - "sha": { - "type": "string", - "example": "1234567890abcdef", - "description": "The SHA of the PR" - } - }, - "example": { - "number": 1, - "sha": "1234567890abcdef" - } - }, - "targetBranch": { - "type": "string", - "example": "main", - "description": "The branch the merge queue will be merging PRs into" - }, - "prReadiness": { - "type": "object", - "properties": { - "hasImpactedTargets": { - "type": "boolean", - "example": false, - "description": "Whether the PR has had impacted targets uploaded for it" - }, - "requiresImpactedTargets": { - "type": "boolean", - "example": false, - "description": "Whether the PR requires impacted targets to be uploaded for it before moving into the queue. 
This is true if the queue is in parallel mode" - }, - "doesBaseBranchMatch": { - "type": "boolean", - "example": true, - "description": "Whether the base branch of the PR matches the target branch of the queue" - }, - "gitHubMergeability": { - "type": "string", - "enum": [ - "MERGEABLE", - "NOT_MERGEABLE", - "IN_PROGRESS", - "UNSPECIFIED" - ], - "example": "MERGEABLE", - "description": "The mergeability status of the PR on GitHub" - } - }, - "example": { - "hasImpactedTargets": false, - "requiresImpactedTargets": false, - "doesBaseBranchMatch": true, - "gitHubMergeability": "MERGEABLE" - } - }, - "pullRequest": { - "type": "object", - "properties": { - "number": { - "type": "number", - "example": 1, - "description": "The PR number" - }, - "title": { - "type": "string", - "example": "Add new feature", - "description": "The title of the PR" - }, - "sha": { - "type": "string", - "example": "1234567890abcdef", - "description": "The head SHA of the PR" - }, - "baseBranch": { - "type": "string", - "example": "main", - "description": "The base branch of the PR" - }, - "author": { - "type": "string", - "example": "dependabot", - "description": "The author of the PR" - } - }, - "example": { - "number": 1, - "title": "Add new feature", - "sha": "1234567890abcdef", - "baseBranch": "main", - "author": "dependabot" - } - }, - "mergeItem": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The ID of the merge item in the queue" - }, - "state": { - "type": "string", - "enum": [ - "NOT_READY", - "PENDING", - "TESTING", - "TESTS_PASSED", - "PENDING_FAILURE", - "FAILED", - "CANCELLED", - "MERGED" - ], - "description": "The state of the PR in the queue - see https://docs.trunk.io/merge/reference#pr-states" - }, - "stateChangedAt": { - "type": "string", - "format": "date-time", - "description": "The time the state of the PR changed" - }, - "priorityValue": { - "type": "number", - "example": 100, - "description": "The priority value of the PR in the 
queue - ranges from 0-255 with 0 being the highest" - }, - "priorityName": { - "type": "string", - "enum": [ - "low", - "medium", - "high", - "urgent" - ], - "example": "medium", - "description": "The priority name of the PR in the queue. Defaults to medium" - }, - "usedDefaultPriorityName": { - "type": "string", - "enum": [ - "low", - "medium", - "high", - "urgent" - ], - "example": "medium", - "description": "The default priority given to the PR if no explicit priority was specified" - }, - "skipTheLine": { - "type": "boolean", - "example": false, - "description": "Whether the PR will skip the line of the queue" - }, - "isCurrentlySubmittedToQueue": { - "type": "boolean", - "description": "Whether the PR is currently submitted to the queue." - }, - "readiness": { - "$ref": "#/components/schemas/prReadiness" - }, - "prNumber": { - "type": "number", - "example": 1, - "description": "The PR number" - }, - "prTitle": { - "type": "string", - "example": "Add new feature", - "description": "The title of the PR" - }, - "prSha": { - "type": "string", - "example": "1234567890abcdef", - "description": "The head SHA of the PR" - }, - "prBaseBranch": { - "type": "string", - "example": "main", - "description": "The base branch of the PR" - }, - "prAuthor": { - "type": "string", - "example": "dependabot", - "description": "The author of the PR" - } - }, - "example": { - "id": "1234567890abcdef", - "state": "PENDING", - "stateChangedAt": "2021-01-01T00:00:00Z", - "priorityValue": 100, - "priorityName": "medium", - "usedDefaultPriorityName": "medium", - "skipTheLine": false, - "isCurrentlySubmittedToQueue": true, - "readiness": { - "hasImpactedTargets": false, - "requiresImpactedTargets": false, - "doesBaseBranchMatch": true, - "gitHubMergeability": "MERGEABLE" - }, - "prNumber": 1, - "prTitle": "Add new feature", - "prSha": "1234567890abcdef", - "prBaseBranch": "main", - "prAuthor": "dependabot" - } - }, - "mergeItemWithoutReadiness": { - "type": "object", - "properties": { - 
"id": { - "type": "string", - "description": "The ID of the merge item in the queue" - }, - "state": { - "type": "string", - "enum": [ - "NOT_READY", - "PENDING", - "TESTING", - "TESTS_PASSED", - "PENDING_FAILURE", - "FAILED", - "CANCELLED", - "MERGED" - ], - "description": "The state of the PR in the queue - see https://docs.trunk.io/merge/reference#pr-states" - }, - "stateChangedAt": { - "type": "string", - "format": "date-time", - "description": "The time the state of the PR changed" - }, - "priorityValue": { - "type": "number", - "example": 100, - "description": "The priority value of the PR in the queue - ranges from 0-255 with 0 being the highest" - }, - "priorityName": { - "type": "string", - "enum": [ - "low", - "medium", - "high", - "urgent" - ], - "example": "medium", - "description": "The priority name of the PR in the queue. Defaults to medium" - }, - "usedDefaultPriorityName": { - "type": "string", - "enum": [ - "low", - "medium", - "high", - "urgent" - ], - "example": "medium", - "description": "The default priority given to the PR if no explicit priority was specified" - }, - "skipTheLine": { - "type": "boolean", - "example": false, - "description": "Whether the PR will skip the line of the queue" - }, - "prNumber": { - "type": "number", - "example": 1, - "description": "The PR number" - }, - "prTitle": { - "type": "string", - "example": "Add new feature", - "description": "The title of the PR" - }, - "prSha": { - "type": "string", - "example": "1234567890abcdef", - "description": "The head SHA of the PR" - }, - "prBaseBranch": { - "type": "string", - "example": "main", - "description": "The base branch of the PR" - }, - "prAuthor": { - "type": "string", - "example": "dependabot", - "description": "The author of the PR" - } - }, - "example": { - "id": "1234567890abcdef", - "state": "PENDING", - "stateChangedAt": "2021-01-01T00:00:00Z", - "priorityValue": 100, - "priorityName": "medium", - "usedDefaultPriorityName": "medium", - "skipTheLine": false, - 
"prNumber": 1, - "prTitle": "Add new feature", - "prSha": "1234567890abcdef", - "prBaseBranch": "main", - "prAuthor": "dependabot" - } - }, - "queue": { - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "RUNNING", - "PAUSED", - "DRAINING", - "SWITCHING_MODES" - ], - "example": "RUNNING", - "description": "The current state of the queue - see https://docs.trunk.io/merge-queue/set-up-trunk-merge/advanced-settings#merge-queue-states" - }, - "branch": { - "type": "string", - "example": "main", - "description": "The branch the merge queue is merging PRs into" - }, - "concurrency": { - "type": "number", - "example": 1, - "description": "The number of PRs that can be tested at the same time" - }, - "testingTimeoutMins": { - "type": "number", - "example": 60, - "description": "The number of minutes a PR can be testing before it is marked as PENDING_FAILURE" - }, - "mode": { - "type": "string", - "enum": [ - "SINGLE", - "PARALLEL" - ], - "description": "The mode the queue is in - see https://docs.trunk.io/merge-queue/parallel-queues", - "example": "SINGLE" - }, - "canOptimisticallyMerge": { - "type": "boolean", - "example": true, - "description": "Whether the queue can optimistically merge PRs if one further down the queue passes tests" - }, - "pendingFailureDepth": { - "type": "number", - "example": 1, - "description": "The number of PRs that must complete testing behind a failed PR before marking the failed PR as FAILED" - }, - "batchingMode": { - "type": "string", - "enum": [ - "NONE", - "SPECULATIVE" - ], - "description": "The batching mode the queue is in. SPECULATIVE means batching is active. 
See https://docs.trunk.io/merge-queue/batching", - "example": "SPECULATIVE" - }, - "batchingMaxWaitTimeMins": { - "type": "number", - "example": 60, - "description": "The maximum time a PR can wait in the queue before being batched" - }, - "batchingMinSize": { - "type": "number", - "example": 1, - "description": "The minimum number of PRs that must be dependent on each other before they are put into a batch" - }, - "createPrsForTestingBranches": { - "type": "boolean", - "example": true, - "description": "Whether the queue will create PRs for testing branches as opposed to just making a branch" - }, - "enqueuedPullRequests": { - "type": "array", - "items": { - "$ref": "#/components/schemas/mergeItemWithoutReadiness" - } - } - }, - "example": { - "state": "RUNNING", - "branch": "main", - "concurrency": 1, - "testingTimeoutMins": 60, - "mode": "SINGLE", - "canOptimisticallyMerge": true, - "pendingFailureDepth": 1, - "batchingMode": "SPECULATIVE", - "batchingMaxWaitTimeMins": 60, - "batchingMinSize": 1, - "createPrsForTestingBranches": true, - "enqueuedPullRequests": [ - { - "id": "1234567890abcdef", - "state": "PENDING", - "stateChangedAt": "2021-01-01T00:00:00Z", - "priorityValue": 100, - "priorityName": "medium", - "usedDefaultPriorityName": "medium", - "skipTheLine": false, - "prNumber": 1, - "prTitle": "Add new feature", - "prSha": "1234567890abcdef", - "prBaseBranch": "main", - "prAuthor": "dependabot" - } - ] - } - }, - "priority": { - "oneOf": [ - { - "type": "string", - "enum": [ - "low", - "medium", - "high", - "urgent" - ], - "default": "medium", - "description": "The priority name to assign to the PR when it begins testing in the queue", - "example": "high" - }, - { - "type": "number", - "example": 1, - "description": "The priority number (0 - 255, 0 is the highest) to assign to the PR when it begins testing in the queue" - } - ] - } - } - }, - "paths": { - "/submitPullRequest": { - "post": { - "tags": [ - "Merge Items" - ], - "security": [ - { - 
"x-api-token": [] - } - ], - "summary": "Submit a pull request to be tested and merged", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "$ref": "#/components/schemas/repo" - }, - "pr": { - "$ref": "#/components/schemas/pr" - }, - "targetBranch": { - "$ref": "#/components/schemas/targetBranch" - }, - "priority": { - "$ref": "#/components/schemas/priority" - } - }, - "example": { - "repo": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - }, - "pr": { - "number": 1 - }, - "targetBranch": "main", - "priority": "medium" - } - } - } - } - }, - "responses": { - "200": { - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - } - } - }, - "/setImpactedTargets": { - "post": { - "tags": [ - "Impacted Targets" - ], - "security": [ - { - "x-api-token": [] - } - ], - "summary": "Set impacted targets", - "description": "Upload impacted targets for the PR and its current SHA. Used specifically when running the queue in Parallel mode", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "$ref": "#/components/schemas/repo" - }, - "pr": { - "$ref": "#/components/schemas/prWithSha" - }, - "targetBranch": { - "$ref": "#/components/schemas/targetBranch" - }, - "impactedTargets": { - "oneOf": [ - { - "type": "array", - "items": { - "type": "string", - "example": "services_backend", - "description": "Name of the target impacted by the changes in the pull request" - } - }, - { - "type": "string", - "enum": [ - "IMPACTS_ALL" - ], - "description": "Special value to indicate that all targets are impacted by the changes in the pull request" - } - ] - } - }, - "example": { - "repo": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - }, - "pr": { - "number": 1, - "sha": "1234567890abcdef" - }, - "targetBranch": "main", - "impactedTargets": [ - "services_backend", - "services_frontend" - ] - 
} - } - } - } - }, - "responses": { - "200": { - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - } - } - }, - "/queuePullRequest": { - "post": { - "tags": [ - "Merge Items" - ], - "security": [ - { - "x-api-token": [] - } - ], - "summary": "Submits a Pull Request to the queue", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "$ref": "#/components/schemas/repo" - }, - "pr": { - "$ref": "#/components/schemas/pr" - }, - "targetBranch": { - "$ref": "#/components/schemas/targetBranch" - }, - "priority": { - "$ref": "#/components/schemas/priority" - } - }, - "example": { - "repo": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - }, - "pr": { - "number": 1 - }, - "targetBranch": "main", - "priority": "medium" - } - } - } - } - }, - "responses": { - "200": { - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - } - } - }, - "/restartTestsOnPullRequest": { - "post": { - "tags": [ - "Merge Items" - ], - "security": [ - { - "x-api-token": [] - } - ], - "summary": "Restarts tests on a PR in the queue without moving its position or causing other PRs to be retested", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "$ref": "#/components/schemas/repo" - }, - "pr": { - "$ref": "#/components/schemas/pr" - }, - "targetBranch": { - "$ref": "#/components/schemas/targetBranch" - } - } - }, - "example": { - "repo": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - }, - "pr": { - "number": 1 - }, - "targetBranch": "main" - } - } - } - }, - "responses": { - "200": { - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - } - } - }, - "/cancelPullRequest": { - "post": { - "tags": [ - "Merge Items" - ], - "security": [ - { - "x-api-token": [] - } - ], - "summary": "Cancel a pull request already in the queue.", - "requestBody": { - 
"content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "$ref": "#/components/schemas/repo" - }, - "pr": { - "$ref": "#/components/schemas/pr" - }, - "targetBranch": { - "$ref": "#/components/schemas/targetBranch" - } - } - }, - "example": { - "repo": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - }, - "pr": { - "number": 1 - }, - "targetBranch": "main" - } - } - } - }, - "responses": { - "200": { - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - } - } - }, - "/getSubmittedPullRequest": { - "post": { - "tags": [ - "Merge Items" - ], - "security": [ - { - "x-api-token": [] - } - ], - "summary": "Get information on a pull request that has been submitted to the queue", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "$ref": "#/components/schemas/repo" - }, - "pr": { - "$ref": "#/components/schemas/pr" - }, - "targetBranch": { - "$ref": "#/components/schemas/targetBranch" - }, - "priority": { - "$ref": "#/components/schemas/priority" - } - }, - "example": { - "repo": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - }, - "pr": { - "number": 1 - }, - "targetBranch": "main", - "priority": "medium" - } - } - } - } - }, - "responses": { - "200": { - "description": "Information on the pull request", - "content": { - "application/json": { - "schema": { - "type": "object", - "$ref": "#/components/schemas/mergeItem" - }, - "example": { - "id": "1234567890abcdef", - "state": "PENDING", - "stateChangedAt": "2021-01-01T00:00:00Z", - "priorityValue": 100, - "priorityName": "medium", - "usedDefaultPriorityName": "medium", - "skipTheLine": false, - "isCurrentlySubmittedToQueue": true, - "readiness": { - "hasImpactedTargets": false, - "requiresImpactedTargets": false, - "doesBaseBranchMatch": true, - "gitHubMergeability": "MERGEABLE" - }, - "prNumber": 1, - "prTitle": "Add new feature", - "prSha": 
"1234567890abcdef", - "prBaseBranch": "main", - "prAuthor": "dependabot" - } - } - } - }, - "401": { - "description": "Unauthorized" - }, - "404": { - "description": "Pull request with the provided number does not exist or has never been submitted to the specified queue before" - } - } - } - }, - "/getQueue": { - "post": { - "tags": [ - "Merge Queue" - ], - "security": [ - { - "x-api-token": [] - } - ], - "summary": "Get information on your merge queue and the PRs currently in it", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "$ref": "#/components/schemas/repo" - }, - "targetBranch": { - "$ref": "#/components/schemas/targetBranch" - } - } - }, - "example": { - "repo": { - "host": "github.com", - "owner": "trunk-io", - "name": "trunk" - }, - "targetBranch": "main" - } - } - } - }, - "responses": { - "200": { - "description": "Information on the pull request", - "content": { - "application/json": { - "schema": { - "type": "object", - "$ref": "#/components/schemas/queue" - }, - "example": { - "state": "RUNNING", - "branch": "main", - "concurrency": 1, - "testingTimeoutMins": 60, - "mode": "SINGLE", - "canOptimisticallyMerge": true, - "pendingFailureDepth": 1, - "batchingMode": "SPECULATIVE", - "batchingMaxWaitTimeMins": 60, - "batchingMinSize": 1, - "createPrsForTestingBranches": true, - "enqueuedPullRequests": [ - { - "id": "1234567890abcdef", - "state": "PENDING", - "stateChangedAt": "2021-01-01T00:00:00Z", - "priorityValue": 100, - "priorityName": "medium", - "usedDefaultPriorityName": "medium", - "skipTheLine": false, - "prNumber": 1, - "prTitle": "Add new feature", - "prSha": "1234567890abcdef", - "prBaseBranch": "main", - "prAuthor": "dependabot" - } - ] - } - } - } - }, - "401": { - "description": "Unauthorized" - }, - "404": { - "description": "Pull request with the provided number does not exist or has never been submitted to the specified queue before" - } - } - } - } - } -} \ No 
newline at end of file diff --git a/assets/microsoft_teams.png b/assets/microsoft_teams.png deleted file mode 100644 index d3030bf..0000000 Binary files a/assets/microsoft_teams.png and /dev/null differ diff --git a/assets/minitest.png b/assets/minitest.png deleted file mode 100644 index 95d915d..0000000 Binary files a/assets/minitest.png and /dev/null differ diff --git a/assets/mocha.png b/assets/mocha.png deleted file mode 100644 index d69acfb..0000000 Binary files a/assets/mocha.png and /dev/null differ diff --git a/assets/mypy.gif b/assets/mypy.gif deleted file mode 100644 index edf3aff..0000000 Binary files a/assets/mypy.gif and /dev/null differ diff --git a/assets/neo_vim.svg b/assets/neo_vim.svg deleted file mode 100644 index 7ddd851..0000000 --- a/assets/neo_vim.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/assets/nightwatch.png b/assets/nightwatch.png deleted file mode 100644 index 72e7639..0000000 Binary files a/assets/nightwatch.png and /dev/null differ diff --git a/assets/nunit-logo.png b/assets/nunit-logo.png deleted file mode 100644 index 8b8858f..0000000 Binary files a/assets/nunit-logo.png and /dev/null differ diff --git a/assets/nunit.png b/assets/nunit.png deleted file mode 100644 index fc5c02b..0000000 Binary files a/assets/nunit.png and /dev/null differ diff --git a/assets/om-pfd-settings.png b/assets/om-pfd-settings.png deleted file mode 100644 index 828ae27..0000000 Binary files a/assets/om-pfd-settings.png and /dev/null differ diff --git a/assets/onboarding-add-org.png b/assets/onboarding-add-org.png deleted file mode 100644 index b5185a8..0000000 Binary files a/assets/onboarding-add-org.png and /dev/null differ diff --git a/assets/onboarding-create-org-dark.png b/assets/onboarding-create-org-dark.png deleted file mode 100644 index 2023d1c..0000000 Binary files a/assets/onboarding-create-org-dark.png and /dev/null differ diff --git a/assets/onboarding-create-org-light.png b/assets/onboarding-create-org-light.png deleted 
file mode 100644 index bf27aa0..0000000 Binary files a/assets/onboarding-create-org-light.png and /dev/null differ diff --git a/assets/onboarding-pick-product-dark.png b/assets/onboarding-pick-product-dark.png deleted file mode 100644 index 9b60c5d..0000000 Binary files a/assets/onboarding-pick-product-dark.png and /dev/null differ diff --git a/assets/onboarding-pick-product-light.png b/assets/onboarding-pick-product-light.png deleted file mode 100644 index dc839c3..0000000 Binary files a/assets/onboarding-pick-product-light.png and /dev/null differ diff --git a/assets/openapi.json b/assets/openapi.json deleted file mode 100644 index 0f8f731..0000000 --- a/assets/openapi.json +++ /dev/null @@ -1,331 +0,0 @@ -{ - "openapi": "3.1.0", - "servers": [ - { - "url": "https://api.trunk.io/v1" - } - ], - "info": { - "title": "Trunk APIs", - "version": "1.0.0", - "license": { - "name": "UNLICENSED" - } - }, - "paths": { - "/flaky-tests/list-quarantined-tests": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get a list of quarantined tests", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance.", - "examples": [ - "github.com", - "gitlab.com", - "github-enterprise.my-org-tld.com", - "gitlab-enterprise.my-org-tld.com" - ] - }, - "owner": { - "type": "string", - "description": "The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`.", - "examples": ["my-github-org", "my-gitlab-org/my/sub/group"] - }, - "name": { - "type": "string", - "description": "The name of the repository.", - "examples": ["my-repo"] - } - }, - "required": ["host", "owner", "name"], - "description": "The repository to list quarantined tests for." 
- }, - "org_url_slug": { - "type": "string", - "description": "The slug of your organization. Find this at https://app.trunk.io//settings under \"Organization Name\" > \"Slug\"", - "examples": ["my-trunk-org-slug"] - }, - "page_query": { - "type": "object", - "properties": { - "page_size": { - "type": "integer", - "minimum": 1, - "maximum": 100, - "description": "The number of tests to return per page." - }, - "page_token": { - "type": "string", - "description": "The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty.", - "examples": [""] - } - }, - "required": ["page_size"], - "description": "Pagination options for the list of quarantined tests." - } - }, - "required": ["repo", "org_url_slug", "page_query"] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "quarantined_tests": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name of the test case." - }, - "parent": { - "type": ["string", "null"], - "description": "The parent of the test case." - }, - "file": { - "type": ["string", "null"], - "description": "The file of the test case." - }, - "classname": { - "type": ["string", "null"], - "description": "The class name of the test case." - }, - "status": { - "type": "string", - "enum": ["HEALTHY", "FLAKY", "BROKEN"], - "description": "The status of the test case." - }, - "codeowners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The latest codeowners of the test case." - }, - "quarantine_setting": { - "type": "string", - "enum": ["ALWAYS_QUARANTINE", "AUTO_QUARANTINE"], - "description": "The quarantine setting of the test case." 
- }, - "status_last_updated_at": { - "type": "string", - "format": "date-time", - "description": "The last time the status of the test case was updated." - }, - "test_case_id": { - "type": "string", - "description": "The ID of the test case. This value is unstable and should not be relied upon." - } - }, - "required": [ - "name", - "parent", - "file", - "classname", - "status", - "codeowners", - "quarantine_setting", - "status_last_updated_at", - "test_case_id" - ], - "description": "A quarantined test case." - }, - "description": "A page of quarantined test cases." - }, - "page": { - "type": "object", - "properties": { - "total_rows": { - "type": "integer", - "exclusiveMinimum": 0, - "description": "The total number of quarantined test cases in the paginated list." - }, - "total_pages": { - "type": "integer", - "exclusiveMinimum": 0, - "description": "The total number of pages in the paginated list of quarantined test cases." - }, - "next_page_token": { - "type": "string", - "description": "The next page token to use for pagination. See `pageToken` in the request for more information." - }, - "prev_page_token": { - "type": "string", - "description": "The previous page token to use for pagination. See `pageToken` in the request for more information." - }, - "last_page_token": { - "type": "string", - "description": "The last page token to use for pagination. See `pageToken` in the request for more information." - }, - "page_index": { - "type": "integer", - "exclusiveMinimum": 0, - "description": "The index of the current page in the paginated list of quarantined test cases." - } - }, - "required": [ - "total_rows", - "total_pages", - "next_page_token", - "prev_page_token", - "last_page_token", - "page_index" - ], - "description": "Pagination information for the list of quarantined test cases." 
- } - }, - "required": ["quarantined_tests", "page"] - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/status": { - "get": { - "security": [], - "summary": "Get the status of Trunk services", - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "overallStatus": { - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "up" - }, - "color": { - "type": "string", - "enum": ["green"] - }, - "overallStatusDescription": { - "type": "string", - "const": "All systems operational" - } - }, - "required": ["type", "color", "overallStatusDescription"] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "impacted" - }, - "color": { - "type": "string", - "enum": ["yellow", "red"] - }, - "overallStatusDescription": { - "type": "string", - "const": "Impacted" - }, - "impactedStatuses": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "color": { - "type": "string", - "enum": ["yellow", "red"] - }, - "statusDescription": { - "type": "string" - } - }, - "required": ["name", "color", "statusDescription"] - } - } - }, - "required": [ - "type", - "color", - "overallStatusDescription", - "impactedStatuses" - ] - } - ] - }, - "statuses": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "color": { - "type": "string", - "enum": ["green", "yellow", "red"] - }, - "statusDescription": { - "type": "string" - } - }, - "required": ["name", "color", "statusDescription"] - } - } - }, - "required": ["overallStatus", "statuses"] - } - } - } - } - } - } - } - }, - "components": { - "securitySchemes": { - "ApiKeyAuth": { - "type": "apiKey", - "in": "header", - "name": "x-api-token" - } - } - } 
-} diff --git a/assets/optimistic-merge-toggle.png b/assets/optimistic-merge-toggle.png deleted file mode 100644 index b064570..0000000 Binary files a/assets/optimistic-merge-toggle.png and /dev/null differ diff --git a/assets/optimistic-merge-ui b/assets/optimistic-merge-ui deleted file mode 100644 index 55683f9..0000000 Binary files a/assets/optimistic-merge-ui and /dev/null differ diff --git a/assets/org-slug-and-token.png b/assets/org-slug-and-token.png deleted file mode 100644 index fd8b59e..0000000 Binary files a/assets/org-slug-and-token.png and /dev/null differ diff --git a/assets/org-slug-dark.png b/assets/org-slug-dark.png deleted file mode 100644 index 87bdfe4..0000000 Binary files a/assets/org-slug-dark.png and /dev/null differ diff --git a/assets/org-slug-light.png b/assets/org-slug-light.png deleted file mode 100644 index 8ce9ddf..0000000 Binary files a/assets/org-slug-light.png and /dev/null differ diff --git a/assets/org-team-manage-domain.png b/assets/org-team-manage-domain.png deleted file mode 100644 index 8736065..0000000 Binary files a/assets/org-team-manage-domain.png and /dev/null differ diff --git a/assets/org-team-manage-domain_(1).png b/assets/org-team-manage-domain_(1).png deleted file mode 100644 index cc82f3b..0000000 Binary files a/assets/org-team-manage-domain_(1).png and /dev/null differ diff --git a/assets/org-team-members-invite.png b/assets/org-team-members-invite.png deleted file mode 100644 index 90383e2..0000000 Binary files a/assets/org-team-members-invite.png and /dev/null differ diff --git a/assets/org-team-members.png b/assets/org-team-members.png deleted file mode 100644 index 5b62d53..0000000 Binary files a/assets/org-team-members.png and /dev/null differ diff --git a/assets/org-team-pending-invites.png b/assets/org-team-pending-invites.png deleted file mode 100644 index 825cb4e..0000000 Binary files a/assets/org-team-pending-invites.png and /dev/null differ diff --git a/assets/org-team.png b/assets/org-team.png deleted 
file mode 100644 index a0b3657..0000000 Binary files a/assets/org-team.png and /dev/null differ diff --git a/assets/org-token-dark.png b/assets/org-token-dark.png deleted file mode 100644 index 3fd6e58..0000000 Binary files a/assets/org-token-dark.png and /dev/null differ diff --git a/assets/org-token-light.png b/assets/org-token-light.png deleted file mode 100644 index 1e1e9f7..0000000 Binary files a/assets/org-token-light.png and /dev/null differ diff --git a/assets/organization-api-token b/assets/organization-api-token deleted file mode 100644 index b1af4f6..0000000 Binary files a/assets/organization-api-token and /dev/null differ diff --git a/assets/other.png b/assets/other.png deleted file mode 100644 index 3d5eaef..0000000 Binary files a/assets/other.png and /dev/null differ diff --git a/assets/override-dark.png b/assets/override-dark.png deleted file mode 100644 index d8ad255..0000000 Binary files a/assets/override-dark.png and /dev/null differ diff --git a/assets/override-light.png b/assets/override-light.png deleted file mode 100644 index 7684e82..0000000 Binary files a/assets/override-light.png and /dev/null differ diff --git a/assets/overview b/assets/overview deleted file mode 100644 index 5ddd354..0000000 --- a/assets/overview +++ /dev/null @@ -1,246 +0,0 @@ -Overview
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Getting Started

-

To use trunk locally, install via:

-
curl https://get.trunk.io -fsSL | bash
-
-

For other installation options (npm, brew, direct download, etc) and details on exactly what we install or how to uninstall, see the Install Trunk doc.

-

Also check out the other ways to use Trunk:

- -

What can it do?

-

The Trunk CLI can be used for:

-
    -
  • Trunk Merge: a merge queue to make merging code in github safer and easier
  • -
  • Trunk Check: a pluginable superlinter with a builtin language server and pre-existing issue detection
  • -
  • Trunk Actions: workflow automation for software engineers
  • -
-

Trunk Check and Trunk Actions can be used entirely locally without depending on hosted services or even having a Trunk account

-

Initialize Trunk in your repo

-

Whether you aim to use Trunk Check, Trunk Merge, Trunk Actions, or all of the above, the first step is to initialize Trunk in your git repo:

-
trunk init
-
-

Note: for an extra layer of security you can optionally run trunk init --lock instead of trunk init which adds sha256s of the trunk cli to the trunk config file. This is then used by the Trunk Launcher when it downloads the trunk binary.

-

init scans the files in your repo and generates a .trunk/trunk.yaml configuration file tailored to your repo (it may also generate linter-specific config files, such as .shellcheckrc). The scan will identify all the particular languages and technologies you use and automatically configure the correct set of linters / formatters to run.

-

If you only want to use Trunk Merge, you can safely ignore the linter setup, or even strip enabled linters from .trunk/trunk.yaml. You will additionally need to login to use Trunk Merge via trunk login.

-

Next Steps

-

Check out the docs for Trunk Merge, Trunk Check, and Trunk Actions.


- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\ No newline at end of file diff --git a/assets/pest.png b/assets/pest.png deleted file mode 100644 index c214220..0000000 Binary files a/assets/pest.png and /dev/null differ diff --git a/assets/phpunit.png b/assets/phpunit.png deleted file mode 100644 index fda0486..0000000 Binary files a/assets/phpunit.png and /dev/null differ diff --git a/assets/pika-1715033198689-2x.png b/assets/pika-1715033198689-2x.png deleted file mode 100644 index 2409fdf..0000000 Binary files a/assets/pika-1715033198689-2x.png and /dev/null differ diff --git a/assets/pika-1715033350907-2x.png b/assets/pika-1715033350907-2x.png deleted file mode 100644 index a865f6d..0000000 Binary files a/assets/pika-1715033350907-2x.png and /dev/null differ diff --git a/assets/playwright.png b/assets/playwright.png deleted file mode 100644 index d40b144..0000000 Binary files a/assets/playwright.png and /dev/null differ diff --git a/assets/pmd.gif b/assets/pmd.gif deleted file mode 100644 index 621dc8f..0000000 Binary files a/assets/pmd.gif and /dev/null differ diff --git a/assets/png-clipart-metabase-logo-landscape-tech-companies.png b/assets/png-clipart-metabase-logo-landscape-tech-companies.png deleted file mode 100644 index dbd4d61..0000000 Binary files a/assets/png-clipart-metabase-logo-landscape-tech-companies.png and /dev/null differ diff --git a/assets/pr-drill-down-list.png b/assets/pr-drill-down-list.png deleted file mode 100644 index eb3b446..0000000 Binary files a/assets/pr-drill-down-list.png and /dev/null differ diff --git a/assets/pr-restart-menu.png b/assets/pr-restart-menu.png deleted file mode 100644 index ff612d3..0000000 Binary files a/assets/pr-restart-menu.png and /dev/null differ diff --git a/assets/pr-test-summary.png b/assets/pr-test-summary.png deleted file mode 100644 index 8b6c302..0000000 Binary files a/assets/pr-test-summary.png and /dev/null differ diff --git a/assets/pr-test-summary_(1).png b/assets/pr-test-summary_(1).png deleted file mode 100644 index fd48142..0000000 
Binary files a/assets/pr-test-summary_(1).png and /dev/null differ diff --git a/assets/pragma-once.gif b/assets/pragma-once.gif deleted file mode 100644 index 5cfdaaf..0000000 Binary files a/assets/pragma-once.gif and /dev/null differ diff --git a/assets/prettier.gif b/assets/prettier.gif deleted file mode 100644 index ac83ac8..0000000 Binary files a/assets/prettier.gif and /dev/null differ diff --git a/assets/prs-impacted-dark.png b/assets/prs-impacted-dark.png deleted file mode 100644 index 7e37835..0000000 Binary files a/assets/prs-impacted-dark.png and /dev/null differ diff --git a/assets/prs-impacted-dark_(1).png b/assets/prs-impacted-dark_(1).png deleted file mode 100644 index d12f9b4..0000000 Binary files a/assets/prs-impacted-dark_(1).png and /dev/null differ diff --git a/assets/prs-impacted-light.png b/assets/prs-impacted-light.png deleted file mode 100644 index 76392d0..0000000 Binary files a/assets/prs-impacted-light.png and /dev/null differ diff --git a/assets/prs-impacted-light_(1).png b/assets/prs-impacted-light_(1).png deleted file mode 100644 index 321bee8..0000000 Binary files a/assets/prs-impacted-light_(1).png and /dev/null differ diff --git a/assets/prs_impacted_(1).png b/assets/prs_impacted_(1).png deleted file mode 100644 index bd15095..0000000 Binary files a/assets/prs_impacted_(1).png and /dev/null differ diff --git a/assets/pylint.gif b/assets/pylint.gif deleted file mode 100644 index 097542a..0000000 Binary files a/assets/pylint.gif and /dev/null differ diff --git a/assets/pytest.png b/assets/pytest.png deleted file mode 100644 index 524d946..0000000 Binary files a/assets/pytest.png and /dev/null differ diff --git a/assets/quarantine-flaky-status.png b/assets/quarantine-flaky-status.png deleted file mode 100644 index bf5d0f1..0000000 Binary files a/assets/quarantine-flaky-status.png and /dev/null differ diff --git a/assets/quarantined_test.png b/assets/quarantined_test.png deleted file mode 100644 index f0f2bb9..0000000 Binary files 
a/assets/quarantined_test.png and /dev/null differ diff --git a/assets/quarantined_test1.png b/assets/quarantined_test1.png deleted file mode 100644 index 841274a..0000000 Binary files a/assets/quarantined_test1.png and /dev/null differ diff --git a/assets/qurantine-audit-logs.png b/assets/qurantine-audit-logs.png deleted file mode 100644 index 9854026..0000000 Binary files a/assets/qurantine-audit-logs.png and /dev/null differ diff --git a/assets/qurantine-enable-settings.png b/assets/qurantine-enable-settings.png deleted file mode 100644 index 0d33a77..0000000 Binary files a/assets/qurantine-enable-settings.png and /dev/null differ diff --git a/assets/qurantine-enable-settings_(1).png b/assets/qurantine-enable-settings_(1).png deleted file mode 100644 index dbc00e3..0000000 Binary files a/assets/qurantine-enable-settings_(1).png and /dev/null differ diff --git a/assets/qurantine-individual-tests-revert.png b/assets/qurantine-individual-tests-revert.png deleted file mode 100644 index 22c865e..0000000 Binary files a/assets/qurantine-individual-tests-revert.png and /dev/null differ diff --git a/assets/qurantine-individual-tests.png b/assets/qurantine-individual-tests.png deleted file mode 100644 index 006814d..0000000 Binary files a/assets/qurantine-individual-tests.png and /dev/null differ diff --git a/assets/robot.png b/assets/robot.png deleted file mode 100644 index 271325b..0000000 Binary files a/assets/robot.png and /dev/null differ diff --git a/assets/rspec.png b/assets/rspec.png deleted file mode 100644 index e1eba0b..0000000 Binary files a/assets/rspec.png and /dev/null differ diff --git a/assets/run-details-dark.png b/assets/run-details-dark.png deleted file mode 100644 index 3e71a5b..0000000 Binary files a/assets/run-details-dark.png and /dev/null differ diff --git a/assets/run-details-light.png b/assets/run-details-light.png deleted file mode 100644 index 1c59710..0000000 Binary files a/assets/run-details-light.png and /dev/null differ diff --git 
a/assets/runs-view-table.png b/assets/runs-view-table.png deleted file mode 100644 index ffebd04..0000000 Binary files a/assets/runs-view-table.png and /dev/null differ diff --git a/assets/scaffoldhub.png b/assets/scaffoldhub.png deleted file mode 100644 index 1f43f8e..0000000 Binary files a/assets/scaffoldhub.png and /dev/null differ diff --git a/assets/semaphore-logo.png b/assets/semaphore-logo.png deleted file mode 100644 index 1d24e8c..0000000 Binary files a/assets/semaphore-logo.png and /dev/null differ diff --git a/assets/semaphore.png b/assets/semaphore.png deleted file mode 100644 index f86249a..0000000 Binary files a/assets/semaphore.png and /dev/null differ diff --git a/assets/shellcheck.gif b/assets/shellcheck.gif deleted file mode 100644 index ffb4823..0000000 Binary files a/assets/shellcheck.gif and /dev/null differ diff --git a/assets/slack-dm-start-connection.png b/assets/slack-dm-start-connection.png deleted file mode 100644 index 3b38b67..0000000 Binary files a/assets/slack-dm-start-connection.png and /dev/null differ diff --git a/assets/slack-home-connect.png b/assets/slack-home-connect.png deleted file mode 100644 index 32916c5..0000000 Binary files a/assets/slack-home-connect.png and /dev/null differ diff --git a/assets/slack-multi-channel.png b/assets/slack-multi-channel.png deleted file mode 100644 index c888563..0000000 Binary files a/assets/slack-multi-channel.png and /dev/null differ diff --git a/assets/slack-multiple-channels.png b/assets/slack-multiple-channels.png deleted file mode 100644 index a0620b5..0000000 Binary files a/assets/slack-multiple-channels.png and /dev/null differ diff --git a/assets/slack-notification-topics.png b/assets/slack-notification-topics.png deleted file mode 100644 index 6d666ea..0000000 Binary files a/assets/slack-notification-topics.png and /dev/null differ diff --git a/assets/slack-workspace-connect.png b/assets/slack-workspace-connect.png deleted file mode 100644 index 8f94d32..0000000 Binary files 
a/assets/slack-workspace-connect.png and /dev/null differ diff --git a/assets/slack-workspace-connected.png b/assets/slack-workspace-connected.png deleted file mode 100644 index b8f2da6..0000000 Binary files a/assets/slack-workspace-connected.png and /dev/null differ diff --git a/assets/slack-workspace-oauth.png b/assets/slack-workspace-oauth.png deleted file mode 100644 index c1da180..0000000 Binary files a/assets/slack-workspace-oauth.png and /dev/null differ diff --git a/assets/slack.png b/assets/slack.png deleted file mode 100644 index 53ae325..0000000 Binary files a/assets/slack.png and /dev/null differ diff --git a/assets/status_history.png b/assets/status_history.png deleted file mode 100644 index 84bb4b2..0000000 Binary files a/assets/status_history.png and /dev/null differ diff --git a/assets/svix.svg b/assets/svix.svg deleted file mode 100644 index f7240a5..0000000 --- a/assets/svix.svg +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - \ No newline at end of file diff --git a/assets/swift-testing.png b/assets/swift-testing.png deleted file mode 100644 index 8b3e4dc..0000000 Binary files a/assets/swift-testing.png and /dev/null differ diff --git a/assets/teamcity.png b/assets/teamcity.png deleted file mode 100644 index b3d36f2..0000000 Binary files a/assets/teamcity.png and /dev/null differ diff --git a/assets/terminal-icon.svg b/assets/terminal-icon.svg deleted file mode 100644 index 3781403..0000000 --- a/assets/terminal-icon.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/terminal-icon_(1).svg b/assets/terminal-icon_(1).svg deleted file mode 100644 index 0ebb564..0000000 --- a/assets/terminal-icon_(1).svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - \ No newline at end of file diff --git a/assets/terminal-icon_(2).svg b/assets/terminal-icon_(2).svg deleted file mode 100644 index 049c74d..0000000 --- a/assets/terminal-icon_(2).svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - diff --git a/assets/terraform-drift-mergequeue.png 
b/assets/terraform-drift-mergequeue.png deleted file mode 100644 index 5b8e067..0000000 Binary files a/assets/terraform-drift-mergequeue.png and /dev/null differ diff --git a/assets/terraform-in-sync.png b/assets/terraform-in-sync.png deleted file mode 100644 index 8dc4628..0000000 Binary files a/assets/terraform-in-sync.png and /dev/null differ diff --git a/assets/test-analtyics-pr-comment-screenshot.png b/assets/test-analtyics-pr-comment-screenshot.png deleted file mode 100644 index c217f06..0000000 Binary files a/assets/test-analtyics-pr-comment-screenshot.png and /dev/null differ diff --git a/assets/test-details-label-picker.png b/assets/test-details-label-picker.png deleted file mode 100644 index 608e5e2..0000000 Binary files a/assets/test-details-label-picker.png and /dev/null differ diff --git a/assets/test-details-labels.png b/assets/test-details-labels.png deleted file mode 100644 index 57c6b51..0000000 Binary files a/assets/test-details-labels.png and /dev/null differ diff --git a/assets/test-history-dark.png b/assets/test-history-dark.png deleted file mode 100644 index 0c46aea..0000000 Binary files a/assets/test-history-dark.png and /dev/null differ diff --git a/assets/test-history-light.png b/assets/test-history-light.png deleted file mode 100644 index 2453ce0..0000000 Binary files a/assets/test-history-light.png and /dev/null differ diff --git a/assets/test-labels-settings.png b/assets/test-labels-settings.png deleted file mode 100644 index 6742edc..0000000 Binary files a/assets/test-labels-settings.png and /dev/null differ diff --git a/assets/test-run-detail-dark.png b/assets/test-run-detail-dark.png deleted file mode 100644 index 926ac9e..0000000 Binary files a/assets/test-run-detail-dark.png and /dev/null differ diff --git a/assets/test-run-detail-light.png b/assets/test-run-detail-light.png deleted file mode 100644 index a3060bd..0000000 Binary files a/assets/test-run-detail-light.png and /dev/null differ diff --git a/assets/testplan-box.png 
b/assets/testplan-box.png deleted file mode 100644 index 3f5ed6a..0000000 Binary files a/assets/testplan-box.png and /dev/null differ diff --git a/assets/testplan-box_(1).png b/assets/testplan-box_(1).png deleted file mode 100644 index 5fbd58d..0000000 Binary files a/assets/testplan-box_(1).png and /dev/null differ diff --git a/assets/testplan.png b/assets/testplan.png deleted file mode 100644 index c0bd555..0000000 Binary files a/assets/testplan.png and /dev/null differ diff --git a/assets/tests-list-filtered-by-label.png b/assets/tests-list-filtered-by-label.png deleted file mode 100644 index 658ff2c..0000000 Binary files a/assets/tests-list-filtered-by-label.png and /dev/null differ diff --git a/assets/testtrunkintegration.slack.com_oauth_client_id=1523871431059.3961451315218&scope=incoming-webhook%2Cchannels%3Ajoin%2Cchannels%3Amanage&user_scope=&redirect_uri=https%3A%2F%2Fapp.trunk.io%2Fslack%2F07e100e0-5053-42ed-8d13-cd953bba3b42%3Frep.png b/assets/testtrunkintegration.slack.com_oauth_client_id=1523871431059.3961451315218&scope=incoming-webhook%2Cchannels%3Ajoin%2Cchannels%3Amanage&user_scope=&redirect_uri=https%3A%2F%2Fapp.trunk.io%2Fslack%2F07e100e0-5053-42ed-8d13-cd953bba3b42%3Frep.png deleted file mode 100644 index 9ca2bca..0000000 Binary files a/assets/testtrunkintegration.slack.com_oauth_client_id=1523871431059.3961451315218&scope=incoming-webhook%2Cchannels%3Ajoin%2Cchannels%3Amanage&user_scope=&redirect_uri=https%3A%2F%2Fapp.trunk.io%2Fslack%2F07e100e0-5053-42ed-8d13-cd953bba3b42%3Frep.png and /dev/null differ diff --git a/assets/travis-ci-logo.png b/assets/travis-ci-logo.png deleted file mode 100644 index cddb59b..0000000 Binary files a/assets/travis-ci-logo.png and /dev/null differ diff --git a/assets/travis.png b/assets/travis.png deleted file mode 100644 index 4d1125e..0000000 Binary files a/assets/travis.png and /dev/null differ diff --git a/assets/trivy.gif b/assets/trivy.gif deleted file mode 100644 index 17e924e..0000000 Binary files 
a/assets/trivy.gif and /dev/null differ diff --git a/assets/trunk-sudo-ruleset-bypass-mode.png b/assets/trunk-sudo-ruleset-bypass-mode.png deleted file mode 100644 index aa02bef..0000000 Binary files a/assets/trunk-sudo-ruleset-bypass-mode.png and /dev/null differ diff --git a/assets/trunk-sudo-setup-checklist.png b/assets/trunk-sudo-setup-checklist.png deleted file mode 100644 index e9bac59..0000000 Binary files a/assets/trunk-sudo-setup-checklist.png and /dev/null differ diff --git a/assets/unique-failure-reason-dark.png b/assets/unique-failure-reason-dark.png deleted file mode 100644 index 9df677c..0000000 Binary files a/assets/unique-failure-reason-dark.png and /dev/null differ diff --git a/assets/unique-failure-reason-light.png b/assets/unique-failure-reason-light.png deleted file mode 100644 index f2f2993..0000000 Binary files a/assets/unique-failure-reason-light.png and /dev/null differ diff --git a/assets/unique_failure_reasons.png b/assets/unique_failure_reasons.png deleted file mode 100644 index cb8b4a0..0000000 Binary files a/assets/unique_failure_reasons.png and /dev/null differ diff --git a/assets/unique_failure_reasons_(1).png b/assets/unique_failure_reasons_(1).png deleted file mode 100644 index e1a7b76..0000000 Binary files a/assets/unique_failure_reasons_(1).png and /dev/null differ diff --git a/assets/unittest.png b/assets/unittest.png deleted file mode 100644 index c6037af..0000000 Binary files a/assets/unittest.png and /dev/null differ diff --git a/assets/variants-dark-border.png b/assets/variants-dark-border.png deleted file mode 100644 index 52d4e3d..0000000 Binary files a/assets/variants-dark-border.png and /dev/null differ diff --git a/assets/variants-light-border.png b/assets/variants-light-border.png deleted file mode 100644 index bb209aa..0000000 Binary files a/assets/variants-light-border.png and /dev/null differ diff --git a/assets/vitest.png b/assets/vitest.png deleted file mode 100644 index 3badfac..0000000 Binary files 
a/assets/vitest.png and /dev/null differ diff --git a/assets/waabi-logo-rev_copy.png b/assets/waabi-logo-rev_copy.png deleted file mode 100644 index f36a73e..0000000 Binary files a/assets/waabi-logo-rev_copy.png and /dev/null differ diff --git a/assets/waabi-logo.png b/assets/waabi-logo.png deleted file mode 100644 index 3dbea92..0000000 Binary files a/assets/waabi-logo.png and /dev/null differ diff --git a/assets/webhook-event-catalog.png b/assets/webhook-event-catalog.png deleted file mode 100644 index 3d4d69a..0000000 Binary files a/assets/webhook-event-catalog.png and /dev/null differ diff --git a/assets/webhook-event-catalog_(1).png b/assets/webhook-event-catalog_(1).png deleted file mode 100644 index 3d4d69a..0000000 Binary files a/assets/webhook-event-catalog_(1).png and /dev/null differ diff --git a/assets/webhooks-settings.png b/assets/webhooks-settings.png deleted file mode 100644 index 9557de3..0000000 Binary files a/assets/webhooks-settings.png and /dev/null differ diff --git a/assets/wordmark-dark.png b/assets/wordmark-dark.png deleted file mode 100644 index 1c976d6..0000000 Binary files a/assets/wordmark-dark.png and /dev/null differ diff --git a/assets/workflow-details-barchart-popup.png b/assets/workflow-details-barchart-popup.png deleted file mode 100644 index 6ef3278..0000000 Binary files a/assets/workflow-details-barchart-popup.png and /dev/null differ diff --git a/assets/workflow-jobs-overview.png b/assets/workflow-jobs-overview.png deleted file mode 100644 index e2b02c9..0000000 Binary files a/assets/workflow-jobs-overview.png and /dev/null differ diff --git a/assets/xctest-logo.png b/assets/xctest-logo.png deleted file mode 100644 index 3333c74..0000000 Binary files a/assets/xctest-logo.png and /dev/null differ diff --git a/assets/xctest.png b/assets/xctest.png deleted file mode 100644 index bb88525..0000000 Binary files a/assets/xctest.png and /dev/null differ diff --git a/ci-autopilot/faqs.mdx b/ci-autopilot/faqs.mdx deleted file mode 100644 
index bfbc92d..0000000 --- a/ci-autopilot/faqs.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "FAQs" -hidden: true ---- diff --git a/ci-autopilot/overview/get-started/connect-to-github.mdx b/ci-autopilot/overview/get-started/connect-to-github.mdx deleted file mode 100644 index e115a8c..0000000 --- a/ci-autopilot/overview/get-started/connect-to-github.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Connect to GitHub" -description: "Get started with Trunk CI Autopilot to get root cause analysis with fixes for CI/test failures." ---- diff --git a/ci-autopilot/overview/get-started/index.mdx b/ci-autopilot/overview/get-started/index.mdx deleted file mode 100644 index 9c1c62d..0000000 --- a/ci-autopilot/overview/get-started/index.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Get Started" -description: "Get started with Trunk's CI Autopilot" ---- diff --git a/ci-autopilot/overview/get-started/upload-test-results.mdx b/ci-autopilot/overview/get-started/upload-test-results.mdx deleted file mode 100644 index 9f44947..0000000 --- a/ci-autopilot/overview/get-started/upload-test-results.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Upload test results" -description: "Enhance CI Autopilot by uploading test results" ---- diff --git a/ci-autopilot/overview/index.mdx b/ci-autopilot/overview/index.mdx deleted file mode 100644 index ab03fab..0000000 --- a/ci-autopilot/overview/index.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Overview" -description: "Trunk's CI Autopilot offers AI root cause analysis and fixes for test and CI failures" ---- diff --git a/ci-autopilot/overview/use-ci-autopilot/apply-fixes-with-mcp.mdx b/ci-autopilot/overview/use-ci-autopilot/apply-fixes-with-mcp.mdx deleted file mode 100644 index 8400515..0000000 --- a/ci-autopilot/overview/use-ci-autopilot/apply-fixes-with-mcp.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Apply fixes with MCP" -description: "Learn about how to apply fix recommendations with MCP" ---- diff --git 
a/ci-autopilot/overview/use-ci-autopilot/index.mdx b/ci-autopilot/overview/use-ci-autopilot/index.mdx deleted file mode 100644 index 74a3a3d..0000000 --- a/ci-autopilot/overview/use-ci-autopilot/index.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Use CI Autopilot" -description: "Use Trunk's CI Autopilot in pull requests or with MCP" ---- diff --git a/ci-autopilot/overview/use-ci-autopilot/request-fixes-on-prs.mdx b/ci-autopilot/overview/use-ci-autopilot/request-fixes-on-prs.mdx deleted file mode 100644 index be2ebba..0000000 --- a/ci-autopilot/overview/use-ci-autopilot/request-fixes-on-prs.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Request fixes on PRs" -description: "Learn about how to request fixes on failed pull requests" ---- diff --git a/ci-autopilot/overview/use-ci-autopilot/understand-root-cause-analysis.mdx b/ci-autopilot/overview/use-ci-autopilot/understand-root-cause-analysis.mdx deleted file mode 100644 index c1b1787..0000000 --- a/ci-autopilot/overview/use-ci-autopilot/understand-root-cause-analysis.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Understand root cause analysis" -description: "Learn about Trunk's CI Autopilot root cause analysis and fix suggestions for failed pull requests" ---- diff --git a/ci-autopilot/overview/use-dashboard/change-settings.mdx b/ci-autopilot/overview/use-dashboard/change-settings.mdx deleted file mode 100644 index 223980e..0000000 --- a/ci-autopilot/overview/use-dashboard/change-settings.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Change settings" -description: "Dashboard: Settings" ---- diff --git a/ci-autopilot/overview/use-dashboard/index.mdx b/ci-autopilot/overview/use-dashboard/index.mdx deleted file mode 100644 index 3b0b770..0000000 --- a/ci-autopilot/overview/use-dashboard/index.mdx +++ /dev/null @@ -1,3 +0,0 @@ ---- -title: "Use Dashboard" ---- diff --git a/ci-autopilot/overview/use-dashboard/review-activity.mdx b/ci-autopilot/overview/use-dashboard/review-activity.mdx deleted file mode 100644 
index 4b6f5e6..0000000 --- a/ci-autopilot/overview/use-dashboard/review-activity.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Review activity" -description: "Dashboard: Agent Activity Feed" ---- diff --git a/ci-autopilot/overview/use-dashboard/test-uploads.mdx b/ci-autopilot/overview/use-dashboard/test-uploads.mdx deleted file mode 100644 index 1984db9..0000000 --- a/ci-autopilot/overview/use-dashboard/test-uploads.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: "Test Uploads" -hidden: true ---- diff --git a/code-quality/ci-setup/general.mdx b/code-quality/ci-setup/general.mdx deleted file mode 100644 index f7d288c..0000000 --- a/code-quality/ci-setup/general.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: "Manual Setup" -description: "How to integrate Trunk Code Quality into CI for GitLab and other non-GitHub providers, or for GitHub without using the Trunk GitHub App" -layout: ---- - -If you use GitHub, we recommend you follow the [GitHub Integration](./get-started) guide. - - - - -If you're using GitHub but wish to setup up your own GitHub Actions Workflows, you can use the provided [Trunk GitHub Action](https://github.com/marketplace/actions/trunk-check). - -```yaml -name: Linter -on: - push: - branches: main - pull_request: - branches: main -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # ... other setup steps - - name: Trunk Check - uses: trunk-io/trunk-action@v1 - with: - post-annotations: true - # ... other CI steps -``` - - -GitLab performs a shallow clone by default which limits trunk's ability to detect the upstream commit to compute changes from. This is easily solved by simply fetching your main branch before running `trunk`: - -```bash -git fetch origin main -trunk check --ci -``` - - -If your default branch is named something else (e.g. `master`), you should `fetch` that branch inst - - - -`trunk check --ci` will work on any CI provider. 
- -You may also want to specify `--upstream` if, for example, your PRs are not merged into your default branch, but into a `develop` branch. - - - -## Caching and Persistence - -* Trunk caches the version of `trunk` itself, linters, formatters, and lint results, in `~/.cache/trunk` -* If your build machines are persistent, make sure this directory is not wiped out between CI jobs for best performance. If Trunk has to re-download every linter for every job because this directory is wiped out, it will be very slow. -* If your build machines are ephemeral, there are a few options for caching: - * CI systems have support for caching between CI jobs on ephemeral runners: - * [GitHub Actions](https://github.com/actions/cache) - * [CircleCI](https://circleci.com/docs/caching/) - * [Travis CI](https://docs.travis-ci.com/user/caching/) - * You can include a seeded trunk cache in a regularly updated image used for CI by running `trunk check download`, which will download all requirements to `~/.cache/trunk` - -## Running `trunk check` on Hourly/Nightly Builds - -If you'd like to setup `trunk check` to run on a hourly/nightly CI run or release branch we recommend running with the following command: - -```bash -trunk check --all --ci-progress --monitor=false -``` - -`--ci-progress` will print out the tool's progress every 30 seconds, whereas `--no-progress` will suppress any progress reporting. - -You can also explicitly set the upstream branch if needed via `--upstream`, but we do detect your main branch by default. - -### Uploading Results From Hourly/Nightly Builds - -Trunk Code Quality has the ability to post its results to [app.trunk.io](https://app.trunk.io/login?intent=code%20quality). This will enable you to view your repository's Code Quality history over time so you can track the trend of issues in your code, as well as browse the issues in your repository to help you understand which issues should be prioritized to fix. 
- -In order to keep the data up-to-date, you should upload Trunk Code Quality results regularly in an automated fashion. Depending on the size of your repository and the linters you have configured to run, running Trunk Code Quality on your whole repository may take a while. Because this run may take a while, we recommend uploading Trunk Code Quality results once daily. However, the system supports uploading results for every commit, so the granularity of upload is up to you. - -### Running `trunk check --upload` - - -Before running `trunk check --upload` you must have [connected your GitHub repository to your Trunk account](./get-started). - - -#### CI Setup for nightly uploads - -You can use the [Trunk GitHub Action](https://github.com/marketplace/actions/trunk-check) to upload results nightly for your main branch. You can provide it with a `trunk-token` by navigating to Settings → Repositories → \{your repository} and clicking "View API Token". - -Example nightly workflow to upload results: [`nightly.yaml`](https://github.com/trunk-io/trunk-action/blob/main/.github/workflows/nightly.yaml) - -#### Running `trunk check --upload` locally - -1. `trunk check --upload` is different than a normal `trunk check` invocation because we explicitly want the Trunk CLI to find all of the issues in the repository. Because of this, we recommend adding the `--all` flag to your `trunk check --upload` invocation. Keep in mind, this won't override the ignore settings in your `trunk.yaml` file. Any linter or file-level ignores you have configured will be honored by `trunk check --upload`. -2. `trunk check --upload` accepts the same flags and filters as `trunk check` that you run locally and for CI, and it also has the same runtime dependencies. -3. You should run your `trunk check --upload` command locally without the `--upload` flag to verify that it is working as expected. If you have a large repository or many checks enabled, `--all` may take a long time. 
In this case, remember to use `--sample`. -4. Required command line parameters - 1. `--token`: The Trunk API token for this repository. You can find this by navigating to Settings → Repositories → \{your repository} and clicking "View API Token". - 2. `--series`: This is the name of the time-series this upload run is a part of. We recommend using the name of the branch you are running `trunk check` on. For example, we run `trunk check --upload` regularly on our `main` branch, so we use `--series main`. You may instead prefer to track specific releases or tags, or create an experimental series. The series name does not need to match any git object, it is available as a way to organize your upload data. If you're unsure of what to use for `--series`, just use the name of your main branch (typically `main` or `master`) - -```bash -trunk check --all --upload --series main --token REDACTED -``` - -#### Troubleshooting - -Normally we infer repo information from the `origin` remote, however if you don't have an `origin` or for another git configuration reason it can't be inferred, it can be explicitly defined in `trunk.yaml`: - -1. Add a `repo` section to your Trunk config. This allows the Trunk CLI to connect with the appropriate repository in the Trunk system. - 1. `host`: Where your repository is hosted. Currently only GitHub is supported, so this value should be `github.com`, - 2. `owner`: The GitHub Owner of the repository, typically the first path section of your repository URL. For example, if we were connecting with [https://github.com/google/googletest](https://github.com/google/googletest), the `owner` would be `google`. - 3. `name`: The name of the repository. Continuing with our example above, the `name` would be `googletest`. 
- -This is what the `repo` section of your config would look like if your repository was hosted at [https://github.com/google/googletest](https://github.com/google/googletest) - -```yaml -repo: - repo: - host: github.com - owner: google - name: googletest -``` - -Note the repo/repo nested structure. diff --git a/code-quality/ci-setup/get-started.mdx b/code-quality/ci-setup/get-started.mdx deleted file mode 100644 index 63ac228..0000000 --- a/code-quality/ci-setup/get-started.mdx +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: "GitHub Integration" -description: "Automate your code quality enforcement with just a few clicks." ---- -Trunk Code Quality has the ability to post its results to the [Trunk Code Quality web app](https://app.trunk.io/login?intent=code%20quality). This will enable you to view your repository's Code Quality history over time so you can track the trend of issues in your code, as well as browse the issues in your repository to help you understand which issues should be prioritized to fix. - -## Connect your Trunk organization to GitHub - -Sign up at [app.trunk.io](https://app.trunk.io/signup?intent=code%20quality), create a Trunk organization, and connect it to your repositories. You will need to grant the following [GitHub App permissions](/setup-and-administration/github-app-permissions). - - - -## Set Up Trunk Code Quality - -Once your Trunk organization is connected to GitHub, create a .trunk repo in your account or organization and grant Trunk permissions to access the repo. The .trunk repo will hold the workflows to scan your codebase and pull requests. [Learn more about the .trunk repo](/code-quality/overview/setup-and-installation/github-integration#what-is-a-.trunk-repository). - - - -## Configure Slack Notifications (optional) - -If you would like to receive notifications for new issues Trunk finds in your repo, you can configure Trunk to be connected to Slack. - - - -## Use it! 
- -### Ensure that PRs are free of issues - -Check out [this example](https://github.com/trunk-io/plugins/pull/424/checks?check\_run\_id=15730277425) in our `plugins` repository! - -
- - -![](/assets/image_(35).png) - - -
- -## Scanning your repository - -Trunk Code Quality can scan your repository for Code Quality issues on a daily cadence, upload them to Trunk for you to review at your convenience, and notify you via Slack whenever new issues are discovered in your repository. - -This allows you to build confidence in the code health of your repositories: - -* You will be alerted quickly in a [Heartbleed-type](https://heartbleed.com/) event, giving you assurances about whether or not a newly discovered vulnerability affects any of your repositories, and -* You can monitor how many Code Quality issues exist in each of your repositories and make data-driven decisions about prioritizing efforts to reduce tech debt - -If you don't want Trunk Code Quality to scan your repository on a daily cadence or notify you, you can turn it off in [your repository's settings](https://app.trunk.io/signup?intent=code%20quality). - - -![](/assets/Screenshot_2023-08-23_173119.png) - - -### Get Slack notifications about new issues in your repository - -Not only do our daily scans allow you to browse and triage the issues in your repository, but they can also notify you when new security issues are discovered in packages you already depend on. - -
- - -![](/assets/Screenshot_2023-08-23_173252.png) - - -
- -## Checking pull requests - -Trunk Code Quality can automatically detect new Code Quality issues on your pull requests and flag them so that you can prevent pull requests from introducing any new issues in your repository. - -When running on a pull request, Trunk Code Quality will only flag _new_ issues, not existing ones, so that your engineers don't have to fix pre-existing linter issues in every file they touch - this is the same [hold-the-line technology](/merge-queue/reference/how-does-it-work#hold-the-line) that our VSCode extension and CLI use. - - - -To confirm that you've fixed issues identified by Trunk Code Quality before pushing your pull request, just run `trunk check`. - -If Trunk continues to identify new Code Quality issues on your PR, first try merging the latest changes from your base branch. When Trunk runs on a PR, it runs on a commit that merges your PR into its base branch, just like GitHub workflows. - -If this continues to fail, then run `git checkout refs/pull//merge && trunk check`. This is a reference to the merge commit GitHub creates. - - - -You can include `/trunk skip-check` in the body of a PR description (i.e. the first comment on a given PR) to mark Trunk Code Quality as "skipped". Trunk Code Quality will still run on your PR and report issues, but this will allow the PR to pass a GitHub required status check on `Trunk Check`. - -This can be helpful if Code Quality is flagging known issues in a given PR that you don't want to [ignore](/code-quality/overview/linters/ignoring-issues-and-files), which can come in handy if you're doing a large refactor. - - - -If you don't want Trunk Code Quality to run on pull requests, turn it off in [your repository's settings](https://app.trunk.io/login?intent=code%20quality). - -### Uploading Results - -The upload feature of Trunk Code Quality will upload all of the issues found by Trunk to the Trunk services. 
In order to get an accurate picture of the state of your repository, you'll want to upload all of the Trunk Code Quality issues for your whole repository. - -Generally this should be done within your Continuous Integration system (CI) automatically whenever **pull requests are filed or pushed to a specific branch** in your repo. Trunk Code Quality can also **run periodically** to check for new vulnerabilities in your dependencies. - -### How Does It Work? - -Under the hood, the GitHub integration does the following to your organization to enable Trunk Code Quality in GitHub Actions Workflows: - -* An installation of the Trunk.io GitHub app in your GitHub organization -* A `.trunk` repository in your GitHub organization. - -### What is a `.trunk` repository? - -The `.trunk` repository contains the workflows run to scan your codebase and pull requests. We recommend creating a `.trunk` repository in your GitHub organization using [this template repository](https://github.com/trunk-io/.trunk-template). - -Your `.trunk` repository must be added to your Trunk GitHub app installation. You can verify this by navigating to: `https://github.com/organizations//settings/installations`, clicking "configure" next to Trunk-io, and verifying that the repository access is either "All repositories" or that your `.trunk` repository is selected. - -To find Code Quality issues in your repositories and pull requests, we dispatch GitHub Actions workflows in your `.trunk` repository, which check out your repositories and pull requests and then run `trunk check` in them. This strategy allows you to: - -* start using Trunk Code Quality in all your repositories without any configuration, and -* be in full control over the environment where we analyze your code, since we're running on your GitHub Actions runners. 
- - -🚧 `.trunk` should have private visibility - -Since we use workflow runs in `.trunk` to analyze any repository in your organization and record Code Quality findings, you should think carefully about who has permissions to view workflow runs in your `.trunk` repository. For most organizations, simply making your `.trunk` repository private will be sufficient. - - -## (optional) Custom setup logic - -If you need to do some setup before `trunk check` runs in `your-org/your-repo`, you can [define a GitHub composite action](https://docs.github.com/en/actions/creating-actions/creating-a-composite-action) in `.trunk/setup-ci/action.yaml` in `your-repo`. This can be important if, for example, a linter needs some generated code to be present before it can run: - -```yaml -name: Trunk Code Quality setup -description: Set up dependencies for Trunk Code Quality - -runs: - using: composite - steps: - - name: Build required trunk check inputs - shell: bash - run: bazel build ... --build_tag_filters=pre-lint - - - name: Install eslint dependencies - shell: bash - run: npm install -``` - -Read more in the documentation for [our GitHub Action](https://github.com/trunk-io/trunk-action#custom-setup). diff --git a/code-quality/overview/cli/getting-started/actions/git-hooks.mdx b/code-quality/overview/cli/getting-started/actions/git-hooks.mdx deleted file mode 100644 index af37701..0000000 --- a/code-quality/overview/cli/getting-started/actions/git-hooks.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Git Hooks" -description: "Trunk supports triggering actions on all githooks" ---- -### Features - -* Seamlessly bring `git-hooks` under version control. `git-hooks` can be a major headache for organizations - they require manual installation and are not easily versioned along with the rest of your code. 
-* Take advantage of Trunk's powerful sandboxing and environment management to write and execute hooks using the programming language and runtime of your choice, as opposed to dealing with complicated bash scripts. - -### Manual installation - -```bash -trunk git-hooks sync -``` - -### Automatic Installation - -Trunk will automatically install and begin managing your `githooks` if you have any actions enabled in `trunk.yaml` which trigger from git events. - -### Triggering an action from a githook - -As an example let's examine how we implement the `git-lfs` action in the [plugins repo](https://github.com/trunk-io/plugins). - -#### Definition - -```yaml -- id: git-lfs - display_name: Git LFS - description: Git LFS hooks - run: git lfs "${hook}" "${@}" - triggers: - - git_hooks: [post-checkout, post-commit, post-merge, pre-push] -``` - -#### Template resolution - -As documented by [git](https://git-scm.com/docs/githooks), each githook generates a variable number of parameters that can be referenced in the `run` entry for the action. - -The following special variables are made available for template resolution when reacting to a git event: - -| Variable | Description | -| ----------------------------- | --------------------------------------------------------------- | -| `${hook}` | Hook that triggered this action (e.g. `pre-commit`, `pre-push`) | -| `${1}`,`${2}`, `${3}`, etc... | Positional parameters passed by `git` to the hook | -| `${@}` | All parameters passed to the hook | - -#### Interactivity - -```yaml -interactive: true -``` - -Setting `interactive` to true will allow your githook action to be run from an interactive terminal. This enables you to write more complicated hooks to react to user input. - -#### Testing a `githook` action - -The following command will simulate a githook event and execute all of the enabled actions for the provided hook in the order you defined them. 
- -```bash -trunk git-hooks callback -- -``` - -Alternatively, once an action is enabled you can call `git` and debug with the actual `git` provided data. This is sometimes easier since some git parameters point to txt files and fabricating those formats through manual testing can be tricky. - -#### Debugging a `githook` action - -You can observe the actions that are triggered by a `git` event by calling: - -```bash -trunk actions history -``` - -Which will print out the last 10 executions including timestamps of the specified action \\ - - -![](/assets/image_(18).png) - - -### Uninstalling - -Remove all actions that are triggered by githooks from `trunk.yaml` and run - -```bash -git config --unset core.hooksPath -``` diff --git a/code-quality/overview/cli/getting-started/actions/index.mdx b/code-quality/overview/cli/getting-started/actions/index.mdx deleted file mode 100644 index ca921c7..0000000 --- a/code-quality/overview/cli/getting-started/actions/index.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Actions" ---- -The most common Trunk Actions are provided out of the box with trunk, and are triggered to invisibly autoformat (`trunk fmt`) your commits every time you `git commit`, and run `trunk check` when you `git push`. - -### Triggers - -There are several different types of Trunk Actions, based on when they are triggered: - -| Trigger | Description | -|---|---| -| time-based | run on a schedule (once per hour, once per day, once per week) | -| file modification | run whenever a file or directory in your repo changes. | -| [githooks](./git-hooks) | run whenever a listed githook event fires (e.g. 
pre-commit, on-push) | -| manual | `trunk run ` | - -### **Command line** - -| trunk actions \ | Description | -|---|---| -| `list` | list all available actions in the repository | -| `history ` | print the history for execution of the provided action | -| `enable ` | enable the provided action | -| `disable ` | disable the provided action | -| `run ` | manually trigger the provided action
alias: `trunk run ` | - -### Discovering actions - -The trunk [plugins](https://github.com/trunk-io/plugins) repo ships with a collection of actions that can help supercharge your repository and provide examples of how to write your own actions. To see a list of actions that you can enable in your repo run: - -```bash -trunk actions list -``` - - -![](/assets/image_(27).png) - - -### Enable/Disable actions - -Trunk only runs actions listed in the `enabled` section of your `trunk.yaml`. Some built-in actions are enabled by default and can be disabled explicitly by adding them to the disabled list. You can always run `trunk actions list` to check the enabled status of an action. - -```yaml -actions: - enabled: - - trunk-announce - - git-lfs - - trunk-check-pre-push - - trunk-fmt-pre-commit - - trunk-cache-prune - - trunk-upgrade-available -``` diff --git a/code-quality/overview/cli/getting-started/announce.mdx b/code-quality/overview/cli/getting-started/announce.mdx deleted file mode 100644 index 351b648..0000000 --- a/code-quality/overview/cli/getting-started/announce.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Announce" ---- -### Trunk Announce - -Does your Git commit carry some important information to share with the rest of your organization? Now you can easily share it with the rest of the org by including `/trunk announce` at the beginning of one of the lines of your commit message. - - -If your org squashes commit messages, you should put it in your PR description - - -Any additional text on that line will form an optional title, and the remaining text of the commit message will form the commit body (both are optional, but either a title or body is required). These will then be displayed to other users when they pull or rebase. - -### Enable Trunk Announce - -Trunk Announce is a githook-triggered Trunk Action. 
You can enable this Trunk Action by running this command: - -``` -trunk actions enable trunk-announce -``` - -### Viewing Announcements - -When you pull new changes, new announcements are automatically shown. - -If you would like to see changes since some commit, use `trunk show-announcements since `. - -For example: - -``` - trunk show-announcements since HEAD~1 -``` diff --git a/code-quality/overview/cli/getting-started/caching.mdx b/code-quality/overview/cli/getting-started/caching.mdx deleted file mode 100644 index 42fa831..0000000 --- a/code-quality/overview/cli/getting-started/caching.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Caching" ---- -Trunk hermetically manages all the tools that it runs. To do this, it will download and install them into its cache folder only when needed. On Linux and macOS you may find the cache folder at `$HOME/.cache/trunk`. - -### Viewing your repo's cache - -If you need to debug your repo's cache, you can find its location by running the cache command. - -``` -trunk cache -``` - -### Cleaning cache - -Trunk will automatically clean up downloads that have not been used in a while, such as old versions of tools and linters. - -If you want to manually prune files in your cache directory that are no longer needed, you can run this command: - -``` -trunk cache prune -``` - -If you need to clean your entire cache manually, you can use the command: - -```sh -trunk cache clean --all -``` - -Remember to rerun the install command to reinstall the necessary tools and linters. - -``` -trunk install -``` diff --git a/code-quality/overview/cli/getting-started/code-quality.mdx b/code-quality/overview/cli/getting-started/code-quality.mdx deleted file mode 100644 index 5a50b5d..0000000 --- a/code-quality/overview/cli/getting-started/code-quality.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "Code Quality" -description: "CLI Metalinter and VSCode extension for over 100 code checking tools." 
---- - -Available as a CLI tool and VSCode extension, Code Quality is a separate from the Trunk Platform for CI Stability, which includes [Merge Queue](../../../broken-reference/) and [Flaky Tests](../../../broken-reference/). Code Quality runs entirely locally and does not require access to the Trunk web app or platform services. - - -Trunk Code Quality is a **metalinter** that lets you lint every language and every file in your project with a single tool using 100+ supported idiomatic code-checking tools, such as ESLint, Prettier, Ruff, and more for every language and project. - -Trunk Code Quality is trusted by popular open-source projects like [**ESLint**](https://eslint.org/) to improve their developer experience. [Learn more about how ESLint leverages Code Quality in their repos](https://trunk.io/blog/improving-linting-experience-in-eslint-s-open-source-repo-with-trunk-code-quality). - -### What is Code Quality? - - - - A tour of Code Quality, what it does, its key features, and its components. - - - How Code Quality works under the hood to level up your linting experience. - - - What makes Trunk Code Quality different from other metalinters. - - - Browse the 100+ supported static analysis tools to lint, format, and secure your projects. - - - -### How do I get started? - - - - - diff --git a/code-quality/overview/cli/getting-started/commands-reference/actions.mdx b/code-quality/overview/cli/getting-started/commands-reference/actions.mdx deleted file mode 100644 index be1cb78..0000000 --- a/code-quality/overview/cli/getting-started/commands-reference/actions.mdx +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: "Actions" ---- -### Trunk Actions - -`trunk actions`: Workflow automation for your repo. 
- -#### **Usage** **example** - -``` -trunk actions [options] [subcommand] -``` - -#### Options - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions run - -`trunk actions run`: Run a specified trunk action. **Usage** **bash** - -``` -trunk actions run [options] -``` - -#### **Options** - -* `--nolog`: Don't create a log file for the action run -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output -* `--name `: Specify the name of the Trunk action to be executed -* `--branch `: Run the action on a specific branch -* `--retry `: Number of times to retry the action on failure - -### Trunk Actions history - -`trunk actions history`: View the history of Trunk actions. - -#### **Usage** example - -``` -trunk actions history [options] -``` - -#### **Options** - -* `--count`: Number of logs to show -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. 
-* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions list - -`trunk actions list`: List all Trunk actions. - -#### **Usage** example - -``` -trunk actions list [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions enable - -`trunk actions enable`: Enable a specified Trunk action. - -#### **Usage** example - -``` -trunk actions enable [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions disable - -`trunk actions disable`: Disable a specified Trunk action. - -#### **Usage** example - -``` -trunk actions disable [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. 
-* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Shellhooks - -`trunk shellhooks`: Let Trunk manage your shell hooks similar to `direnvs` trunk shellhooks install \ - -#### **Usage** example - -``` -trunk shellhooks install [options] -``` - -### Trunk Git Hooks - -`trunk git-hooks sync`: Sync githooks with what's defined in `trunk.yaml` - -#### **Usage** example - -``` -trunk git-hook sync [options] -``` - -### Trunk show announcements since a commit - -**`trunk show-announcements since`**: Show announcements since a specified commit - -#### **Usage** example: - -```sh -trunk show-announcements since --commit abc123 -``` - -#### **Options**: - -* `--color`: Enable/disable color output -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--no-progress`: Don't show progress updates -* `--ci`: Run in continuous integration mode -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--version`: The version - -### **Trunk show announcements post-merge** - -**`trunk show-announcements post-merge`**: Run on git pull/merge, usually run by a git-hook and not directly. - -**Usage Example**: - -```sh -trunk show-announcements post-merge --verbose -``` - -### **Trunk show announcements pre-rebase** - -**`trunk show-announcements pre-rebase`**: Run on git pre-rebase, usually run by a git-hook and not directly. - -#### **Usage** example: - -```sh -trunk show-announcements pre-rebase [options] [branch-refs...] -``` - -### **Trunk show announcements post-checkout** - -**`trunk show-announcements post-checkout`**: Run on git checkout/switch, usually run by a git-hook and not directly. - -#### **Usage** example:: - -```sh -trunk show-announcements post-checkout [options] [branch-refs...] 
-``` diff --git a/code-quality/overview/cli/getting-started/commands-reference/code-quality.mdx b/code-quality/overview/cli/getting-started/commands-reference/code-quality.mdx deleted file mode 100644 index ac50dd6..0000000 --- a/code-quality/overview/cli/getting-started/commands-reference/code-quality.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Code Quality" ---- -### trunk check - -`trunk check`: Universal code checker. - -#### **Usage** **example** - -``` -trunk check [options] -``` - -#### Filtering options - -* `-a, --all`: Check all files instead of only changed files -* `--sample`: Run each linter on N files -* `--filter`: Comma-separated list of linters and/or issue codes to include or exclude -* `--exclude`: Shorthand for an inverse --filter -* `--scope`: Scope of checks to run \{all | security} -* `--ignore`: Glob pattern to exclude files from linting -* `--force`: Run on all files, even if ignored -* `--include-existing-autofixes`: Include existing issues that can be autofixed - -#### **CI** options - -* `--ci`: Run in non-interactive mode designed for CI environments -* `-j`, `--jobs`: Number of concurrent jobs - -#### Git Hooks options - -* `--index`: Run linter on git-indexed files -* `--index-file`: Run linter on git-indexed files based on specified index -* `--commit-ref`: Commit ref to lint (instead of current working tree) -* `--commit-ref-from-pre-push`: Commit ref to lint from the stdin of a pre-push git hook (instead of the current working tree) - -#### Output options - -* `--show-existing`: Show existing issues otherwise hidden by -* `--print-failures`: Print any failures that occur -* `--diff`: Diff printing mode \{none | compact | full} -* `-v, --verbose`: Show verbose output for debugging purposes -* `--debug`: Show debug output - -#### Behavior options - -* `-y, --fix`: Automatically apply all fixes without prompting -* `-n, --no-fix`: Don't automatically apply fixes -* `--cache`: Disable to skip cache for all check actions -* 
`--ignore-git-state`: Run linters even if a merge, rebase, or revert is in progress -* `--upstream`: Upstream branch used to compute changed files - -### Trunk Check Enable Linter - -`trunk check enable`: Enable linters for trunk check. - -#### **Usage** **example** - -``` -trunk check enable [options] -``` - -### Trunk Check Disable Linter - -`trunk check disable`: Disable linters for trunk check. - -#### **Usage** **example** - -``` -trunk check disable [options] -``` - -### Trunk Check List Linters - -`trunk check list`: List linters for trunk check. - -#### **Usage** **example** - -``` -trunk check list [options] -``` - -### Trunk Check Run Format - -`trunk fmt`: List linters for trunk check. - -#### **Usage** **example** - -``` -trunk fmt [options] -``` - -#### **Options** - -#### Filtering options - -* `-a, --all`: Check all files instead of only changed files -* `--filter`: Comma-separated list of linters and/or issue codes to include or exclude -* `--exclude`: Shorthand for an inverse --filter -* `--scope`: Scope of checks to run \{all | security} -* `--ignore`: Glob pattern to exclude files from linting -* `--force`: Run on all files, even if ignored -* `--show-existing`: Show existing issues otherwise hidden by [hold-the-line](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line) -* `--ignore-git-state`: Run linters even if a merge, rebase, or revert is in progress - -#### Git Hooks options - -* `--index`: Run linter on git-indexed files -* `--index-file`: Run linter on git-indexed files based on specified index -* `--commit-ref`: Commit ref to lint (instead of current working tree) -* `--commit-ref-from-pre-push`: Commit ref to lint from the stdin of a pre-push git hook (instead of the current working tree) - -#### Output options - -* `--show-existing`: Show existing issues otherwise hidden by -* `--print-failures`: Print any failures that occur -* `--diff`: Diff printing mode \{none | compact | full} -* `-v, --verbose`: Show verbose output for debugging 
purposes -* `--debug`: Show debug output - -#### Behavior options - -* `-y, --fix`: Automatically apply all fixes without prompting -* `-n, --no-fix`: Don't automatically apply fixes -* `--cache`: Disable to skip cache for all check actions -* `--ignore-git-state`: Run linters even if a merge, rebase, or revert is in progress -* `--upstream`: Upstream branch used to compute changed files -* `-j`, `--jobs`: Number of concurrent jobs - -## Advanced Trunk Check features - -| Options & Flags | Explanation | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `--root` | Explicitly set the root of the repository to run against | -| `--upstream` | Specify the upstream branch used to calculate new vs existing issued. | -| `--trigger` | Supports running trunk check from inside a git hook. Options are manual (default), git-push, git-commit. Controls whether the check returns early and its interactivity. | -| `--output=format` | Output results in specified format: `text` (default) or `json` | -| `--output-file=FILE` | Write json results to specified file | - -#### --filter - -`--filter` argument allows you to restrict `trunk check` to a subset of the linters enabled in your repository. - -For example, to run `eslint` and `isort` on the entire repo: - -```bash -trunk check --all --filter=eslint,isort -``` - -Alternatively, to run every linter _except_ `clang-tidy` and `shellcheck`: - -```bash -trunk check --all --filter=-clang-tidy,-shellcheck -``` - -#### --sample - -`--sample=N` will attempt to run every enabled linter against the requested number of files. The goal of the `sample` flag is to test the setup of the linters in your repository as well as any specific configuration they might honor. 
- -The sample command will attempt to run each linter N times, but may run fewer if not enough applicable files exist in your set of files to lint. `--sample=N` can be combined with any other set of options for `trunk check`. - -For example, to run `prettier` against 10 different prettier supported files: - -```bash -trunk check --sample=10 --filter=prettier -``` - -Alternatively, to run every linter at most 5 times against its supported files: - -```bash -trunk check --sample=5 -``` diff --git a/code-quality/overview/cli/getting-started/commands-reference/index.mdx b/code-quality/overview/cli/getting-started/commands-reference/index.mdx deleted file mode 100644 index c9a65d9..0000000 --- a/code-quality/overview/cli/getting-started/commands-reference/index.mdx +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: "Commands reference" ---- -### trunk init - -`trunk init`: Set up trunk in this repo. - -#### **Usage** Example - -``` -trunk init -``` - -### trunk version - -`trunk version`: Output the version. - -#### **Usage** example - -``` -trunk version -``` - -### trunk upgrade - -`trunk upgrade`: Upgrade Trunk and its linters to the latest releases. - -#### **Usage** **example** - -``` -trunk upgrade [options] -``` - -#### **Options** - -* `-y, --yes-to-all`: Answer yes to all upgrade prompts -* `-n, --no-to-all`: Answer no to all upgrade prompts -* `--apply-to`: Apply upgrades to a specified file -* `--filter`: Filter the upgraded linters -* `--dry-run`: Detect available upgrades, but do not apply changes - -### trunk login - -`trunk login`: Login to trunk.io. - -#### **Usage** example - -``` -trunk login -``` - -### trunk logout - -`trunk logout`: Logout from trunk.io. - -#### **Usage** example - -``` -trunk logout -``` - -### trunk plugins add - -`trunk plugins add`: Add a plugin by URI. - -#### **Usage** example - -``` -trunk plugins [uri] [ref] [options] -``` - -### trunk tools - -`trunk tools`: Universal tool manager. 
- -#### **Usage** example - -``` -trunk tools [options] -``` - -### trunk daemon status - -Report the status of the daemon. - -#### **Usage** example - -``` -trunk daemon status -``` - -### trunk daemon start - -Start the trunk daemon in the background if it’s not already running. - -#### **Usage** example - -``` -trunk daemon start -``` - -### **trunk daemon shutdown** - -`trunk daemon shutdown`: Shutdown the trunk daemon if it is running. - -#### **Usage** example - -``` -trunk daemon shutdown -``` - -### **trunk daemon launch** - -`trunk daemon launch`: Start the trunk daemon in the foreground if it’s not already running. - -#### **Usage** example - -``` -trunk daemon launch -``` - -### trunk whoami - -`trunk whoami`: print who you're logged in as - -#### **Usage** example - -``` -trunk whoami -``` - -### trunk deinit - -`trunk deinit`: Deinitialize Trunk in your repo - -#### **Usage** example - -``` -trunk deinit [options] -``` - -#### **Options** - -* `-y`, `--yes`: Proceed unconditionally -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### trunk config share - -`trunk config share`: Remove Trunk config files from your local git ignores. - -#### **Usage** example - -``` -trunk config share -``` - -### trunk config hide - -`trunk config hide`: Add Trunk config files to your local git ignores. - -#### **Usage** example - -``` -trunk config hide -``` - -### trunk config print - -`trunk config print`: Print the resolved trunk config. - -#### **Usage** example - -``` -trunk config print -``` - -### trunk cache clean - -`trunk cache clean`: Clean cached files used by Trunk. - -#### **Usage** Example - -``` -trunk cache clean -``` - -### trunk cache prune - -`trunk cache prune`: Prune unused cached files. - -#### **Usage** example - -``` -trunk cache clean -``` - -### trunk install - -`trunk install`: Download & install enabled runtimes/linters. 
- -#### **Usage** example - -``` -trunk install [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output diff --git a/code-quality/overview/cli/getting-started/compatibility.mdx b/code-quality/overview/cli/getting-started/compatibility.mdx deleted file mode 100644 index 35de783..0000000 --- a/code-quality/overview/cli/getting-started/compatibility.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: "Compatibility" ---- -### Linux - -Trunk will run on most Linux flavors, including Ubuntu, Arch, and others. We do require glibc version 2.19 or later. Alpine Linux is not supported. - -### macOS - -Trunk will run on macOS version 10.15 or later. - -### Windows - -Trunk only supports Windows with the following versions and above: - -| Tool | Where to Modify | Minimum Required Version | -|---|---|---| -| CLI | `cli` `version` in `.trunk/trunk.yaml` | `1.13.0` | -| Plugins | `ref` for the `trunk` plugin in `.trunk/trunk.yaml` | `v1.0.0` | -| VSCode | Reload VSCode to update | `3.4.4` | - -You will also need to install [C and C++ runtime libraries](https://aka.ms/vs/17/release/vc_redist.x64.exe) to run some linters. - -#### Getting in touch - -Thank you for being a beta tester of Trunk Check on Windows! We are actively working to improve the experience. If you have any feedback or questions, please contact us directly on [Slack](https://slack.trunk.io/). - -If you want to override a repo-wide setting just for your Windows machine, you can modify your [`.trunk/user.yaml`](./configuration/per-user-overrides). 
- -#### Supported features - -We intend to bring full feature support to Windows for Trunk. Currently, the following features are supported: - -* [Trunk Code Quality](./code-quality) -* Non-interactive [Trunk Actions](./actions/) and [git-hooks](./actions/git-hooks) -* [VSCode](../../ide-integration/vscode) - -### Plugin compatibility - - -This section was last updated for Plugins v1.2.0 - - -Trunk runs most linters on all platforms. However, some linters are not yet supported on Windows. For a full list of all linters, see our [Plugins repo](https://github.com/trunk-io/plugins). - -| Linter | Plans for Support | -|---|---| -| ansible-lint | Only supported on WSL | -| clang-format | Long-term plans for LLVM linter support | -| clang-tidy | Long-term plans for LLVM linter support | -| detekt-gradle | Long-term plans for support | -| include-what-you-use | Long-term plans for LLVM linter support | -| nixpkgs-fmt | Long-term plans for support | -| perlcritic | No immediate plans for support | -| perltidy | No immediate plans for support | -| scalafmt | No download available for Windows | -| semgrep | No download available for Windows | -| shellcheck | No download available for Windows | -| stringslint | Only supported on MacOS | -| swiftformat | Only supported on MacOS | -| swiftlint | Only supported on MacOS | -| taplo | No download available for Windows | - -### Backward compatibility - -We generally strive to maintain backward compatibility between the [Trunk Launcher](./install#the-trunk-launcher) and the Trunk binary, but you may need to occasionally upgrade the launcher to support the newest version of Trunk. 
diff --git a/code-quality/overview/cli/getting-started/configuration/actions/index.mdx b/code-quality/overview/cli/getting-started/configuration/actions/index.mdx deleted file mode 100644 index f28ca1d..0000000 --- a/code-quality/overview/cli/getting-started/configuration/actions/index.mdx +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: "Actions" ---- -Actions are defined and enabled in the `actions` section of `trunk.yaml`. - -Here is an example of the actions section of `trunk.yaml`. If you are curious what your resolved configuration for actions looks like, run `trunk config print`. - -```yaml -actions: - enabled: - - trunk-announce - - trunk-upgrade-available - - npm-install - - seed-database - - custom-git-hook - - login - definitions: - - id: npm-install - triggers: - - files: [package.json] - run: npm install - - id: seed-database - triggers: - - schedule: 24h - run: python3 seed_database.py - runtime: python - run_from: utils - packages_file: requirements.txt - - id: custom-git-hook - triggers: - - git_hooks: [pre-push, pre-commit] - run: my_script.sh - - id: login - run: my_complicated_login_script.sh - interactive: true -``` - -### Action Definitions - -Now we'll walk through the process of creating your own action. - -Actions are required to have a `id` and `run` command. - -The command will implicitly run relative to your workspace, but you can also specify a `run_from` if you'd prefer to execute from a sub-directory. - -#### Runtime management - -We sandbox action executions and allow you to control the runtime. You can do this by specifying a `runtime` and `packages_file`. - -You can specify one of our built-in runtimes (`node`, `python`, ...) or a system runtime that you define. See the [runtimes documentation](../runtimes) for more information. - -For the `python` and `node` runtimes, we additionally provide the ability to install a requirements file like `requirements.txt` or `package.json`. 
 - -### Triggers - -You can run actions manually, or you can also provide a set of triggers so that actions run in response to some event. They are documented below. - -#### Manual runs - -You may run an action manually by running `trunk run ` or `trunk actions run `. - -For manually triggered runs, we support the `${@}` and `${pwd}` variables for template resolution in the `run` declaration. `${@}` will be replaced with the arguments passed to the action, and `${pwd}` will be replaced with the directory the action is triggered from. - -```yaml -id: my-action -run: echo "The action was run from ${pwd} with arguments ${@}" -``` - -#### Time-based triggers - -We provide the ability to run actions in the background on a schedule. - -Under `triggers`, you can add one or more `schedule` entries. For example: - -```yaml -id: my-action -triggers: - - schedule: 1d -``` - -The `schedule` entry should be in the Duration format specified [here](https://pkg.go.dev/time#ParseDuration). The action will be run once per `duration`. - -This is a short-hand for specifying schedule as an object. You can also write: - -```yaml -id: my-action -triggers: - - schedule: - interval: 1d -``` - -The action may occasionally run more often than the specified duration depending on the Trunk daemon's lifetime. - -If you wish to stagger the execution of an action from others on a similar schedule, you may use the `delay` field: - -```yaml -id: my-action -triggers: - - schedule: - interval: 1d - delay: 1h -``` - -You may also use cron syntax: - -```yaml - id: my-action - triggers: - # run every 2 hours - - schedule: "0 0 */2 * * ?" -``` - -or equivalently: - -```yaml - id: my-action - triggers: - # run every 2 hours - - schedule: - cron: "0 0 */2 * * ?" -``` - -#### File-based triggers - -We provide the ability to run actions automatically based on a file edit. - -You may provide exact filenames, or globs. 
- -```yaml -id: my-action -triggers: - - files: [foo.txt, bar/**] -``` - -In this case `my-action` will execute if either `foo.txt` is edited (or created), or if a file inside `bar` is edited or created. - -In case you need to know which file triggered the action, you can use the `${target}` variable in the `run` command. - -```yaml -id: my-action -triggers: - - files: [foo.txt, bar/**] -run: echo "The file ${target} was edited" -``` - -If you do a bulk file modification, the `${target}` template may resolve to a space-separated list of files that were simultaneously edited. - -> Note: We only provide file triggers for files inside of your workspace. - -#### Git hooks - -You can also configure Trunk to manage your git hooks. More detail is provided on this in our [git hooks reference](../../actions/git-hooks). - -### Interactivity - -Actions can read from `stdin` if they are marked as interactive (define `interactive: true` on the action). Note: this feature is only available for git hooks and manually run actions - since file-triggered and scheduled actions run in the background, you cannot interact with their execution. diff --git a/code-quality/overview/cli/getting-started/configuration/actions/logging-and-troubleshooting.mdx b/code-quality/overview/cli/getting-started/configuration/actions/logging-and-troubleshooting.mdx deleted file mode 100644 index b490a1a..0000000 --- a/code-quality/overview/cli/getting-started/configuration/actions/logging-and-troubleshooting.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "Logging and Troubleshooting" -description: "Diagnosing problems with actions" ---- -We provide a number of tools for inspecting the results of actions that run in the background and wouldn't otherwise surface their errors. - -Every action execution is logged. We consider an action execution to have failed if it has a non-zero exit code. - -`trunk actions history ` gives a history of the recent runs of an action and whether it succeeded. 
You can control how many recent runs to show with the `--count` flag (for example, `trunk actions history trunk-upgrade-available --count=10`). When available, a full stacktrace is written to a file and made available. - -Failed action executions will also produce a notification so that background failures are periodically surfaced to the user. - -You can also inspect action logs at `.trunk/out/actions//`. - -We recommend running actions manually when you develop them to verify that they work correctly. - -### Output Level - -To see a more verbose output when running trunk actions, particularly from git-hooks, you can add the following to your `trunk.yaml`: - -```yaml -actions: - output_level: -``` diff --git a/code-quality/overview/cli/getting-started/configuration/actions/notifications.mdx b/code-quality/overview/cli/getting-started/configuration/actions/notifications.mdx deleted file mode 100644 index 5d78091..0000000 --- a/code-quality/overview/cli/getting-started/configuration/actions/notifications.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Notifications" -description: "Trunk Actions can also produce notifications to display in your terminal or in the VSCode extension!" ---- -### Defining actions that produce notifications - -Typically, whatever actions write to stdout are stored in the log file and perhaps shown to the user. However, actions can also produce structured output if `output_type` is set on the Action Definition to be `notification_v1`. - -In this case, the action should print yaml to output with the following structure: - -```yaml -notifications: - - id: action-id - # Display-related fields - title: My action - message: some text about the notification - rendered: A rendered message string for color terminals - icon: https://uri/to/icon - commands: - - title: A button title - run: a run command - run_from: directory to run from - priority: high # Can be one of low, high (default low) -``` - -Some notes: - -1. 
The ID can be whatever you want it to be, but generally should be made to match the action ID. -2. You may emit multiple notifications per action. -3. `icon` and `commands` are used to control notifications display in VSCode. -4. High-priority notifications are immediately shown to the user in terminal. Low-priority notifications are only shown every 24 hours (These are configurable). - -### Deleting notifications - -Actions can also clear their own notifications. In this case, make the output look like this: - -```yaml -notifications_to_delete: [action-id] -``` - -If actions produce a notification that is reflective of a current state or something actionable for the user to do, they may clear the notification once that state changes/when the user takes the requested action. - -### An example - -We illustrate the cycle of actions managing their own notifications with the following example. - -Consider the built-in action for `trunk upgrade` - a command that upgrades trunk and a repo's enabled linters to their most recent versions. We'd like to notify the user of new upgrades once a day. 
Thus our `trunk-upgrade-available` action definition looks like this: - -```yaml -id: trunk-upgrade-available -output_type: notification_v1 -run: trunk upgrade --notify -triggers: - - schedule: 1h - - files: [.trunk/trunk.yaml] -``` - -`trunk upgrade --notify` produces a notification that looks like this: - -```yaml -notifications: - - commands: - - run: trunk upgrade - title: Upgrade Trunk - id: trunk-upgrade - message: "Upgrades available\n\n Trunk version 0.17.0-beta\n 10 linter updates\n\nRun trunk upgrade to upgrade all\n or trunk upgrade trunk to just upgrade trunk" - priority: low - rendered: "\x1b[1m\x1b[90m\nUpgrades available\x1b[0m\n\x1b[90m\n\x1b[0m• \x1b[90mTrunk version\x1b[0m \x1b[92m0.17.0-beta\x1b[0m\x1b[90m\n\x1b[0m• \x1b[92m11 linter\x1b[0m \x1b[90mupdates\n\x1b[0m\n\x1b[90mRun\x1b[0m\x1b[96m trunk upgrade\x1b[0m\x1b[90m to upgrade all\x1b[0m\x1b[90m\n or\x1b[0m\x1b[96m trunk upgrade trunk\x1b[0m\x1b[90m to just upgrade trunk\x1b[0m\x1b[90m\n\x1b[0m" -``` - -If there are no upgrades available, `trunk upgrade --notify` will produce: - -```yaml -notifications_to_delete: [trunk-upgrade-available] -``` - -So in this scenario, the `trunk-upgrade-available` action runs in the background periodically and produces a notification. The user takes action by running `trunk upgrade`. Since `trunk upgrade` modifies `.trunk/trunk.yaml`, this will again trigger the `trunk-upgrade-available` action (due to the file trigger). Since there is nothing else to upgrade, `trunk upgrade --notify` will produce output telling Trunk to delete its notification. Now, the user is no longer shown a notification about available upgrades! 
diff --git a/code-quality/overview/cli/getting-started/configuration/index.mdx b/code-quality/overview/cli/getting-started/configuration/index.mdx deleted file mode 100644 index c370b4e..0000000 --- a/code-quality/overview/cli/getting-started/configuration/index.mdx +++ /dev/null @@ -1,292 +0,0 @@ ---- -title: "Configuration" ---- -The Trunk CLI has its top-level config defined in `.trunk/trunk.yaml`. - -``` -/your_repo -├── .trunk -│ └── trunk.yaml -└── src - ├── bar - └── foo -``` - -This is initially generated by `trunk init` and is the central source of truth for how Trunk operates inside your repository. As we build new services and features, we'll extend `trunk.yaml` to include configuration sections for them. We believe strongly in "configuration as code" and being able to guarantee that `trunk` can be run reproducibly. - -### Config format - -The Trunk configuration file is written in YAML and is meant to be self-descriptive. Below is a sample config file to help you understand how the pieces come together. Alternatively, you can also refer to [the `trunk.yaml` in our GitHub Action](https://github.com/trunk-io/trunk-action/blob/main/.trunk/trunk.yaml) as an example or [`trunk-yaml-schema.json`](https://static.trunk.io/pub/trunk-yaml-schema.json). - -```yaml -version: 0.1 # the version of this config file. 
-cli: - version: 0.15.1 # the version of trunk you will run in your repository -runtimes: - enabled: - - ruby@>=2.7.1 - - python@3.9.1 -repo: - # main is the branch that everyone's work is merged into - # (this is usually inferred and not required to be set) - trunk_branch: main -lint: - definitions: - - name: my_custom_linter - files: [ALL] - commands: - output: sarif - run: ${workspace}/bin/foo --file ${target} - read_output_from: stdout - run_linter_from: workspace - success_codes: [0, 1] - enabled: - - ansible-lint@5.3.2 - - bandit@1.7.0 - - black@21.6b0 - - buf-lint@1.0.0-rc3 - - buildifier@5.1.0 - - cfnlint@0.51.0 - - eslint@7.30.0 - - gitleaks@7.6.1 - - gofmt@1.16.7 - - golangci-lint@1.41.1 - - hadolint@2.6.0 - - isort@5.8.0 - - markdownlint@0.28.1 - - mypy@0.910 - - prettier@2.3.2 - - pylint@2.8.1 - - rustfmt@1.55.0 - - semgrep@0.104.0 - - shellcheck@0.7.2 - - shfmt@3.3.1 - disabled: - - rufo - - tflint - ignore: - - linters: [ALL] - paths: - # Generated files - - a/proto/code_pb* - # Test data - - b/test_data/** - - linters: [eslint] - paths: - - c/run.ts - triggers: - - linters: - - ansible-lint - paths: - - ansible # A directory - targets: - - ansible # A directory -``` - -### `version` - -The `version field` is the schema version of `trunk.yaml.` - -### `cli` - -```yaml -cli: - version: 0.15.1 # the version of trunk you will run in your repository - options: - - commands: [ALL] # apply to all `trunk` commands - args: --monitor=true - - commands: [check, fmt] # apply only to `trunk check` and `trunk fmt` commands - args: -y -``` - -In addition to specifying `version`, `cli` allows you to specify default command line arguments using the `options` field. Specified `args` will be appended to strictly matched `commands` during `trunk` invocations. Specifying `ALL` as a `commands` element applies its options to all `trunk` subcommands. Any command line options will take precedence over these `args`. 
- -Some examples using the configuration above: - -* `trunk check` resolves to `trunk check -y --monitor=true` -* `trunk check -n` resolves to `trunk check -n --monitor=true` -* `trunk fmt` resolves to `trunk fmt -y --monitor=true` - -### `repo` - -```yaml -repo: - # main is the branch that everyone's work is merged into - # (this is usually inferred and not required to be set) - trunk_branch: main -``` - -Some Trunk features require Trunk to be aware of the canonical repository your organization uses, such as the repository that everyone pulls from and makes pull requests into. The Trunk CLI can infer this from your `origin` remote, but if you don't want your `origin` to be used for this purpose, you can explicitly specify your canonical repository. - -Other features - namely `trunk check` - need to be aware of the primary upstream branch that everyone branches from. If you use `main` or `master`, `trunk` can infer this; however, if you use some other primary branch, then you may want to consider setting this. - -The above configuration is how you would specify that [https://github.com/github/gitignore](https://github.com/github/gitignore) is your canonical repository and that `main` is the branch which `trunk` should always think of as your upstream branch. - -### `api` - -```yaml -api: - # name of your trunk organization on app.trunk.io - org: { your-org-name } -``` - -Some Trunk features, like the CI Debugger, require knowledge of the Trunk organization your repository is using. This information can be provided on the command line or hard-coded in the `trunk.yaml` file. - -### `trunk_remote_hint` - -```yaml -repo: - trunk_remote_hint: github.com/organization/my_repo -``` - -If this hint is set, Trunk will search all local remotes looking for the one that best matches `//` instead of defaulting to `origin`. It will then use this remote as the default upstream for computing changed files. 
- -### Stacked PR support - -```yaml -repo: - use_branch_upstream: true -``` - -By default, `trunk` will auto-detect all changed files relative to your main branch. If you would instead like it to compare against the upstream of your current git branch, you can enable this feature by setting `use_branch_upstream` to `true`. - -### Disable upgrade notifications - -Trunk will periodically tell you to upgrade to a newer version if one is available. If you prefer not to see these notifications, edit (or add) the section of your `.trunk/trunk.yaml` to include the following lines: - -```yaml -actions: - disabled: - - trunk-upgrade-available -``` - -### Overriding defaults - -Trunk ships with a default configuration which `trunk.yaml` is merged into to produce the actual configuration that Trunk runs with. You can view this merged configuration using `trunk print-config`. - -You may find while using Trunk that you want to modify one of these defaults: perhaps you want `clang-tidy` to not run on the upstream, or maybe you want the `node` runtime to include another environment variable. In these cases, you can specify the field in your `trunk.yaml` to override the default value. - -Let's take `clang-tidy` as an example, which ships with the following default configuration: - -```yaml -definitions: - ... - - name: clang-tidy - files: [c/c++-source] - type: llvm - commands: - - output: llvm - run: clang-tidy --export-fixes=- ${target} - success_codes: [0] - download: clang-tidy - direct_configs: [.clang-tidy] - disable_upstream: true - include_scanner_type: compile_command - environment: - - name: PATH - list: ["${linter}/bin"] - ... -``` - -If you wanted to flip the value of `disable_upstream` to `false`, you could, in your own `trunk.yaml`, specify: - -```yaml highlight={1} -definitions: - ... - - name: clang-tidy - disable_upstream: false - ... 
-``` - -Some linters have multiple commands, such as [trivy](https://github.com/trunk-io/plugins/blob/main/linters/trivy/plugin.yaml), which can run in different ways. Similarly, some linters are configured to run differently on different platforms or at different versions. When overriding a command definition, overrides are applied on the tuple `[name, version, platforms]`. For example, if you wanted to disable batching when running [ktlint](https://github.com/trunk-io/plugins/blob/main/linters/ktlint/plugin.yaml) on Windows, you could consider its default configuration: - -```yaml -definitions: - ... - - name: ktlint - ... - commands: - - name: format - platforms: [windows] - run: java -jar ${linter}/ktlint.exe -F "${target}" - output: rewrite - cache_results: true - formatter: true - in_place: true - batch: true - success_codes: [0, 1] - - name: format - run: ktlint -F "${target}" - output: rewrite - cache_results: true - formatter: true - in_place: true - batch: true - success_codes: [0, 1] - ... -``` - -and override it as such: - -```yaml -definitions: - ... - - name: ktlint - ... - commands: - - name: format - platforms: [windows] - batch: false - ... -``` - -When executing linters, Trunk will execute the first matching command based on its compatible platforms and linter version. Note when overriding that new commands that don't match an existing tuple are prepended to the resulting commands list. 
- -Alternatively, consider the default `node` runtime: - -```yaml -runtimes: - definitions: - - type: node - download: node - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: ["${runtime}/bin"] - linter_environment: - - name: PATH - list: ["${linter}/node_modules/.bin"] - version: 16.14.2 - version_commands: - - run: "node --version" - parse_regex: ${semver} -``` - -If you wanted to add `${home}/my/special/node/path` to `PATH`, you could specify the following: - -```yaml -runtimes: - - type: node - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: ["${home}/my/special/node/path", "${runtime}/bin"] -``` - -### Validation - -Custom linter, download, and runtime configs must be defined in full and will be validated. Overrides of existing linter, download, and runtime configs can be partial overrides. They do not have to be full definitions. - -Merged configurations are subject to the same validation that custom linters are - they must all have a name, type, command, and either `success_codes` or `error_codes` set. - -### Known limitations - -1. Scalar values are overridden in a straightforward manner - the value specified in the override\ - takes the place of the default, and otherwise, default values are retained. -2. To override a sequence value in the default (ex. `environment` in the `node` runtime), it is\ - necessary to fully specify the new sequence. This is why the `environment` override above also defines `HOME`. If you just wanted to add a new value, you would have to copy in the existing\ - sequence to your overriding config, and add your new value to the end of the list. -3. It is not possible to set sequences of non-zero length to zero length. For example, if the\ - default config has `success_codes: [0]`, you may override this to `success_codes: [0, 1]`, but you cannot clear its value. 
diff --git a/code-quality/overview/cli/getting-started/configuration/lint/auto-enable.mdx b/code-quality/overview/cli/getting-started/configuration/lint/auto-enable.mdx deleted file mode 100644 index fde505f..0000000 --- a/code-quality/overview/cli/getting-started/configuration/lint/auto-enable.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Auto-Enable" ---- -Simply defining a linter does not enable it. Trunk needs to know when to auto-enable the linter for certain projects (ex: all python projects) or if certain files are already present (ex: `.eslintrc`). - -## Auto Enabling - -The `direct_configs` property contains a list of config files that the underlying linter uses. The `suggest_if` property determines when `trunk check` should suggest this linter. If `suggest_if` is set to `config_present`, then trunk will search for the listed config files. If found, the linter will be enabled automatically when the user does `trunk init` or `trunk update`. - -For example: in the following yaml, the **flake8** linter sets `suggest_if` to `config_present` and sets `direct_configs` to `[.flake8]`. If any `.flake8` files are found, then trunk check will automatically enable flake8. - -**Flake8** linter definition. 
[full source](https://github.com/trunk-io/plugins/blob/main/linters/flake8/plugin.yaml) - -```yaml -version: 0.1 -tools: - definitions: - - name: flake8 - runtime: python - package: flake8 - shims: [flake8] - known_good_version: 4.0.1 -lint: - definitions: - - name: flake8 - files: [python] - tools: [flake8] - direct_configs: [.flake8] - suggest_if: config_present - affects_cache: - - setup.cfg - - tox.ini - # In case the user installs https://pypi.org/project/Flake8-pyproject/ - - pyproject.toml - issue_url_format: https://flake8.pycqa.org/en/latest/user/error-codes.html - known_good_version: 4.0.1 - version_command: - parse_regex: ${semver} - run: flake8 --version - -``` - -The **suggest\_if** field can be one of the following: - -* `config_present` will auto-enable a linter if Trunk sees any `direct_config` for it . -* `files_present` will auto-enable a linter if Trunk sees any file type that it operates on. -* `never` will never auto-enable this linter. - -Trunk curates the values of `suggest_if` for all linters in the [plugins](https://github.com/trunk-io/plugins) repo. - -## Manually enabling and disabling - -Setting the `lint.definitions[*].enabled` property to true will force the linter to be enabled. Setting the `lint.definitions[*].disabled` property to true will force the linter to never be enabled, even if the `enabled` property is true, and will never suggest this linter, even if `suggest_if` says it should. - -For additional information on the properties of Linters, see the [Linter Definition Reference](./definitions). diff --git a/code-quality/overview/cli/getting-started/configuration/lint/commands.mdx b/code-quality/overview/cli/getting-started/configuration/lint/commands.mdx deleted file mode 100644 index 5c91a1d..0000000 --- a/code-quality/overview/cli/getting-started/configuration/lint/commands.mdx +++ /dev/null @@ -1,454 +0,0 @@ ---- -title: "Commands" ---- -A command is the fundamental unit of linters. 
It defines specifically _what binary and arguments_ are used to run the linter. A linter can have multiple commands in case it has multiple behaviors (ex: lint and format), but it must have at least one. - -## How Code Quality Runs Linters - -The `run` property is the command to actually run a linter. This command can use [variables](./commands#template-variables) provided by the runtime such as `${plugin}` and `${target}`. - -For example: this is the `run` field for **black**, one of our Python linters. The `run` field is set to `black -q ${target}`. - -```yaml -version: 0.1 -tools: - definitions: - - name: black - runtime: python - package: black[python2,jupyter] - shims: [black] - known_good_version: 22.3.0 -lint: - definitions: - - name: black - files: [python, jupyter, python-interface] - commands: - - name: format - output: rewrite - run: black -q ${target} - success_codes: [0] - batch: true - in_place: true - allow_empty_files: false - cache_results: true - formatter: true - tools: [black] - suggest_if: files_present - affects_cache: [pyproject.toml] - known_good_version: 22.3.0 - version_command: - parse_regex: black, version (.*) - run: black --version -``` - -This command template contains all the information Trunk needs to execute `black` in a way where Trunk will be able to understand `blacks`'s output. - -## Input Target - -The `target` field specifies what paths this linter will run on given an input file. It may be a string literal such as `.`, which will run the linter on the whole repository. It also supports various substitutions: - -| Variable | Description | -| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -| `${file}` | The input file. | -| `${parent}` | The folder containing the file. | -| `${parent_with()}` | Walks up toward the repository root looking for the first folder containing ``. 
If `` is not found, do not run any linter. | -| `${root_or_parent_with()}` | Walks up toward the repository root looking for the first folder containing ``. If `` is not found, evaluate to the repository root. | -| `${root_or_parent_with_regex()}` | Walks up toward the repository root looking for the first folder containing a name matching ``. If not found, evaluate to the repository root. | - -If `target` is not specified it will default to `${file}`. - -This target may be referenced in the `run` field as `${target}`, as in the example above for **black**, or this simple example. - -```yaml -lint: - definitions: - - name: noop - files: [ALL] - commands: - - name: format - output: rewrite - formatter: true - run: cat ${target} -``` - -or via `stdin`, by specifying `stdin: true`: - -```yaml -lint: - definitions: - - name: noop - files: [ALL] - commands: - - name: format - output: rewrite - formatter: true - run: cat - - stdin: true -``` - -> Note: Linters that take their input via `stdin` may still want to know the file's path so that they can, say, generate diagnostics with the file's path. In these cases you can still use `${target}` in `run`. - -## Exit codes - -Linters often use different exit codes to categorize the outcome. For instance, [`markdownlint`](https://github.com/igorshubovych/markdownlint-cli#exit-codes) uses `0` to indicate that no issues were found, `1` to indicate that the tool ran successfully but issues were found, and `2`, `3`, and `4` for tool execution failures. - -Trunk supports specifying either `success_codes` or `error_codes` for a linter: - -* if `success_codes` are specified, Trunk expects a successful linter invocation (which may or may not find issues) to return one of the specified `success_codes`; -* if `error_codes` are specified, Trunk expects a successful linter invocation to return any exit\ - code which is _not_ one of the specified `error_codes`. 
- -`markdownlint`, for example, has `success_codes: [0, 1]` in its configuration. - -**Note:** A linter command should set either success codes or error codes, but not both\*\*.\*\* - -## Working directory - -`run_from` determines what directory a linter command is run from. - -| run_from | Description | -|---|---| -| `` (`.` by default) | Explicit path to run from | -| `${parent}` | Parent of the target file; e.g. would be `foo/bar` for `foo/bar/hello.txt` | -| `${root_or_parent_with()}` | Nearest parent directory containing the specified file | -| `${root_or_parent_with_dir()}` | Nearest parent directory containing the specified directory | -| `${root_or_parent_with_regex()}` | Nearest parent directory containing a file or directory matching specified regex | -| `${root_or_parent_with_direct_config}` | Nearest parent directory containing a file from `direct_configs` | -| `${root_or_parent_with_any_config}` | Nearest parent directory containing a file from `affects_cache` or `direct_configs` | -| `${target_directory}` | Run the linter from the same directory as the target file, and change the target to be `.` | -| `${compile_command}` | Run from the directory where `compile_commands.json` is located | - -## Template Variables - -Note that some of the fields in this command template contain `${}` tokens: these tokens are why `command` is a template and are replaced at execution time with the value of that variable within the context of the lint action being executed. - -| Variable | Description | -| ----------------- | ----------------------------------------------------------------------------- | -| `${workspace}` | Path to the root of the repository | -| `${target}` | Path to the file to check, relative to `${workspace}` | -| `${linter}` | Path to the directory the linter was downloaded to | -| `${runtime}` | Path to the directory the runtime (e.g. 
`node`) was downloaded to | -| `${upstream-ref}` | Upstream git commit that is being used to calculate new/existing/fixed issues | -| `${plugin}` | Path to the root of the plugin's repository | - -## Limiting concurrency - -If you would like to limit the number of times trunk will invoke a linter concurrently, then you can use the `maximum_concurrency` option. For example, setting `maximum_concurrency: 1` will limit Trunk from running more than one instance of the linter simultaneously. - -## Environment variables - -Trunk by default runs linters _without_ environment variables from the parent shell; however, most linters need at least some such variables to be set, so Trunk allows specifying them using `environment`; for example, the `environment` for `ktlint` looks like this: - -```yaml -lint: - definitions: - name: ktlint - # ... - environment: - - name: PATH - list: ["${linter}"] - - name: LANG - value: en_US.UTF-8 -``` - -Most `environment` entries are maps with `name` and `value` keys; these become `name=value` environment variables. For `PATH`, we allow specifying `list`, in which case we concatenate the entries with `:`. - -We use the same template syntax for `environment` as we do for [`command`](./commands#commands). - -## Output Types and Parsing - -The output of a command should be in one of the supported output types like [SARIF](./output#sarif) or something that can be parsed with a [regex](./output#regex). See [See Output Types](./commands#output-types-and-parsing) for more details. If the standard output types do not meet your needs, you can also create a [custom parser](./output-parsing). - -## Full Reference - -The linter command definitions are defined in `lint.definitions.commands`. A single linter can have multiple commands if it is used in different ways. - -_Note:_. If you define the executable to run here (the command definition), then you should _not_ define it also in the linter definition. Defining it here as a command is preferred. 
- -## `allow_empty_files` - -`allow_empty_files`: _optional boolean_. Skip linting empty files for this linter. Trunk will assume there are no linters if the file is empty. - -## `batch` - -`batch`: _optional boolean_. Combine multiple files into the same execution. If true, the `${target}` template substitution in the `run` field may expand into multiple files. - -## `cache_ttl` - -`cache_ttl`, _duration string_. If this linter is not [idempotent](./commands#idempotent), this is how long cached results are kept before they expire. Defaults to 24hrs. See [Output Caching](../../caching) for more details. - -## `cache_results` - -`cache_results`: _optional boolean_. Indicates if this linter wants to cache results. See [Caching](./files-and-caching) for more details. - -## `disable_upstream` - -`disable_upstream`: _optional boolean_, Whether this linter supports comparing against the upstream version of this file. - -## `error_codes` - -`error_codes`: List of exit codes this linter will return when it hit an internal failure and couldn't generate results. **A linter should set either success codes or error codes, but not both.** See also [`success_codes`](./commands#success_codes). - -## `enabled` - -`enabled`: _optional boolean_. Whether the command is enabled to run when the linter is run. Allows some commands of a linter to be run by default without others. - -## `files` - -`files` is a list of file types listed in the `lint.files` section that this linter applies to. - -Example: **prettier** [full source](https://github.com/trunk-io/plugins/blob/main/linters/prettier/plugin.yaml) - -```yaml -lint: - definitions: - - name: prettier - files: - - typescript - - yaml - - css - - sass - - html - - markdown - - json - - javascript - - graphql - - prettier_supported_configs -``` - -## `fix_prompt` - -`fix_prompt`, _optional string._ e.g. 'Incorrect formatting' or 'Unoptimized image'. This string is used when prompting the user to use the linter interactively. 
- -## `fix_verb` - -`fix_verb`: _optional string_. This string is used when prompting the user to use the linter interactively. Example: `optimize`, `autoformat`, or `compress`. - -## `formatter` - -`formatter`: _optional boolean_. Whether this command is a formatter and should be included in `trunk fmt`. - -## `in_place` - -`in_place`: _optional boolean_. Indicates that this formatter will rewrite the file in place. **Only applies to formatters**. - -## `idempotent` - -`idempotent`: _optional boolean_. Indicates whether a linter is idempotent with config and source code inputs. For example, `semgrep` fetches rules from the Internet, so it is not idempotent . If set, will only cache results a duration of `cache_ttl`. See [Output Caching](./files-and-caching) for more details. - -## `is_security` - -`is_security`: _optional boolean_. Whether findings from this command should be considered "security" or not. Allows this linter to be run with `--scope==security`. [See Command Line Options](../../../../../../merge-queue/using-the-queue/reference) - -## `maximum_file_size` - -`maximum_file_size`: _optional number_. The maximum file size in bytes for input files to the linter. If not specified, the [lint.default\_max\_file\_size](./#default_max_file_size) will be used. - -## `max_concurrency` - -`max_concurrency`: _optional integer_, The maximum number of processes that Trunk Code Quality will run concurrently for this linter. [See Limiting Concurrency](./commands#limiting-concurrency) - -## `name` - -`name`: _string_. A unique name for this command (some tools expose multiple commands, format, lint, analyze, etc.). - -## `no_issues_codes` - -`no_issues_codes`: List of exit codes that Trunk will use to assume there were no issues without parsing the output. - -## `output` - -`output`: _string_. which type of output this linter produces. [See Output Types](./commands#output-types-and-parsing). 
- -## `parser` - -`parser`: The definition of a parser that will transform the output of the linter into SARIF. Not needed if linter is already output SARIF. [See Output Types](./commands#output-types-and-parsing) - -## `parse_regex` - -`parse_regex`: _string_. A regular expression used to support regex parsing. [See Regex output type](./output#regex) - -## `platforms` - -`platforms`: A list of platforms this linter supports. (ex: `windows`, `macos`, `linux`). Linters using managed runtimes (node, python, etc.) can generally run cross-platform and do not need the `platforms` property set. For tools which _are_ platform specific or which have different configuration for each platform, this property can be used to distinguish between them. When multiple command definitions have the same name, Trunk Check will pick the first one that matches the `platforms` setting. - -For example, the `detekt` plugin has different exit codes for Windows than MacOS or Linux, and has two command definitions with different `success_codes` fields. [Full Source](https://github.com/trunk-io/plugins/blob/main/linters/detekt/plugin.yaml). - -```yaml -lint: - definitions: - - name: detekt - files: [kotlin] - download: detekt - commands: - - name: lint - platforms: [windows] - output: sarif - run: - detekt-cli --build-upon-default-config --config - .detekt.yaml --input ${target,} --report - sarif:${tmpfile} - success_codes: [0, 1, 2] - read_output_from: tmp_file - batch: true - cache_results: true - - name: lint - output: sarif - run: - detekt-cli --build-upon-default-config --config - .detekt.yaml --input ${target,} --report - sarif:${tmpfile} - success_codes: [0, 2] - read_output_from: tmp_file - batch: true - cache_results: true -``` - -## `prepare_run` - -`prepare_run`: An extra command to run before running a linter. - -## `read_output_from` - -`read_output_from`: Tell parser where to expect output from for reading. Should be one of `stdout`, `stderr`, and `tmp_file`. 
[See Output Sources](./output#output-sources) - -## `run` - -`run`: The command to run a linter. This command can use variables provided at runtime such as `$plugin}` and `$target}`. [Full list of variables](./commands#template-variables). See [Run](./commands#how-code-qualit-runs-linters) for more details. - -`dart` `format` command: [full source](https://github.com/trunk-io/plugins/blob/main/linters/dart/plugin.yaml) - -```yaml -lint: - files: - - name: dart - extensions: [dart] - definitions: - - name: dart - main_tool: dart - commands: - - name: format - output: rewrite - run: dart format ${target} -``` - -## `run_from` - -`run_from`: What current working directory to run the linter from. See [Working Directory](./commands#working-directory) for more details. - -## `run_when` - -`run_when`: When this command should be run. One of `cli`, `lsp`, `monitor`, or `ci`. - -## `std_in` - -`std_in`: _optional boolean_. Should the command be fed the file on standard input? - -## `success_codes` - -`success_codes:` List of exit codes that indicates linter ran successfully. **This is unrelated to whether or not there were issues reported by the linter**. - -**Note:** a linter should set either success codes or error codes, but not both. See also [`error_codes`](./commands#error_codes). - -## `target` - -`target`, _optional string_, What target does this run on. By default, the target is the modified source code file, `${file}`. Some linters operate on a whole repo or directory. See [Input Target](./commands#input-target) for more details. - -Examples: - -**nancy** uses `.` as the target. [full source](https://github.com/trunk-io/plugins/blob/main/linters/nancy/plugin.yaml) - -```yaml -# nancy uses . -definitions: - - name: nancy - files: [go-lockfile] - download: nancy - runtime: go - commands: - - output: sarif - run: sh ${plugin}/linters/nancy/run.sh - success_codes: [0, 1, 2] - target: . 
- read_output_from: stdout - is_security: true -``` - -**tflint** uses `${parent}` as the target. [full source](https://github.com/trunk-io/plugins/blob/main/linters/tflint/plugin.yaml) - -```yaml -lint: - definitions: - - name: tflint - files: [terraform] - commands: - - name: lint - output: sarif - prepare_run: tflint --init - run: tflint --format=sarif --force - success_codes: [0, 1, 2] - read_output_from: stdout - # tflint can only run on the current directory unless --recursive is passed - target: ${parent} - run_from: ${target_directory} - version: ">=0.47.0" -``` - -**Clippy** uses `${parent_with(Cargo.toml)}` as the target. [full source](https://github.com/trunk-io/plugins/blob/main/linters/clippy/plugin.yaml) - -```yaml -version: 0.1 -lint: - definitions: - # clippy has 3 lint severities: deny, warn, and allow. Unfortunately deny causes rustc to - # fail eagerly due to its implementation (https://github.com/rust-lang/rust/pull/87337), - # We use --cap-lints to downgrade "deny" severity lints to warn. So rustc will find all - # issues instead of hard stopping. There are currently only 70 of them, so we could hardcode - # the list to fix their severity levels correctly. - - name: clippy - files: [rust] - download: rust - commands: - - name: lint - # Custom parser type defined in the trunk cli to handle clippy's JSON output. - output: clippy - target: ${parent_with(Cargo.toml)} - run: cargo clippy --message-format json --locked -- --cap-lints=warn --no-deps - success_codes: [0, 101, 383] - run_from: ${target_directory} - disable_upstream: true -``` - -## `version` - -`version`: _optional string_, Version constraint. When a linter has multiple commands with the same name, Trunk Code Quality will select the first command that matches the version constraint. This is useful for when multiple incompatible versions of a tool need to be supported. - -Example: the `ruff` linter changed a command line argument from `--format` to `--output-format` in version `v0.1.0`. 
To handle both versions, the linter defines two commands with different version attributes. The first is for version `>=0.1.0`. If the first is not matched (because the install version of run is less that 0.1.0) then Trunk Code Quality will move on to the next command until it finds a match. [Full source](https://github.com/trunk-io/plugins/blob/main/linters/ruff/plugin.yaml). - -```yaml -lint: - definitions: - - name: ruff - files: [python] - commands: - - name: lint - # As of ruff v0.1.0, --format is replaced with --output-format - version: ">=0.1.0" - run: ruff check --cache-dir ${cachedir} --output-format json ${target} - output: sarif - parser: - runtime: python - run: python3 ${cwd}/ruff_to_sarif.py 0 - batch: true - success_codes: [0, 1] - - name: lint - run: ruff check --cache-dir ${cachedir} --format json ${target} - output: sarif - parser: - runtime: python - run: python3 ${cwd}/ruff_to_sarif.py 1 - batch: true - success_codes: [0, 1] - - -``` diff --git a/code-quality/overview/cli/getting-started/configuration/lint/definitions.mdx b/code-quality/overview/cli/getting-started/configuration/lint/definitions.mdx deleted file mode 100644 index 0cb1acb..0000000 --- a/code-quality/overview/cli/getting-started/configuration/lint/definitions.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: "Definitions" ---- -The definition of a particular linter is put under `lint.definitions`. The following properties define the settings of a _particular linter_, not for all linters. For global linter settings, see [Lint Config](./). - -## `affects_cache` - -`affects_cache`: The list of files that affect the cache results of this linter. [See Caching](../../caching). - -## `allow_empty_files` - -`allow_empty_files`: _optional boolean_. Indicates to skip linting empty files for this linter. - -## `batch` - -`batch`: _optional boolean_. Combine multiple files into the same execution. - -## `commands` - -`commands`: The list of commands exposed by this linter. 
See [Linter Command Definition](./commands). - -## `deprecated` - -`deprecated`: _string_. Indicates the linter is deprecated and should not be used. - -## `direct_configs` - -`direct_configs`: _string list_. Indicates config files used to auto-enable the linter. See [Auto Enabling](./auto-enable). - -## `disabled` - -`disabled`: _optional boolean_: Whether linter is actively disabled (and will not be recommended) and will not run (overrides enabled). - -## `download` - -`download`: _string_. The download URL. You must provide either runtime + packages or download, not both. Using runtimes is preferred. See [Runtimes](../runtimes). - -## `enabled` - -`enabled`: _optional boolean_. Whether this linter is enabled. - -## `environment` - -`environment`: a list of runtime variables used when running the linter. See [Command Environment Variables](./commands#environment-variables). - -## `extra_packages` - -`extra_packages`: list of strings, Extra packages to install, versions are optional. See [Linter Dependencies](./dependencies). - -## `formatter` - -`formatter`: _boolean_. Indicates whether this is a formatter and should be included in `trunk fmt`. - -## `good_without_config` - -`good_without_config`: _optional boolean_. Indicates whether this linter is recommended without the user tuning its configuration. Prefer [`suggest_if`](./definitions#suggest_if). - -## `hold_the_line` - -`hold_the_line`: _optional boolean_. Whether [hold-the-line will](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line) be done for this linter or not. - -## `include_lfs` - -`include_lfs`: _boolean_. Allow this linter to operate on files tracked using [git LFS](https://git-lfs.com/). - -## `include_scanner_type` - -`include_scanner_type`: which include scanner to use, if any. - -## `issue_url_format` - -`issue_url_format`: _string_, a format string that accepts issue codes for links to issues docs. - -## `known_good_version` - -`known_good_version`: _string_. 
A version to be used when Trunk cannot query the latest version. Currently, Trunk can query the latest version for all package managers and downloads hosted on GitHub. - -## `known_bad_versions` - -`known_bad_versions`: _string list_. Versions of a linter that are known to be broken and should not be run with Trunk. We will fall back to a `known_good_version` if init or upgrade chooses something in this set. - -## `main_tool` - -`main_tool`, _string_. If your linter depends on more than a single tool, and none of the tools has the same name as the linter, then you will need to specify which is the main tool here. It will be used to version the tool from the linter's enabled version. - -## `name` - -`name` _required string._ The name of the linter. This property will be used to refer to the linter in other parts of the config, for example, in the list of enabled linters. - -## `package` - -`package`: string, What primary package to install, if using a package manager runtime. The enabled version of the runtime for this linter will apply to this package. See [Linter Dependencies](./dependencies). - -## `path_format` - -`path_format`, Whether to use the platform-specific paths or generic "/". Default native. - -## `plugin_url` - -`plugin_url`: _string_, a plugin url for reporting issues. - -## `prepare_command` - -`prepare_command`. A command that is run once per session before linting any number of files using this linter. ex. `[tflint, --init]`. - -## `query_compile_commands` - -`query_compile_commands`, _optional boolean_. - -## `runtime` - -`runtime`: RuntimeType, Which package manager runtime, if any, to require to be setup for this linter. Ex: `node`, `ruby`, `python`. See [Linter Dependencies](./dependencies). - -## `run_timeout` - -`run_timeout`: _duration string_. Describes how long a linter can run before timing out. [See timeouts](../../../../linters/configure-linters#timeout). 
- -## `suggest_if` - -How to determine if this linter should be auto-enabled/recommended. Possible values are `never`, `config_present`, and `files_present`. [See auto-enabling](./auto-enable) for more details. - -## `supported_platforms` - -Platform constraint. If incompatible, renders a notice. See also [Command `platforms`](./commands#platforms). - -## `tools` - -`tools`, _string list_. The list of tools used by this linter. See [Linter Dependencies](./dependencies). - -## `version_command` - -`version_command`: Version check commands. - -## `verbatim_message` - -`verbatim_message`: Do not try to truncate or reflow the output of this linter. diff --git a/code-quality/overview/cli/getting-started/configuration/lint/dependencies.mdx b/code-quality/overview/cli/getting-started/configuration/lint/dependencies.mdx deleted file mode 100644 index e118a17..0000000 --- a/code-quality/overview/cli/getting-started/configuration/lint/dependencies.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Dependencies" ---- -Linters use the `tools` section of the `.trunk/trunk.yaml` to specify Trunk configured binaries that the linter uses to run. The `linter.definitions.tools` key specifies a list of tool names. There are two ways for a linter to depend on a tool: [Eponymous Tools](./dependencies#eponymous-tool-dependencies) and [Additional Tools](./dependencies#additional-tool-dependencies) - -## Eponymous Tool Dependencies - -When the name of the tool matches the name of a linter, it is called an _eponymous tool dependency_. - -In the example below the `pylint` linter depends on the `pylint` tool, which is defined as the package `pylint` running with the `python` runtime. - -Eponymous tools need to be defined _separately_ from the linter but implicitly enabled with the linter's version. You may explicitly enable the eponymous tool if you wish, but note that its version needs to be synced to that of the linter. 
See the [Tools Configuration](../tools) page for more details on how to set up Tools. - -```yaml -tools: - definitions: - - name: pylint - runtime: python - package: pylint - shims: [pylint] - known_good_version: 2.11.1 -lint: - definitions: - - name: pylint - files: [python] - commands: - - name: lint - # Custom parser type defined in the trunk cli to - # handle pylint's JSON output. - output: pylint - run: pylint --exit-zero --output - ${tmpfile} --output-format json ${target} - success_codes: [0] - read_output_from: tmp_file - batch: true - cache_results: true - tools: [pylint] - suggest_if: config_present - direct_configs: - - pylintrc - - .pylintrc - affects_cache: - - pyproject.toml - - setup.cfg - issue_url_format: http://pylint-messages.wikidot.com/messages:{} - known_good_version: 2.11.1 - version_command: - parse_regex: pylint ${semver} - run: pylint --version -``` - -## Additional Tool Dependencies - -You can also have a scenario where a linter depends on a tool that is not identically named - an _additional tool dependency_. We give an example below: - -```yaml -tools: - definitions: - - name: terragrunt - known_good_version: 0.45.8 - download: terragrunt - shims: - - name: terragrunt - target: terragrunt -lint: - definitions: - - name: terragrunt - tools: [terragrunt, terraform] - known_good_version: 0.45.8 - files: [hcl] - suggest_if: never - environment: - - name: PATH - list: ["${linter}"] - commands: - - name: format - output: rewrite - run: terragrunt hclfmt ${target} - success_codes: [0] - sandbox_type: copy_targets - in_place: true - formatter: true - batch: true - version_command: - parse_regex: terragrunt v${semver} - run: terragrunt -version -``` - -In this scenario, `terraform` is an additional tool dependency - `terragrunt` requires it to be in `$PATH`. If the tool is an additional dependency, it must be enabled explicitly and versioned independently of the linter - that is, it must be listed in the `tools.enabled` section. 
- -## Download via package manager - -If your linter can be downloaded via `gem install`, `go get`, `npm install`, or `pip install`, you can specify a `runtime` and the `package` key: - -```yaml -lint: - definitions: - - name: fizz-buzz - files: [javascript] - # npm install fizz-buzz - runtime: node - package: fizz-buzz -``` - -This will now create a hermetic directory in `~/.cache/trunk/linters/fizz-buzz` and `npm install fizz-buzz` there. You can refer to different versions of your package in `trunk.yaml` as normal, via `fizz-buzz@1.2.3`. - -> Note: Such downloads will use the _hermetic_ version of the specified runtime that `trunk` installs, not the one you've installed on your machine. - -See [Package-based Tools](../tools#package-based-tools) for more information. diff --git a/code-quality/overview/cli/getting-started/configuration/lint/files-and-caching.mdx b/code-quality/overview/cli/getting-started/configuration/lint/files-and-caching.mdx deleted file mode 100644 index 5712364..0000000 --- a/code-quality/overview/cli/getting-started/configuration/lint/files-and-caching.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "Files and Caching" ---- -## Applicable filetypes - -To determine which linters to run on which files (i.e. compute the set of lint actions), Trunk requires that every linter define the set of filetypes it applies to in `lint.files`, then reference those files from `lint.definitions[*].files`. - -We have a number of pre-defined filetypes (e.g. `c++-header`, `gemspec`, `rust`; see our [plugins repo](https://github.com/trunk-io/plugins/blob/main/linters/plugin.yaml) for an up-to-date list), but you can also define your own filetypes. 
Here's how we define the `python` filetype: - -```yaml -lint: - files: - - name: python - extensions: - - py - - py2 - - py3 - shebangs: - - python - - python3 -``` - -This tells Trunk that files matching either of the following criteria should be considered `python` files: - -* the extension is any of `.py`, `.py2`, or `.py3` (e.g. `lib.py`) -* the shebang is any of `python` or `python3` (e.g. `#!/usr/bin/env python3`) - -The **flake8** linter definition uses python files, so it references the filetype above in its definition. - -```yaml -lint: - definitions: - - name: flake8 - files: [python] - commands: - ... - affects_cache: - - setup.cfg - - tox.ini - # In case the user uses https://pypi.org/project/Flake8-pyproject/ - - pyproject.toml -``` - -## Caching - -Trunk Code Quality automatically caches results from previous runs of linters to speed up development. To do this Trunk needs to know which files could potentially affect the cache, besides the source code files themselves. - -### Enabling caching - -If a linter wishes Trunk to cache the results it should set `cache_results` to true. - -## Files which affect caching - -The `lint.definitions[*].affects_cache` property is a list of files which could affect the cache. General these are files which would change the configuration of the linter, and therefore invalidate the current cached results. For example, the **flake8** tool tells trunk to invalidate the cache whenever the `setup.cfg`, `tox.ini`, or `pyproject.toml` files are changed. - -```yaml -lint: - definitions: - - name: flake8 - files: [python] - commands: - ... - affects_cache: - - setup.cfg - - tox.ini - # In case the user uses https://pypi.org/project/Flake8-pyproject/ - - pyproject.toml -``` - -### Idempotency - -Trunk Code Quality also needs to know if the linter command itself is idempotent, meaning the command will return the exact same results given the exact same inputs. 
Most linters are; however, semgrep, for example, fetches rules from the internet so the output could be different each time.

Setting the `lint.definitions[*].commands.idempotent` property to true will tell trunk to only cache the result for a duration of `cache_ttl`, which is set to 24hrs by default.
The default is `build/`. - -### `default_max_file_size` - -`default_max_file_size`: Default maximum filesize in bytes. Trunk Code Quality will not run linters on any files larger than this. Default value is 4 megabytes. - -### `definitions` - -`definitions`: Where you define or override linter settings. See [Linter Definition Config](./definitions). - -### `disabled` - -`disabled`: The list of linters to disable. Adding a linter here will prevent trunk from suggesting it as a new linter each time you upgrade. Linter names can be in the form of `` or `@`, the same format as the [enabled](./#enabled) property. - -### `downloads` - -`downloads`: Locations to download binary artifacts from. Using [tool definitions](../tools) instead is preferred. - -### `enabled` - -`enabled`: The list of linters to enable. Linter names can be in the form of `` or `@`. Examples: - -```yaml -lint: - enabled: - # Mutually exclusive, choose one: - - eslint # Use the system version of markdownlint - - eslint@9.0.0 # Use a hermetically managed version of eslint - - eslint@node # Use eslint from node_modules/.bin -``` - -### `exported_configs` - -`exported_configs`: Linter configs to export when another project is [importing this plugin](../../../../linters/shared-configs) - -### `extra_compilation_flags` - -`extra_compilation_flags`: When running clang-tidy, this list will be appended to the compile command. - -### `files` - -`files`: Definitions of filetypes - -Every linter must define the set of filetypes it applies to in the `lint.files` section. - -New filetypes are defined with the name and extensions properties. They may also include the comments properties to describe what style of comments are used in these files. - -This is how the C++ source filetype is defined. See also [Files and Caching](./files-and-caching). 
- -```yaml -lint: - files: - - name: c++-source - extensions: - - C - - cc - - cpp - - cxx - comments: - - slashes-block - - slashes-inline -``` - -### `ignore` - -`ignore`: files to be ignored by linters. - -### `reuse_upstream` - -`reuse_upstream`: If enabled, Trunk will cache upstream sandboxes instead of creating a new one each time. Options are `true`, or `false`. - -### `runtimes` - -`runtimes`: Node, python, cargo, etc. Used to define or override a runtime environment for package management. [See Runtimes](../runtimes). - -### `skip_missing_compile_command` - -`skip_missing_compile_command`: For linters that depend on compile commands, setting this will cause Trunk to skip files without a compile command rather than report an error. - -### `threshold` - -`threshold`: where you specify the blocking behavior of linters. The [threshold](../../../../linters/configure-linters#blocking-thresholds) for whether an error from a linter should block commits or not. - -### `upstream_mode` - -`upstream_mode`: How to generate the upstream sandbox used for generating lint results for revisions not currently checked out. Options are`symlink` (default), `hardlink`, or `copy`. If using `copy`, it can be slow without also enabling `reuse_upstream: true`. diff --git a/code-quality/overview/cli/getting-started/configuration/lint/output-parsing.mdx b/code-quality/overview/cli/getting-started/configuration/lint/output-parsing.mdx deleted file mode 100644 index a61c5d7..0000000 --- a/code-quality/overview/cli/getting-started/configuration/lint/output-parsing.mdx +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: "Output Parsing" ---- -If you have a command or utility that you want to run pretty much as-is, but Trunk doesn't natively understand how to parse it, you can inject your own custom parser to translate its output into a format that Trunk does understand! - -For example, let's say that we want to use `grep` as a linter, but we want to add more context to the matches. 
We could define a custom linter like so: - -```yaml -lint: - definitions: - - name: todo-finder - files: [ALL] - commands: - - output: regex - # matches the parser run output - parse_regex: "((?P.*):(?P\\d+):(?P\\d+): - \\[(?P.*)\\] (?P.*) \\((?P.*)\\))" - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - run: - "sed -E 's/(.*):([0-9]+):(.*)/\\1:\\2:0: - [error] Found todo in \"\\3\" (found-todo)/'" -``` - -The execution model that `trunk` follows for a parser is that it will: - -* execute the linter's `run` field, asserting that either: - * the linter's exit code is in `success_codes`, or - * the linter's exit code is not in `error_codes`; -* execute `parser.run`, - * with the `read_output_from` of the linter execution fed to `parser.run` as `stdin`, - * assert that the exit code of the parser is 0, and then -* use `output` to determine how it should parse the parser's `stdout`. - -Note that you can also set `parser.runtime` to [`node`](./output-parsing#node) or [`python`](./output-parsing#python) so that you can write your parser in Javascript or Python instead, if you so prefer! You can find plenty of examples of python parsers in our [plugins repo](https://github.com/trunk-io/plugins). 
- - - -**Node** - -```yaml -lint: - definitions: - - name: todo-finder-node - files: [ALL] - commands: - - output: parsable - # parse_regex matches the parser run output - parse_regex: "((?P.*):(?P\\d+):(?P\\d+): - \\[(?P.*)\\] (?P.*) \\((?P.*)\\))" - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - runtime: node - run: ${workspace}/todo-finder-parser.js -``` - -```javascript -#!/usr/bin/env node -'use strict'; -let readline = require('readline'); -let rl = readline.createInterface({ input: process.stdin }); - -rl.on('line', function(line){ - let match = line.match(/(.*):([0-9]+):(.*)/); - - if (match) { - let [_, path, line_number, line_contents] = match; - console.log(`${path}:${line_number}:0: [error]` - +` Found todo in "${line_contents}" (found-todo)`); - } -``` - -Remember to run `chmod u+x todo-finder-parser.js` so that `trunk` can run it! - - -**Python** - -```yaml -lint: - definitions: - - name: todo-finder-python - files: [ALL] - commands: - - output: parsable - # parse_regex matches the parser run output - parse_regex: "((?P.*):(?P\\d+):(?P\\d+): - \\[(?P.*)\\] (?P.*) \\((?P.*)\\))" - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - runtime: python - run: ${workspace}/todo-finder-parser.js -``` - -```python -#!/usr/bin/env python -import re, sys - -for line in sys.stdin.readlines(): - match = re.match("(.*):([0-9]+):(.*)", line) - if match: - path, line_number, line_contents = match.groups() - print(f"{path}:{line_number}:0: [error] " - "Found todo in \"{line_contents}\" (found-todo)") - -``` - -Remember to run `chmod u+x todo-finder-parser.py` so that `trunk` can run it! 
- - diff --git a/code-quality/overview/cli/getting-started/configuration/lint/output.mdx b/code-quality/overview/cli/getting-started/configuration/lint/output.mdx deleted file mode 100644 index 9364527..0000000 --- a/code-quality/overview/cli/getting-started/configuration/lint/output.mdx +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: "Output" ---- -## Output Sources - -The output format that Trunk expects from a linter is determined by its [`output`](./output#output-types) type. - -**`stdout`, `stderr` or `tmp_file`** - -`trunk` generally expects a linter to output its findings to `stdout`, but does support other output mechanisms: - -| `read_output_from` | Description | -| ------------------ | --------------------------------------------------------------------------------- | -| `stdout` | Standard output. | -| `stderr` | Standard error. | -| `tmp_file` | If `${tmpfile}` was specified in `command`, the path of the created `${tmpfile}`. | - -## Output Types - -Trunk supports several different generic output types. Most linters will use one of these output types, but if your linter doesn't conform well to any of these specifications, you can also write a [custom parser](./output-parsing). In general, SARIF should be preferred over other formats because it is the most flexible and battle tested. - -Trunk currently supports the following linter output types. - -| Linter Type | Autofix support | Description | -|---|---|---| -| [`sarif`](#sarif) | ✓ | Produces diagnostics as [Static Analysis Results Interchange Format](https://docs.oasis-open.org/sarif/sarif/v2.0/sarif-v2.0.html) JSON. | -| [`lsp_json`](#lsp-json) | | Produces diagnostics as [Language Server Protocol](https://microsoft.github.io/language-server-protocol/) JSON. | -| [`pass_fail`](#pass-fail-linters) | | Writes a single file-level diagnostic to `stdout`. | -| [`regex`](#regex) | | Produces diagnostics using a custom regex format. | -| [`arcanist`](#arcanist) | ✓ | Produces diagnostics as Arcanist JSON. 
| -| [`rewrite`](#formatters) | ✓ | Writes the formatted version of a file to `stdout`. | - -If your linter produces a different output type, you can also write a [parser](./output-parsing) to transform the linter's output into something Trunk can understand. - -### SARIF - -`output: sarif` linters produce diagnostics in the [Static Analysis Results Interchange Format](https://docs.oasis-open.org/sarif/sarif/v2.0/sarif-v2.0.html): - -```json -{ - "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json", - "version": "2.1.0", - "runs": [ - { - "results": [ - { - "level": "warning", - "locations": [ - { - "physicalLocation": { - "artifactLocation": { - "uri": "/dev/shm/sandbox/detekt_test_repo/example.kt" - }, - "region": { - "startColumn": 12, - "startLine": 18 - } - } - } - ], - "message": { - "text": "A class should always override hashCode when overriding equals and the other way around." - }, - "ruleId": "detekt.potential-bugs.EqualsWithHashCodeExist" - } - ], - "tool": { - "driver": { - "downloadUri": "https://github.com/detekt/detekt/releases/download/v1.19.0/detekt", - "fullName": "detekt", - "guid": "022ca8c2-f6a2-4c95-b107-bb72c43263f3", - "informationUri": "https://detekt.github.io/detekt", - "language": "en", - "name": "detekt", - "organization": "detekt", - "semanticVersion": "1.19.0", - "version": "1.19.0" - } - } - } - ] -} -``` - -### LSP JSON - -`output: lsp_json` linters output issues as [Language Server Protocol](https://microsoft.github.io/language-server-protocol/specification#diagnostic) JSON. - -```json -[ - { - "message": "Not formatted correctly. 
Missing owner", - "code": "missing-owner", - "severity": "Error", - "range": { - "start": { - "line": 12, - "character": 8 - }, - "end": { - "line": 12, - "character": 12 - } - } - }, - { - "message": "TODO is assigned to someone not listed in this project", - "code": "unknown-user", - "severity": "Warning", - "range": { - "start": { - "line": 37, - "character": 0 - }, - "end": { - "line": 37, - "character": 14 - } - } - } -] -``` - -### Pass/Fail Linters - -`output: pass_fail` linters find either: - -* no issues in a file, indicated by exiting with `exit_code=0`, or -* a single file-level issue in a file, whose message is the linter's `stdout`, indicated by exiting\ - with `exit_code=1`. - -> Note: Exiting with `exit_code=1` but writing nothing to `stdout` is considered to be a linter tool failure. -> -> Note: `pass_fail` linters are required to have `success_codes: [0, 1]` - -### Regex - -`output: regex` linters produce output that can be parsed with custom regular expressions and named capture groups. The regular expression is specified in the `parse_regex` field. - -`regex` supports capturing strings from a linter output for the following named capture groups: - -* `path`: file path (required) -* `line`: line number -* `col`: column number -* `severity`: one of `note`, `notice`, `allow`, `deny`, `disabled`, `error`, `info`, `warning` -* `code`: linter diagnostic code -* `message`: description - -For example, the output - -``` -.trunk/trunk.yaml:7:81: [error] line too long (82 > 80 characters) (line-length) -``` - -can be parsed with the regular expression - -``` -((?P.*):(?P\d+):(?P\d+): \[(?P.*)\] (?P.*) \((?P.*)\)) -``` - -and would result in a `trunk` diagnostic that looks like this: - -``` -7:81 high line too long (82 > 80 characters) regex-linter/line-length -``` - -In the event that multiple capture groups of the same name are specified, the nonempty capture will be preferred. If there are multiple non-empty captures, a linter error will be thrown. 
Adjust your regular expression accordingly to match the specifics of your output. - -> Note: For additional information on building custom regular expressions, see [re2](https://github.com/google/re2/wiki/Syntax). More complicated regex may require additional escape characters in yaml configuration. - -### Arcanist - -You can also output JSON using the Arcanist format. - -```json -[ - { - "Char": 1, - "Code": "missing_copyright", - "Description": "Message about things\nMaybe contain multiple lines and web\nlinks\nhttps://website.com/notice-about-stuff\n", - "Line": 1, - "Name": "Incorrect (or missing) copyright notice", - "OriginalText": "", - "Path": "somefile.py" - } -] -``` - -### Formatters - -`output: rewrite` linters write the formatted version of a file to `stdout`; this becomes an autofix which `trunk` can prompt you to apply (which is what `trunk check` does by default) or automatically apply for you (if you `trunk check --fix` or `trunk fmt`). - -For example, if you wanted a linter to normalize your line endings, you could do this: - -```yaml -lint: - definitions: - - name: no-carriage-returns - files: [ALL] - commands: - - output: rewrite - formatter: true - command: sed s/\r// ${target} - success_codes: [0] -``` - -Setting `formatter: true` will cause `trunk fmt` to run this linter. diff --git a/code-quality/overview/cli/getting-started/configuration/merge.mdx b/code-quality/overview/cli/getting-started/configuration/merge.mdx deleted file mode 100644 index 7608741..0000000 --- a/code-quality/overview/cli/getting-started/configuration/merge.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Merge" ---- -Custom `required_statuses` defined in the `.trunk/trunk.yaml` file take precedence over the GitHub required status checks from branch protection. - -Use custom `required_statuses` when your checks don't match what you configure on GitHub one-to-one. 
- -``` -version: 0.1 -``` - -``` -cli: - version: 1.16.0 -merge: - required_statuses: - - Trunk Check - - Unit tests & test coverage - # Add more required statuses here -``` diff --git a/code-quality/overview/cli/getting-started/configuration/per-user-overrides.mdx b/code-quality/overview/cli/getting-started/configuration/per-user-overrides.mdx deleted file mode 100644 index 2e2d9c9..0000000 --- a/code-quality/overview/cli/getting-started/configuration/per-user-overrides.mdx +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "Per User Overrides" ---- -## Per-User Customization - -Trunk can also be managed by the `.trunk/user.yaml` file in your repository. This file is optional, but it allows individual developers to customize how they want `trunk` to run on their machines. - -Simply configure `.trunk/user.yaml` as you would for `.trunk/trunk.yaml`. Now you can add additional linters, enable [actions](../actions/), or specify [default command options](./#cli), without impacting the way other developers run `trunk`. - -Be mindful that `.trunk/user.yaml` takes precedence over `.trunk/trunk.yaml`, so substantial modifications could violate hermeticity. - -## Identity Config - -Trunk also saves a user config in `$HOME/.cache/trunk/user.yaml`. This is auto-generated in order to manage [anonymous usage data](./telemetry) and persist login sessions. diff --git a/code-quality/overview/cli/getting-started/configuration/plugins/exported-configs.mdx b/code-quality/overview/cli/getting-started/configuration/plugins/exported-configs.mdx deleted file mode 100644 index a3ece1f..0000000 --- a/code-quality/overview/cli/getting-started/configuration/plugins/exported-configs.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Exporting linter configs" -description: "Reusing linter configs across projects." ---- -Plugin repositories can also export their own linter config files to keep configuration synced across an organization. 
Simply add an `exported_configs` section to a `plugin.yaml`, with paths to all of the config files you want to export, relative to the repository root. For example: - -```yaml -lint: - exported_configs: - - configs: - - .eslintrc.yaml - - .trunk/configs/.shellcheckrc -``` - -These config files will be available for linters that enumerate them in `affects_cache`or `direct_configs` to reference. These files are automatically symlinked into the repository root during linter execution. The set of applicable config files can be viewed in the details yaml file listed when running `trunk check --verbose`. - -Plugin-exported configs are sourced in lockstep with the plugin itself, so you will need to update\ -the `ref` field to use the latest configs. - -Note that if you're using an IDE Extension like clangd with an LSP that relies on those configs being in the root, you will need to manually create a symlink to the plugin's config. You can do this by running `ln -s .trunk/plugins// `. - -For an example of a plugin repo with config files, see our own [configs](https://github.com/trunk-io/configs) repo. - -### Importing configs - -This process can also be reversed to import config files from a plugins repository which\ -does not explicitly export them. Given a plugin sourced with id `trunk`, the sourcing repository can\ -achieve the same effect by including the following in its `.trunk/trunk.yaml`. 
- -```yaml -lint: - exported_configs: - - plugin_id: trunk - configs: - - .eslintrc.yaml - - .trunk/configs/.shellcheckrc -``` diff --git a/code-quality/overview/cli/getting-started/configuration/plugins/external-repositories.mdx b/code-quality/overview/cli/getting-started/configuration/plugins/external-repositories.mdx deleted file mode 100644 index 3b3aa96..0000000 --- a/code-quality/overview/cli/getting-started/configuration/plugins/external-repositories.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: "Share config between codebases" -description: "Sharing configuration between codebases using public config repos" ---- -To standardize Trunk configuration across an organization, you can create and publish a public plugins repository. This repo can define new linter definitions, specify enabled linters and actions, and even [export linter configs](./exported-configs). - -Once you've created your plugin repository, you can source it in other repositories to adopt shared configuration across your organization. For an example of how we do this in our own org, check out our [configs repo](https://github.com/trunk-io/configs). - -Note that in order to keep linters and tools up to date in your plugin configs repo, you'll need to run `trunk upgrade --apply-to=plugin.yaml` to apply [upgrades](../../../../linters/upgrades). After making a public GitHub release with your plugin changes, other dependent repos will pick up these changes automatically when running `trunk upgrade`. - -### Get started - -Let's walk through how to create a simple linter that warns about TODOs in your codebase. 
- -We'll start by creating a new Git repository: - -```bash -PLUGIN_PATH=~/my-first-trunk-plugin -mkdir "${PLUGIN_PATH}" && cd "${PLUGIN_PATH}" -git init -``` - -And then create a linter that can find TODOs in your codebase using `grep` and `sed`: - -```bash -cat >plugin.yaml < -trunk check enable todo-finder -``` - -And now, to demonstrate how this works, let's `trunk check` some files where we know we have TODOs: - -```bash -trunk check $(git grep -li todo | head -n 10) -``` - -which will show you something like this: - -``` -.eslintrc.yaml:19:0 - 19:0 high Found todo in " # TODO(chris): Figure out why this causes a massive slowdown ... .trunk/dev-out/O1F.txt local.todo-finder/found-todo - 101:0 high Found todo in " node/no-unpublished-import: off # TODO: do we want this?" local.todo-finder/found-todo -``` - -### Organizing your code - -In the example we gave above, we put the linter's source code in `plugin.yaml`, which is fine for an example, but not really great for anything more than that. We can take the `sed` command from the plugin we created earlier and push that into the shell script: - -```bash -#!/bin/bash -sed -E 's/(.*):([0-9]+):(.*)/\1:\2:0: [error] Found todo in \"\3\" (found-todo)/'" -``` - -> Tip: Remember to run `chmod u+x todo-finder-parser.sh` so that `trunk` can run it! 
- -and also point the definition of `todo-finder` at it: - -```bash -version: 0.1 -lint: - definitions: - - name: todo-finder - files: [ALL] - commands: - - output: parsable - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - run: ${plugin}/todo-finder-parser.sh -``` - -We can also go another step and push the entire linter definition into a shell script: - -```bash -#!/bin/bash -grep --with-filename --line-number --ignore-case todo "${1}" | \ - sed -E 's/(.*):([0-9]+):(.*)/\1:\2:0: [error] Found todo in \"\3\" (found-todo)/'" -``` - -```yaml -version: 0.1 -lint: - definitions: - - name: todo-finder - files: [ALL] - commands: - - output: parsable - run: ${plugin}/todo-finder.sh - success_codes: [0] -``` - -See our documentation on [custom linters](../../../../linters/custom-linters) and [custom parsers](../lint/output-parsing) for more on what you can do, such as writing your parser in Javascript or Python! - -### Publishing your plugin - -To share your plugin with the world, all you have to do is tag a release and push it to GitHub, GitLab, or some other repository hosting service: - -```bash -git add . 
-git commit "Create a TODO finder" -git tag -a v0.0.0 --message "Initial TODO finder release" -git remote add origin -git push origin main v0.0.0 -``` - -Now that it's available on the Internet, everyone else can just use your plugin by running: - -```bash -trunk plugins add --id=their-first-plugin v0.0.0 -``` diff --git a/code-quality/overview/cli/getting-started/configuration/plugins/index.mdx b/code-quality/overview/cli/getting-started/configuration/plugins/index.mdx deleted file mode 100644 index 93d1b9e..0000000 --- a/code-quality/overview/cli/getting-started/configuration/plugins/index.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: "Plugins" ---- -### Plugin config merging - -Trunk uses a plugin system where a root configuration is defined in the [trunk-io/plugin repository](https://github.com/trunk-io/plugins). You can import many plugin config sources, and fields defined at each level override the level above. - - -When plugin configs are merged, only fields defined in a config file are merged into the level above. You can define just the fields you wish to override in `.trunk/trunk.yaml and .trunk/user.yaml.` - - -When using trunk, you can merge several sets of configuration files with a `trunk.yaml` schema. Config merging proceeds as follows: - -1. Remote plugins sourced in `.trunk/trunk.yaml` (and `.trunk/user.yaml`). Plugins are sourced in the order they're defined, with later plugins overriding those defined before it. The [`trunk`](https://github.com/trunk-io/plugins) plugin is implicitly sourced first. -2. Your repo level `.trunk/trunk.yaml` file, complete with a CLI version and any definitions or enables. Configurations defined here override what's defined in the remote plugins. -3. Optionally, `.trunk/user.yaml`, a local **git-ignored** file where users can provide their own overrides. 
- -Additionally, any files enumerated in the lint `exported_configs` section are symlinked from their relevant plugin into the root of the workspace when an applicable linter is run with `trunk check`. - -### Importing a plugin repository - -By default, trunk imports the trunk-io/plugins repository. To import a repo add it to the `plugins.sources` list. Each repo requires a URI and ref. - -```yaml -plugins: - sources: - - id: trunk - uri: https://github.com/trunk-io/plugins - ref: v1.2.6 -``` - -| Field | Description | -|---|---| -| `id` | unique identifier for this repository | -| `uri` | address used to clone the target repository | -| `ref` | commit id or tag to checkout. **Do not use branch names, as these can be unstable** | -| `local` | path to local (on-disk) repository. Takes precedence over uri/ref if defined | -| `import_to_global` (default: `true`) | import content into the global namespace. If set to false actions and linters defined in the plugin must be referenced by `.` | - -### Plugin capabilities - -Any configuration used in `trunk.yaml` can also be used in a plugin repository, with [some exceptions](./#excluded-fields). A plugin repository must have one root level `plugin.yaml` and can have any number of other `plugin.yaml` files in other subdirectories. These configuration files are then merged into one composite plugin configuration. - -The most common use for a plugin repository is to define custom linters, actions, or tools. But they can also be used to define a common set of shared tools across an organization. For more info, see [organization configs](./external-repositories). - -The root `plugin.yaml` file may also have a `required_trunk_version` field which governs compatibility when [upgrading](../../../../linters/upgrades) between CLI versions. 
- -#### Add a plugin to your `trunk.yaml` file - -To add a plugin from GitHub: - -``` -trunk plugins add https://github.com/trunk-io/plugins --id=trunk -``` - -To add a plugin from GitHub at a specific version: - -``` -trunk plugins add https://github.com/trunk-io/plugins v1.2.6 --id=trunk -``` - -To add a plugin from a local repository: - -``` -trunk plugins add /home/user/self/hello-world --id=hello-world -``` - -Note that when specifying a remote plugin, the `ref` field must be a tag or SHA. - -### Plugins scope - -Plugins are merged serially, in the order that they are sourced, and can override almost any Trunk\ -configuration. This allows organizations to provide a set of overrides and definitions in one\ -central place. - -For instance, you can create your own `my-plugins` repository with `plugin.yaml`: - -```yaml -version: 0.1 -lint: - definitions: - - name: trufflehog - commands: - - name: lint - # override trufflehog to use '--only-verified' - run: trufflehog filesystem --json --fail --only-verified ${target} - enabled: - - ruff@0.0.256 -``` - -sourced in a `.trunk/trunk.yaml` file from another repository as follows: - -```yaml -version: 0.1 -plugins: - sources: - - id: trunk - uri: https://github.com/trunk-io/plugins - ref: v1.2.6 - - id: my-plugins - local: ../my-plugins -``` - -When a user runs `trunk` in the sourcing repository, they will already have `ruff` enabled, along with the `trufflehog` override from the `my-plugins` repository. - -Note that private GitHub plugin repositories are not currently supported. - -### Excluded fields - -Plugin `sources`, as well as the `cli` `version`, are not merged from plugin repositories to ensure\ -that config merging occurs in a predictable, stable fashion. 
diff --git a/code-quality/overview/cli/getting-started/configuration/runtimes.mdx b/code-quality/overview/cli/getting-started/configuration/runtimes.mdx deleted file mode 100644 index b277e7d..0000000 --- a/code-quality/overview/cli/getting-started/configuration/runtimes.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: "Runtimes" ---- -Trunk manages the hermetic installation of all required runtimes. You can also specifically pin a version of a runtime you'd like Trunk to use, or tell Trunk to reuse an already-installed runtime on the system. - -Trunk makes it easy for you to run tools (such as linters and actions) because, under the hood, Trunk actually downloads everything a given tool depends on, and then executes said tool in the context of its dependencies. In other words, you can run tools like `golangci-lint` and `rubocop` without wasting hours figuring out how to install the right Go and Ruby versions on your machine, because Trunk will install a `go` and `ruby` runtime for those tools to depend on. - -Importantly, just like how Trunk by design requires you to version your tools, i.e. specify which version of `golangci-lint` and `rubocop` is enabled in your repository at a given commit, Trunk also versions your runtimes. This means that you can stop asking questions like "Wait, which version of Go are you using?" and "How do I choose a Ruby version to install on this new Jenkins runner?"; instead, all you have to do is look at the `runtimes` section in your `.trunk/trunk.yaml`, and you know which version of which runtime Trunk will use for a tool at any given moment: - -``` -runtimes: - enabled: - - go@1.18.3 - - node@16.14.2 - - python@3.10.3 - - ruby@3.1.0 -``` - -## How does this work? - -Runtimes are defined by a combination of configuration and native code inside Trunk itself. Let's walk through an example, `prettier`: - -```yaml -lint: - definitions: - - name: prettier - runtime: node - package: prettier - commands: - - run: prettier -w ${target} - ... 
-``` - -Since Prettier uses the `node` runtime, let's also look at that definition; specifically, the `runtime_environment` and `linter_environment`: - -```yaml -runtimes: - definitions: - - type: node - linter_environment: - - name: PATH - list: - - ${linter}/node_modules/.bin - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: - - ${runtime}/bin -``` - -Now we have all the config fields we need to understand what Trunk does in this example. - -### Installing `prettier` - -Before Trunk can run `prettier`, it needs to install `prettier`; this is done using the package manager associated with a given runtime, the mechanism for which is defined natively inside Trunk (i.e. Trunk has custom code for every runtime to manage how packages for said runtime are installed). - -For most runtimes, this is as simple as executing the runtime's package manager in the context of the `runtime_environment`; in this example, that means doing `npm install ${package}` with environment variables `HOME=${home}` and `PATH=${runtime}/bin`. - -### Running `prettier` - -Once `prettier` is installed, we combine its runtime's `linter_environment` with any other environment variables that might be defined in a given `lint.definitions` entry (in this case there are none), and then use that as the environment when we execute the command for a given linter. - -## Specifying a runtime version - -If you would like to use the system-installed runtime instead of the Trunk managed version you can always use the `runtimes.definitions.system_version` property in your `trunk.yaml` file. - -```yaml -runtimes: - enabled: - - go@x.y.z - -# or -runtimes: - enabled: - - go@>=x.y.z - definitions: - - type: go - system_version: allowed -``` - -If you choose to use a system-managed version, you will also need to specify a runtime version constraint in your enabled section, e.g. `python@>=3.0.0`. 
diff --git a/code-quality/overview/cli/getting-started/configuration/telemetry.mdx b/code-quality/overview/cli/getting-started/configuration/telemetry.mdx deleted file mode 100644 index 866126a..0000000 --- a/code-quality/overview/cli/getting-started/configuration/telemetry.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Telemetry" ---- -Trunk sends basic usage metrics from our local tools ([CLI](/broken/pages/OJc6wVrAfc2SLQZlJ2m1) & [VS Code Extension](../../../ide-integration/vscode)) to our analytics system to help us understand our usage and improve our tools over time. We do not send your code or codebase to our backend. - -## Why we collect usage data - -Our product team constantly works on feature enhancement and new areas to invest in. Usage data allows us best to understand the ergonomics and performance of our tools. For example, if we add a new subcommand to the command line interface - how often is it used? Additionally, usage data is gathered to track usage and compliance against our free and paid product offerings. - -To give concrete examples: we track our users' client version and operating system to understand backward compatibility requirements, and the time it takes our user base to upgrade to our latest releases. - -## Example usage data - -```json -{ - "anonymous_id": , - "command": "check --all", - "launcher_version": "1.2.3", - "os": "macOS", - "release": 1.4.1, - "source": "client", - "time": , - "exit_code": 0, - "duration_ms": 232, - "repository": -} -``` - -## Can I disable usage data? - -Yes. 
You can disable usage telemetry by setting the following environment variable: - -```bash -TRUNK_TELEMETRY=off -``` diff --git a/code-quality/overview/cli/getting-started/configuration/tools.mdx b/code-quality/overview/cli/getting-started/configuration/tools.mdx deleted file mode 100644 index 9445e53..0000000 --- a/code-quality/overview/cli/getting-started/configuration/tools.mdx +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: "Tools" ---- -Tool definitions - -Each tool definition shares a set of attributes: - -| Field | | -|---|---| -| `name` | The name of the tool. Must be unique. | -| `known_good_version` | The default version to initialize the tool at (required). | -| `shims` | A list of binaries exposed by the tool. Each of these will correspond to one identically named executable installed in `.trunk/tools.`In the most common case, there is exactly one shim matching the name of the tool. We'll discuss other cases below. | -| `environment` | You can specify an environment for the tool. We provide the `${tool}` template argument that resolves to the installation directory of the tool. By default, we prepend this to `$PATH` within the shim script, so this is used to locate the binary. For legacy reasons, `${linter}` also resolves to this directory. | - -> Note: If the tool has a `runtime` attribute, the runtime's environment is merged in to its environment (discussed in the examples below). - -Broadly speaking, there are 3 kinds of tools - download, package, and runtime-based tools. We'll look at each one in turn: - -#### Download-based tools - -Download-based tools are straightforward: They reference a named download configuration in the global `downloads` section. 
Here is an example: - -```yaml -downloads: - - name: gh - downloads: - - os: - linux: linux - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.tar.gz - strip_components: 1 - - os: - windows: windows - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.zip - strip_components: 1 - # macOS releases since 2.28.0 started using .zip instead of .tar.gz - - os: - macos: macOS - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.zip - strip_components: 1 - version: ">=2.28.0" - - os: - macos: macOS - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.tar.gz - strip_components: 1 -tools: - definitions: - - name: gh - download: gh - known_good_version: 2.27.0 - environment: - - name: PATH - list: ["${tool}/bin"] - shims: [gh] -``` - -Note that for the downloaded archive, the binary named `gh` is inside the `bin` directory, so we use the environment to point the `$PATH` there. - -#### Download fields - -`strip_components`: This number of leading directory components to remove from all files in an archive when extracting. - -`rename_single_file`: If an archive contains a single file, this will cause that file to be renamed to the name of the tool. This is most useful for downloads of gzip'd binaries with the platform name in the binary. - -#### Package-based tools - -Package-based tools depend on specified `package` and `runtime` attributes. 
Here is an example of configuring `mypy` as a tool: - -```yaml -tools: - definitions: - - name: mypy - runtime: python - package: mypy - shims: [mypy] - known_good_version: 0.931 - extra_packages: - - types-six@1.16.21 - - types-request -``` - -`extra_packages` behaves equivalently to a package file like `requirements.txt` for Python or `package.json` for Node. They can be optionally pinned at versions. - -The version of the primary package (in this case, `mypy`) is specified in the `tools.enabled`. So to enable the `mypy` tool at `1.4.0`, list it as `- mypy@1.4.0`. - -If you don't want to include additional packages in the tool definition, you can instead make them explicit in the enabled section of your `.trunk/trunk.yaml` as you would for [linters](../../../linters/), for example: - -```yaml -tools: - enabled: - - mypy@1.4.0: - packages: - - types-six@1.16.21 -``` - -#### Runtime-based tools - -Runtime-based tools are a special case that are not explicitly defined. Rather, each runtime object exposes a set of `shims` (just like `tool` definitions). - -If the runtime is enabled and listed in `tools.runtimes`, then shims exposed by that runtime are automatically installed in the `.trunk/tools` directory alongside those of other tools (`trunk tools enable ` does that for you). Thus you can run `python`, `pip`, etc as `trunk`-managed tools. - -Example: - -```yaml -tools: - runtimes: - - python -``` - -If this is disruptive to your workflow, simply remove the runtime's name `(go, node, python,...)` from `tools.runtimes` section or run `trunk tools disable ` which will handle it for you. Runtimes cannot be enabled or versioned via the `tools.enabled` section, however, and runtimes must be enabled in the `runtimes` section to be available to have their shims installed. 
diff --git a/code-quality/overview/cli/getting-started/index.mdx b/code-quality/overview/cli/getting-started/index.mdx deleted file mode 100644 index 041487d..0000000 --- a/code-quality/overview/cli/getting-started/index.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Code Quality CLI" ---- -Trunk provides command-line tools for different products. Choose your product below: - -* [Trunk Launcher Install](./install): Trunk uses a launcher to automatically install the appropriate CLI for your platform -* [Trunk Code Quality CLI](./commands-reference/): commands reference -* [Trunk Code Quality CLI Configuration](./configuration/): the Trunk CLI has its top-level config defined in `.trunk/trunk.yaml` -* [Trunk Tools CLI](./tools): manage tools used by your repo -* [Trunk Actions](./actions/): local workflow automation and githooks manager - -## Initializing Trunk in a git repo is as simple as running: - -```bash -trunk init -``` - -This will scan your repository and create a `.trunk/trunk.yaml` file which enables all the linters, formatters, and security analyzers that [Trunk C](./code-quality)[ode Quality ](./code-quality)recommends. - - -Security-conscious users may want to also record the signature of the CLI, which the [Trunk Launcher](./install#the-trunk-launcher) will use to verify the CLI's provenance: - -``` -trunk init --lock -``` - - -### Tweak the configuration - -Trunk is completely controlled through the `trunk.yaml` file. If for example you are not using the `check` tool you can safely remove the `lint` section from the file. - -[Learn more about CLI configuration](./configuration/) - -### Single-player mode - -If you want to run `trunk` inside your repository but are not ready to roll it out team-wide, you can run `trunk` in what we call single-player mode. - -When in single-player mode, the `.trunk` directory will be listed in `.git/info/exclude`, which will cause git to ignore its contents. 
When trunk is automatically initialized by the VSCode extension, you will be started in this mode. You can also initialize this way explicitly with the `trunk init --single-player-mode` command. If at any time you wish to toggle single-player mode on or off, it can be done with the following two commands: - -```bash -# Turn single-player mode on. -trunk config hide -``` - -```bash -# Turn single-player mode off. -trunk config share -``` - -### Only enabling detected tools - -`trunk init` supports the flags `--only-detected-formatters` and `--only-detected-linters`. Each of these flags limits `trunk init` to only enable tools that we detect you are already using. - -We provide support for running `trunk` in GitHub Codespaces. - -[GitHub Codespaces](https://github.com/features/codespaces) are fully configured virtual containers for developing your GitHub repositories. diff --git a/code-quality/overview/cli/getting-started/install.mdx b/code-quality/overview/cli/getting-started/install.mdx deleted file mode 100644 index 9b42e71..0000000 --- a/code-quality/overview/cli/getting-started/install.mdx +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: "Install" ---- -### The Trunk launcher - -Trunk uses a launcher to automatically install the appropriate CLI for your platform. The launcher is a bash script that downloads the appropriate Trunk CLI version and runs it. The launcher invisibly runs the Trunk CLI version specified in a project's `.trunk/trunk.yaml` file. The actual Trunk CLI is a single binary that is cached locally in `~/.cache/trunk` and is updated automatically. - -### Install the launcher - -The Trunk CLI can be installed in many different ways, depending on your use case. - -#### Using NPM - -If your project uses a `package.json`, you can specify the Trunk Launcher as a dependency so your developers can start using Trunk after installing Node dependencies. 
- - -```sh npm -npm install -D @trunkio/launcher -``` -```sh pnpm -pnpm add -D @trunkio/launcher -``` -```sh yarn -yarn add -D @trunkio/launcher -``` -```sh bun -bun install -D @trunkio/launcher -``` - - -Then add Trunk Launcher in your `package.json` as a script: - -```json -{ - "scripts": { - "trunk": "trunk", - "lint": "trunk check", - "fmt": "trunk fmt" - } -} -``` - -#### Using cURL - -You can install the Trunk Launcher script directly by downloading it through cURL. The launcher script supports both macOS and Linux environments. - -To allow your teammates to use `trunk` without installing anything, the launcher can be committed directly into your repo: - -``` -curl -LO https://trunk.io/releases/trunk -chmod +x trunk -git commit ./trunk -m "Commit Trunk to our repo" -``` - -When the launcher is called for the first time by your teammates, the Trunk Launcher will download, manage, and run the appropriate binary for the environment. - -#### Using Homebrew - -You can run the following command if you prefer to install this tool via homebrew. Keep in mind that other developers on your team will also have to install manually. - -```bash -brew install trunk-io -``` - -#### Using Windows - -From **`git-bash` or `msys2`**, download the Bash launcher and add it to your `PATH`: - -```bash -curl https://get.trunk.io -fsSL | bash -``` - -From **`powershell`**, download the powershell launcher: - -``` -Invoke-RestMethod -Uri https://trunk.io/releases/trunk.ps1 -OutFile trunk.ps1 -``` - -Ensure you can execute powershell scripts: - -``` -Set-ExecutionPolicy Bypass -Scope CurrentUser -``` - -You can then execute trunk as `.\trunk.ps1`. 
- -#### Compatibility - -Trunk only supports Windows with the following versions and above: - -| Tool | Where to Modify | Minimum Required Version | -|---|---|---| -| CLI | `cli` `version` in `.trunk/trunk.yaml` | `1.13.0` | -| Plugins | `ref` for the `trunk` plugin in `.trunk/trunk.yaml` | `v1.0.0` | -| VSCode | Reload VSCode to update | `3.4.4` | - -You will also need to install [C and C++ runtime libraries](https://aka.ms/vs/17/release/vc_redist.x64.exe) in order to run some linters. - -### Uninstall instructions - -#### From your system - -Trunk has a very minimal installation, and therefore, there's not much to uninstall. The two system paths we use are: - -* `/usr/local/bin/trunk`: the [Trunk Launcher](./install#the-trunk-launcher) -* `~/.cache/trunk`: cached versions of the trunk cli, linters, formatters, etc. - -You can delete those two paths to uninstall. - -#### From a repo - -To cleanly remove Trunk from a particular repo, run: - -```bash -trunk deinit -``` - -#### VS Code extension - -To uninstall the Trunk VS Code extension, do so as you would any extension ([docs](https://code.visualstudio.com/docs/editor/extension-marketplace)). Then reload VS Code. - -### Binary download (not recommended) - -You can directly download the `trunk` binary. _We don't recommend this mode of operation because your ability to version the tool through_ `trunk.yaml` _will not function when launching_ `trunk` _directly from a downloaded binary._ Regardless you can bypass the launcher support by downloading the prebuilt binaries here: - -| variable | options | -|---|---| -| version | the semver of the binary you want to download | -| platform | 'darwin`, 'linux' | - -```bash -# for example https://trunk.io/releases/1.0.0/trunk-1.0.0-linux-x86_64.tar.gz -https://trunk.io/releases/${version}/trunk-${version}-${platform}-x86_64.tar.gz -``` - -### Pre-installing tools - -Trunk hermetically manages all the tools that it runs. 
To do this, it will download and install them into its cache folder only when they are needed. If you would like to ensure that all tools are installed ahead of time, then you can use the `trunk install` command. This may be useful if you want to prepare to work offline or if you would like to include the tools in a docker image. On Linux and macOS you may find the cache folder at `$HOME/.cache/trunk`. diff --git a/code-quality/overview/cli/getting-started/tools.mdx b/code-quality/overview/cli/getting-started/tools.mdx deleted file mode 100644 index a065fae..0000000 --- a/code-quality/overview/cli/getting-started/tools.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: "Tools" ---- -You can use the Trunk CLI to manage tools used by your repo. Trunk CLI can install the tools needed for a project according to what's configured in the `trunk.yaml` config file and let your teammates easily install the same versions of the tools. Trunk will also help you expose those installed tools by dynamically adding them to your `PATH` when you enter the project directory, but will not pollute your `PATH` outside of the project. - -### Command line - -| trunk tools \ | Description | -| -------------------------------- | ------------------------------------------------------------------------------ | -| `list` | list all available tools in the repository and whether they are enabled or not | -| `install` | install your enabled tools into `.trunk/tools` | -| `enable` `[@version]` | enable the provided tool, optionally at a specified version | -| `disable` `` | disable the provided tool | - -### Discovering tools - -The Trunk [plugins repo](https://github.com/trunk-io/plugins) ships with a collection of tools that can help supercharge your repository and provide examples for how to write your own. 
To see a list of tools that you can enable in your own repo run: - -```shell -trunk tools list -``` - - -![](/assets/image_(17).png) - - -### Configuring shell hooks - -Before running any tools managed by Trunk, enable shell hooks. With shell hooks, Trunk can manage your path variable dynamically, which lets you install tools used only in specific repos without polluting your shell by installing global tools. This is especially useful if you work on two repos using the same tool, but locked to different versions. - -You can enable shell hooks by running `trunk shellhooks install`, which will install the Trunk hooks to the config file of your $SHELL. You can also run `trunk shellhooks install ` to install a specific shell hook. - -Supported shells: - -* bash -* zsh -* tcsh -* fish -* elvish - -For organizations that want to require the use of the hooks, they can add to the config file: - -```yaml -# .trunk/trunk.yaml: -version: 0.1 -cli: - shell_hooks: - enforce: true -``` - -On the next Trunk command (like check or fmt), it will update your shell RC file to load our hooks. - -After reloading your shell, whenever you're inside your repo at the command line, you can just run shims installed by `trunk tools` directly by name. - -N.B. There is a known incompatibility with direnv when using PATH\_ADD. To use our hooks, remove PATH\_ADD from your .envrc and add them to your Trunk config as such: - -```yaml -version: 0.1 -cli: - shell_hooks: - path_add: - - "${workspace}/tools" -``` - -Paths can either be absolute, or relative to the workspace using the special `${workspace}` variable. - -### Running tools - -With shell hooks enabled, you can just run your tools by their name. For example, if you have run `trunk tools install grpcui` to install the GRPC UI tool, you can run it with: - -``` -grpcui -``` - -#### Running tools without shell hooks - -Trunk installs your enabled tools into the `.trunk/tools` directory. 
Each tool exposes a list of **shims** (these may or may not be identically named to the tool - most typically a tool has one shim matching the name of the tool). Each shim is installed into the `.trunk/tools` directory. - -You can run your tools by referring to the path `/.trunk/tools/` but this is unwieldy. We highly recommend using our shell hooks to manage your PATH. - -### Troubleshooting linters - -Tools enable you to run your linter binaries on the command line independent of `trunk check` and test and troubleshoot your integrations more easily. - -Tools are configured in the `tools` section of `trunk.yaml`. As with other settings, you can override these values in your [User YAML](./configuration/per-user-overrides). - -```yaml -tools: - auto_sync: false # whether shims should be hot-reloaded off config changes. - enabled: - - bazel@6.0.0 - - mypy@1.4.1 - - ibazel@0.22.0 - - helm@3.9.4 - - eksctl@0.74.0 - - asciinema@2.1.0 - disabled: - - gt - definitions: - - name: gh - download: gh - known_good_version: 2.27.0 - environment: - - name: PATH - list: ["${tool}/bin"] - shims: [gh] -``` - -Like with actions and linters, we have a (versioned) `enabled` section and a `disabled` section, which can be manipulated using `trunk tools enable/disable`. There is also a list of `definitions`, which are merged across your `trunk.yaml`, `user.yaml`, as well as any plugins that you use. - -`auto_sync` controls whether or not Trunk automatically installs your tools for you when your config changes. This defaults to `true`. Note that the daemon must be running with the monitor in order for this to function properly. 
diff --git a/code-quality/overview/deal-with-existing-issues.mdx b/code-quality/overview/deal-with-existing-issues.mdx deleted file mode 100644 index 5e30e83..0000000 --- a/code-quality/overview/deal-with-existing-issues.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: "Local linting" ---- -After initializing Trunk, you can begin scanning for issues in your repo, and decide whether to fix them up front, fix them incrementally as you code, or ignore irrelevant suggestions. This page walks through the process of linting locally and fixing existing issues. - -### Running for the first time - -After initializing Trunk Code Quality, you can run **all tools** on **all files** to look for existing issues. You can run Trunk on **all files** in your repo with this command. This will output all issues detected by every linter enabled in your project. - - -**Hold-the-line** - -You don't need to fix all issues upfront. Trunk lets you fix linter errors incrementally with hold-the-line. - -Learn more about [hold-the-line](./deal-with-existing-issues#hold-the-line). - - -#### Issues in pull requests - -You can reproduce issues discovered in CI by running `trunk check` and addressing issues. - -If `trunk check` continues to identify new Code Quality issues on your PR, first try merging the latest changes from your base branch. Trunk will rebase your changes on top of the current `HEAD` in main to ensure it catches all issues before merging. - -If this continues to fail, then run `git checkout refs/pull//merge && trunk check`. This is a reference to the merge commit GitHub creates. - -### Hold-the-line - -After initializing Trunk, you can begin scanning for issues in your repo, and decide whether to fix them up front, fix them incrementally as you code, or ignore irrelevant suggestions. This page walks through the process of linting locally and fixing existing issues. 
- -If you **only want to prevent new issues** from new code changes, skip to [prevent-new-issues](./prevent-new-issues/ "mention"). - -### Running for the first time - -After initializing Trunk Code Quality, you can run **all tools** on **all files** to look for existing issues. You can run Trunk on **all files** in your repo with this command. This will output all issues detected by every linter enabled in your project. - -```bash -trunk check --all -``` - - -**Trunk is Git aware** - -When you run `trunk check` without specifying `--all`, it will **only run on files you've modified according to git**. Remember to [specify a base branch](./initialize-trunk#initializing-trunk) if you're using something other than `main` or `master`. - - -### Fixing existing issues - -There are different approaches to dealing with existing issues, such as running `format` and applying automatic fixes, ignoring irrelevant issues, and sampling linters/files. This section walks you through the process to make fixing issues easier. - - -**Hold-the-line** - -You don't need to fix all issues upfront. Trunk lets you fix linter errors incrementally with hold-the-line. - -Learn more about [hold-the-line](./deal-with-existing-issues#hold-the-line). - - -#### Running formatters and applying fixes - -Some issues can be fixed automatically. You can apply fixes by running the following command. - -```bash -trunk check --all --fix -``` - -#### Overwhelmed by existing issues? - -You can also focus on the issues revealed by 1 linter at a time. - -```bash -trunk check --all --filter= -``` - -If that still produces too many issues, you can sample your files, such as 1/5 files. - -```bash -trunk check --all --filter= --sample=5 -``` - -You can drill down further and run only one single file. - -```bash -trunk check --all --filter= --sample=5 -``` - -If you're still overwhelmed by the results, you can fix them incrementally as you change files. 
See the [hold-the-line](./deal-with-existing-issues#hold-the-line) section. - -#### Disabling linters - -Some recommended linters could be unnecessary for your project. You can disable and enable linters with these commands: - -```bash -trunk check enable -trunk check disable -``` - -#### Ignore issues - -If there are warnings that don't apply to your project, you can ignore them by line, by file, or by class of warnings in each linter's config file. - -You can tell Trunk Code Quality to ignore a line in your source code with a special comment like this: - -```cpp -struct FooBar { - // trunk-ignore(clang-tidy) - void *ptr = NULL; -}; -``` - -The comment should contain the name of the linter you want to ignore the following line, in this case `clang-tidy` For more complex ignore commands, see [Ignoring Issues](./linters/ignoring-issues-and-files). - -Sometimes you may want to ignore entire files or groups of files, such as generated code. To ignore them, use the `ignore` key to your `.trunk/trunk.yaml` file: - -```yaml -lint: - ignore: - - linters: [ALL] - paths: - # Ignore generated files - - src/generated/** -``` - -You can also ignore an entire class of warnings using the config file of your linter, either at the project root or in `.trunk/configs` - -For example, these are the ignores for Markdownlint in `.trunk/configs/.markdownlint.yaml`: - -```yaml -# Prettier friendly markdownlint config (all formatting rules disabled) -extends: markdownlint/style/prettier -MD024: false -MD033: false -MD034: false -``` - -#### Issues in pull requests - -You can reproduce issues discovered in CI by running `trunk check` and addressing issues. - -If `trunk check` continues to identify new Code Quality issues on your PR, first try merging the latest changes from your base branch. Trunk will rebase your changes on top of the current `HEAD` in main to ensure it catches all issues before merging. 
- -If this continues to fail, then run `git checkout refs/pull//merge && trunk check`. This is a reference to the merge commit GitHub creates. - -### Hold-the-line - -You don't need to fix all the issues. Trunk Code Quality has the ability to _**Hold The Line**_, which means it only lints your git diffs; only what you changed on your branch gets linted. The pre-existing issues can be managed later. - -This allows you to clean up as you go, preventing new issues and letting your team leave each file with better code quality than before. - -When you've fixed the existing issues you want to fix, you can skip to [prevent-new-issues](./prevent-new-issues/ "mention") directly. diff --git a/code-quality/overview/debugging.mdx b/code-quality/overview/debugging.mdx deleted file mode 100644 index 50fdfda..0000000 --- a/code-quality/overview/debugging.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: "Debugging" ---- -## Why aren't issues showing up anymore? - -If you aren’t seeing any issues the likely cause is that your local repo is clean. By default Trunk Code Quality only processes new changes to your codebase (read about [hold-the-line](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line)). - -You can use `trunk check` to scan for older, pre-existing lint issues. - -For example, to look at a sampling of each linter's issues for 5 random files: - -```sh -trunk check --samples=5 -``` - -You can also scan all files using `--all`: - -```sh -trunk check --all -``` - -[Read our docs for more information on CLI options](./deal-with-existing-issues#fixing-existing-issues). - -## My linters are failing or not running as expected - -When your linters aren’t working the way you expect, first check their configuration. Trunk’s [list of supported linters](./linters/supported/) provides some specific tips for certain linters. You can see the full default configuration of every linter in [Trunk’s public plugin repo](https://github.com/trunk-io/plugins/tree/main). 
- -You can also try running `trunk check --verbose` to see what’s going on under the hood. If that still doesn’t work then please reach out to us on [our community Slack](https://trunkcommunity.slack.com/ssb/redirect) with the output of `trunk check --verbose`. - -## Why does Trunk take up so much disk space? - -Trunk Code Quality uses hermetically versioned tools, which means it downloads a separate copy of the tools and runtime for each tool version. Over time, as tools are upgraded, this can leave a lot of unnecessary files in the cache directory. Trunk is working on a way to automatically remove unneeded files from the cache. In the meantime, you can safely clear your cache with - -``` -trunk cache clean --all -``` - -then run `trunk install` again in your repos. - -## How do I make a linter work with a different file type? - -Every linter defines a set of file types that it wants to work with in a section of the YAML called `files`. To change this you need to override the files section of that linter’s definition. [More linter application file types](./getting-started/configuration/lint/files-and-caching#applicable-filetypes). - -Suppose you are using the **foo-linter** which normally runs on `foo` files. The config might look like this: - -```yaml -lint: - files: - - name: foo - extensions: [foo] - definitions: - - name: foo-linter - files: [foo] - commands: - - name: lint - output: pass_fail - run: echo “foo” - success_codes: [0, 1] -``` - -To add support for `bar` files add this to your `trunk.yaml` file. The first part defines the `bar` file type, and the second says that `foo-linter` uses both `foo` and `bar` files. - -```yaml -lint: - files: - - name: bar - extensions: [bar] -... - - definitions: - - name: foo-linter - files: - - foo - - bar -``` - -## How can I disable trunk on a commit for just me, but keep it on for the rest of my team? - -If you prefer to never run Trunk on commit and push you can disable it just for you. 
Edit or create the `.trunk/user.yaml` file and change the `actions.disabled` section to look like this: - -```yaml -version: 0.1 -actions: - disabled: - - trunk-check-pre-push - - trunk-fmt-pre-commit - -``` - -This will disable the checks for just the current user. The `.trunk/user.yaml` file is specifically gitignored but will be loaded locally if present. - -## What should I do if a linter process seems to take longer than expected during a Trunk check? - -There are two main strategies to address this issue: **configuring timeouts** and **ignoring certain files**. - -**Timeout Configuration** - -Each linter integrated with Trunk Code Quality has a default timeout of 10 minutes to prevent processes from running indefinitely. If a linter exceeds this time frame, Trunk Code Quality will automatically terminate the process and notify you of the timeout. - -To adjust the timeout duration for a specific linter, you can modify its `run_timeout` setting in your configuration. For example: - -```yaml -lint: - definitions: - - name: clang-tidy - run_timeout: 5m -``` - -Timeouts can be specified using `s` for seconds, `m` for minutes, or `h` for hours, allowing you to tailor the behavior to your project's needs. More on [linter timeouts](./linters/configure-linters#timeout). - -**Ignoring Files** - -Certain files, particularly those that are auto-generated, may not require linting and can significantly extend the duration of checks. To exclude these from being checked, use the `ignore` key in your configuration: - -```yaml -lint: - ignore: - - linters: [ALL] - paths: - # Ignore generated files - - src/generated/** - # Except for files ending in .foo - - !src/generated/**/*.foo # Test data - - test/test_data -``` - -This approach lets you specify which linters to ignore for particular paths, optimizing the check process and focusing on relevant files. [More details on ignoring files](./linters/). 
- -## `trunk init` says "Trunk can only init if it's run at the root of a git repo" - -Trunk requires that you run `trunk init` from the root of a git repository. Trunk is git-aware, and relies on git to understand which files are modified, gitignored, and more. - -If you see this message, it means that you are not in the root directory of a git repository. If you are in a git worktree, Trunk _does_ support worktrees. Your worktree may be in a broken state, try running `git worktree repair` and then `trunk init` again. diff --git a/code-quality/overview/getting-started/actions/git-hooks.mdx b/code-quality/overview/getting-started/actions/git-hooks.mdx deleted file mode 100644 index 786e0eb..0000000 --- a/code-quality/overview/getting-started/actions/git-hooks.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Git Hooks" -description: "Trunk supports triggering actions on all githooks" ---- -### Features - -* Seamlessly bring `git-hooks` under version control. `git-hooks` can be a major headache for organizations - they require manual installation and are not easily versioned along with the rest of your code. -* Take advantage of Trunk's powerful sandboxing and environment management to write and execute hooks using the programming language and runtime of your choice, as opposed to dealing with complicated bash scripts. - -### Manual installation - -```bash -trunk git-hooks sync -``` - -### Automatic Installation - -Trunk will automatically install and begin managing your `githooks` if you have any actions enabled in `trunk.yaml` which trigger from git events. - -### Triggering an action from a githook - -As an example let's examine how we implement the `git-lfs` action in the [plugins repo](https://github.com/trunk-io/plugins). 
- -#### Definition - -```yaml -- id: git-lfs - display_name: Git LFS - description: Git LFS hooks - run: git lfs "${hook}" "${@}" - triggers: - - git_hooks: [post-checkout, post-commit, post-merge, pre-push] -``` - -#### Template resolution - -As documented by [git](https://git-scm.com/docs/githooks), each githook generates a variable number of parameters that can be referenced in the `run` entry for the action. - -The following special variables are made available for template resolution when reacting to a git event: - -| Variable | Description | -| ----------------------------- | --------------------------------------------------------------- | -| `${hook}` | Hook that triggered this action (e.g. `pre-commit`, `pre-push`) | -| `${1}`,`${2}`, `${3}`, etc... | Positional parameters passed by `git` to the hook | -| `${@}` | All parameters passed to the hook | - -#### Interactivity - -```yaml -interactive: true -``` - -Setting `interactive` to true will allow your githook action to be run from an interactive terminal. This enables you to write more complicated hooks to react to user input. - -#### Testing a `githook` action - -The following command will simulate a githook event and execute all of the enabled actions for the provided hook in the order you defined them. - -```bash -trunk git-hooks callback -- -``` - -Alternatively, once an action is enabled you can call `git` and debug with the actual `git` provided data. This is sometimes easier since some git parameters point to txt files and fabricating those formats through manual testing can be tricky. 
- -#### Debugging a `githook` action - -You can observe the actions that are triggered by a `git` event by calling: - -```bash -trunk actions history -``` - -Which will print out the last 10 executions including timestamps of the specified action \\ - - -![](/assets/image_(8).png) - - -### Uninstalling - -Remove all actions that are triggered by githooks from `trunk.yaml` and run - -```bash -git config --unset core.hooksPath -``` diff --git a/code-quality/overview/getting-started/actions/index.mdx b/code-quality/overview/getting-started/actions/index.mdx deleted file mode 100644 index 1295770..0000000 --- a/code-quality/overview/getting-started/actions/index.mdx +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Actions" ---- -The most common Trunk Actions are provided out of the box with trunk, and are triggered to invisibly autoformat (`trunk fmt`) your commits every time you `git commit`, and run `trunk check` when you `git push`. - -### Triggers - -There are several different types of Trunk Actions, based on when they are triggered: - -| Trigger | Description | -|---|---| -| time-based | run on a schedule (once per hour, once per day, once per week) | -| file modification | run whenever a file or directory in your repo changes. | -| [githooks](./git-hooks) | run whenever a listed githook event fires (e.g. pre-commit, on-push) | -| manual | `trunk run ` | - -### **Command line** - -| trunk actions \ | Description | -|---|---| -| `list` | list all available actions in the repository | -| `history ` | print the history for execution of the provided action | -| `enable ` | enable the provided action | -| `disable ` | disable the provided action | -| `run ` | manually trigger the provided action
alias: `trunk run ` | - -### Discovering actions - -The trunk [plugins](https://github.com/trunk-io/plugins) repo ships with a collection of actions that can help supercharge your repository and provide examples of how to write your own actions. To see a list of actions that you can enable in your repo run: - -```bash -trunk actions list -``` - - -![](/assets/image_(16).png) - - -### Enable/Disable actions - -Trunk only runs actions listed in the `enabled` section of your `trunk.yaml`. Some built-in actions are enabled by default and can be disabled explicitly by adding them to the disabled list. You can always run `trunk actions list` to check the enabled status of an action. - -```yaml -actions: - enabled: - - trunk-announce - - git-lfs - - trunk-check-pre-push - - trunk-fmt-pre-commit - - trunk-cache-prune - - trunk-upgrade-available -``` diff --git a/code-quality/overview/getting-started/announce.mdx b/code-quality/overview/getting-started/announce.mdx deleted file mode 100644 index 351b648..0000000 --- a/code-quality/overview/getting-started/announce.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Announce" ---- -### Trunk Announce - -Does your Git commit carry some important information to share with the rest of your organization? Now you can easily share it with the rest of the org by including `/trunk announce` at the beginning of one of the lines of your commit message. - - -If your org squashes commit messages, you should put it in your PR description - - -Any additional text on that line will form an optional title, and the remaining text of the commit message will form the commit body (both are optional, but either a title or body is required). These will then be displayed to other users when they pull or rebase. - -### Enable Trunk Announce - -Trunk Announce is a githook-triggered Trunk Action. 
You can enable this Trunk Action by running this command: - -``` -trunk actions enable trunk-announce -``` - -### Viewing Announcements - -When you pull new changes, new announcements are automatically shown. - -If you would like to see changes since some commit, use `trunk show-announcements since `. - -For example: - -``` - trunk show-announcements since HEAD~1 -``` diff --git a/code-quality/overview/getting-started/caching.mdx b/code-quality/overview/getting-started/caching.mdx deleted file mode 100644 index 42fa831..0000000 --- a/code-quality/overview/getting-started/caching.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Caching" ---- -Trunk hermetically manages all the tools that it runs. To do this, it will download and install them into its cache folder only when needed. On Linux and macOS you may find the cache folder at `$HOME/.cache/trunk`. - -### Viewing your repo's cache - -If you need to debug your repo's cache, you can find its location by running the cache command. - -``` -trunk cache -``` - -### Cleaning cache - -Trunk will automatically clean up downloads that have not been used in a while, such as old versions of tools and linters. - -If you want to manually prune files in your cache directory that are no longer needed, you can run this command: - -``` -trunk cache prune -``` - -If you need to clean your entire cache manually, you can use the command: - -```sh -trunk cache clean --all -``` - -Remember to rerun the install command to reinstall the necessary tools and linters. - -``` -trunk install -``` diff --git a/code-quality/overview/getting-started/code-quality.mdx b/code-quality/overview/getting-started/code-quality.mdx deleted file mode 100644 index cf3d738..0000000 --- a/code-quality/overview/getting-started/code-quality.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: "Code Quality" -description: "CLI Metalinter and VSCode extension for over 100 code checking tools." 
---- - -Available as a CLI tool and VSCode extension, Code Quality is a separate from the Trunk Platform for CI Stability, which includes [Merge Queue](../../broken-reference/) and [Flaky Tests](../../broken-reference/). Code Quality runs entirely locally and does not require access to the Trunk web app or platform services. - - -Trunk Code Quality is a **metalinter** that lets you lint every language and every file in your project with a single tool using 100+ supported idiomatic code-checking tools, such as ESLint, Prettier, Ruff, and more for every language and project. - -Trunk Code Quality is trusted by popular open-source projects like [**ESLint**](https://eslint.org/) to improve their developer experience. [Learn more about how ESLint leverages Code Quality in their repos](https://trunk.io/blog/improving-linting-experience-in-eslint-s-open-source-repo-with-trunk-code-quality). - -### What is Code Quality? - - - - A tour of Code Quality, what it does, its key features, and its components. - - - How Code Quality works under the hood to level up your linting experience. - - - What makes Trunk Code Quality different from other metalinters. - - - Browse the 100+ supported static analysis tools to lint, format, and secure your projects. - - - -### How do I get started? - - - - - - - diff --git a/code-quality/overview/getting-started/commands-reference/actions.mdx b/code-quality/overview/getting-started/commands-reference/actions.mdx deleted file mode 100644 index be1cb78..0000000 --- a/code-quality/overview/getting-started/commands-reference/actions.mdx +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: "Actions" ---- -### Trunk Actions - -`trunk actions`: Workflow automation for your repo. 
- -#### **Usage** **example** - -``` -trunk actions [options] [subcommand] -``` - -#### Options - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions run - -`trunk actions run`: Run a specified trunk action. **Usage** **bash** - -``` -trunk actions run [options] -``` - -#### **Options** - -* `--nolog`: Don't create a log file for the action run -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output -* `--name `: Specify the name of the Trunk action to be executed -* `--branch `: Run the action on a specific branch -* `--retry `: Number of times to retry the action on failure - -### Trunk Actions history - -`trunk actions history`: View the history of Trunk actions. - -#### **Usage** example - -``` -trunk actions history [options] -``` - -#### **Options** - -* `--count`: Number of logs to show -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. 
-* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions list - -`trunk actions list`: List all Trunk actions. - -#### **Usage** example - -``` -trunk actions list [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions enable - -`trunk actions enable`: Enable a specified Trunk action. - -#### **Usage** example - -``` -trunk actions enable [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Actions disable - -`trunk actions disable`: Disable a specified Trunk action. - -#### **Usage** example - -``` -trunk actions disable [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. 
-* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### Trunk Shellhooks - -`trunk shellhooks`: Let Trunk manage your shell hooks similar to `direnvs` trunk shellhooks install \ - -#### **Usage** example - -``` -trunk shellhooks install [options] -``` - -### Trunk Git Hooks - -`trunk git-hooks sync`: Sync githooks with what's defined in `trunk.yaml` - -#### **Usage** example - -``` -trunk git-hook sync [options] -``` - -### Trunk show announcements since a commit - -**`trunk show-announcements since`**: Show announcements since a specified commit - -#### **Usage** example: - -```sh -trunk show-announcements since --commit abc123 -``` - -#### **Options**: - -* `--color`: Enable/disable color output -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--no-progress`: Don't show progress updates -* `--ci`: Run in continuous integration mode -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--version`: The version - -### **Trunk show announcements post-merge** - -**`trunk show-announcements post-merge`**: Run on git pull/merge, usually run by a git-hook and not directly. - -**Usage Example**: - -```sh -trunk show-announcements post-merge --verbose -``` - -### **Trunk show announcements pre-rebase** - -**`trunk show-announcements pre-rebase`**: Run on git pre-rebase, usually run by a git-hook and not directly. - -#### **Usage** example: - -```sh -trunk show-announcements pre-rebase [options] [branch-refs...] -``` - -### **Trunk show announcements post-checkout** - -**`trunk show-announcements post-checkout`**: Run on git checkout/switch, usually run by a git-hook and not directly. - -#### **Usage** example:: - -```sh -trunk show-announcements post-checkout [options] [branch-refs...] 
-``` diff --git a/code-quality/overview/getting-started/commands-reference/code-quality.mdx b/code-quality/overview/getting-started/commands-reference/code-quality.mdx deleted file mode 100644 index ac50dd6..0000000 --- a/code-quality/overview/getting-started/commands-reference/code-quality.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Code Quality" ---- -### trunk check - -`trunk check`: Universal code checker. - -#### **Usage** **example** - -``` -trunk check [options] -``` - -#### Filtering options - -* `-a, --all`: Check all files instead of only changed files -* `--sample`: Run each linter on N files -* `--filter`: Comma-separated list of linters and/or issue codes to include or exclude -* `--exclude`: Shorthand for an inverse --filter -* `--scope`: Scope of checks to run \{all | security} -* `--ignore`: Glob pattern to exclude files from linting -* `--force`: Run on all files, even if ignored -* `--include-existing-autofixes`: Include existing issues that can be autofixed - -#### **CI** options - -* `--ci`: Run in non-interactive mode designed for CI environments -* `-j`, `--jobs`: Number of concurrent jobs - -#### Git Hooks options - -* `--index`: Run linter on git-indexed files -* `--index-file`: Run linter on git-indexed files based on specified index -* `--commit-ref`: Commit ref to lint (instead of current working tree) -* `--commit-ref-from-pre-push`: Commit ref to lint from the stdin of a pre-push git hook (instead of the current working tree) - -#### Output options - -* `--show-existing`: Show existing issues otherwise hidden by -* `--print-failures`: Print any failures that occur -* `--diff`: Diff printing mode \{none | compact | full} -* `-v, --verbose`: Show verbose output for debugging purposes -* `--debug`: Show debug output - -#### Behavior options - -* `-y, --fix`: Automatically apply all fixes without prompting -* `-n, --no-fix`: Don't automatically apply fixes -* `--cache`: Disable to skip cache for all check actions -* 
`--ignore-git-state`: Run linters even if a merge, rebase, or revert is in progress -* `--upstream`: Upstream branch used to compute changed files - -### Trunk Check Enable Linter - -`trunk check enable`: Enable linters for trunk check. - -#### **Usage** **example** - -``` -trunk check enable [options] -``` - -### Trunk Check Disable Linter - -`trunk check disable`: Disable linters for trunk check. - -#### **Usage** **example** - -``` -trunk check disable [options] -``` - -### Trunk Check List Linters - -`trunk check list`: List linters for trunk check. - -#### **Usage** **example** - -``` -trunk check list [options] -``` - -### Trunk Check Run Format - -`trunk fmt`: List linters for trunk check. - -#### **Usage** **example** - -``` -trunk fmt [options] -``` - -#### **Options** - -#### Filtering options - -* `-a, --all`: Check all files instead of only changed files -* `--filter`: Comma-separated list of linters and/or issue codes to include or exclude -* `--exclude`: Shorthand for an inverse --filter -* `--scope`: Scope of checks to run \{all | security} -* `--ignore`: Glob pattern to exclude files from linting -* `--force`: Run on all files, even if ignored -* `--show-existing`: Show existing issues otherwise hidden by [hold-the-line](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line) -* `--ignore-git-state`: Run linters even if a merge, rebase, or revert is in progress - -#### Git Hooks options - -* `--index`: Run linter on git-indexed files -* `--index-file`: Run linter on git-indexed files based on specified index -* `--commit-ref`: Commit ref to lint (instead of current working tree) -* `--commit-ref-from-pre-push`: Commit ref to lint from the stdin of a pre-push git hook (instead of the current working tree) - -#### Output options - -* `--show-existing`: Show existing issues otherwise hidden by -* `--print-failures`: Print any failures that occur -* `--diff`: Diff printing mode \{none | compact | full} -* `-v, --verbose`: Show verbose output for debugging 
purposes -* `--debug`: Show debug output - -#### Behavior options - -* `-y, --fix`: Automatically apply all fixes without prompting -* `-n, --no-fix`: Don't automatically apply fixes -* `--cache`: Disable to skip cache for all check actions -* `--ignore-git-state`: Run linters even if a merge, rebase, or revert is in progress -* `--upstream`: Upstream branch used to compute changed files -* `-j`, `--jobs`: Number of concurrent jobs - -## Advanced Trunk Check features - -| Options & Flags | Explanation | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `--root` | Explicitly set the root of the repository to run against | -| `--upstream` | Specify the upstream branch used to calculate new vs existing issued. | -| `--trigger` | Supports running trunk check from inside a git hook. Options are manual (default), git-push, git-commit. Controls whether the check returns early and its interactivity. | -| `--output=format` | Output results in specified format: `text` (default) or `json` | -| `--output-file=FILE` | Write json results to specified file | - -#### --filter - -`--filter` argument allows you to restrict `trunk check` to a subset of the linters enabled in your repository. - -For example, to run `eslint` and `isort` on the entire repo: - -```bash -trunk check --all --filter=eslint,isort -``` - -Alternatively, to run every linter _except_ `clang-tidy` and `shellcheck`: - -```bash -trunk check --all --filter=-clang-tidy,-shellcheck -``` - -#### --sample - -`--sample=N` will attempt to run every enabled linter against the requested number of files. The goal of the `sample` flag is to test the setup of the linters in your repository as well as any specific configuration they might honor. 
- -The sample command will attempt to run each linter N times, but may run fewer if not enough applicable files exist in your set of files to lint. `--sample=N` can be combined with any other set of options for `trunk check`. - -For example, to run `prettier` against 10 different prettier supported files: - -```bash -trunk check --sample=10 --filter=prettier -``` - -Alternatively, to run every linter at most 5 times against its supported files: - -```bash -trunk check --sample=5 -``` diff --git a/code-quality/overview/getting-started/commands-reference/index.mdx b/code-quality/overview/getting-started/commands-reference/index.mdx deleted file mode 100644 index c9a65d9..0000000 --- a/code-quality/overview/getting-started/commands-reference/index.mdx +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: "Commands reference" ---- -### trunk init - -`trunk init`: Set up trunk in this repo. - -#### **Usage** Example - -``` -trunk init -``` - -### trunk version - -`trunk version`: Output the version. - -#### **Usage** example - -``` -trunk version -``` - -### trunk upgrade - -`trunk upgrade`: Upgrade Trunk and its linters to the latest releases. - -#### **Usage** **example** - -``` -trunk upgrade [options] -``` - -#### **Options** - -* `-y, --yes-to-all`: Answer yes to all upgrade prompts -* `-n, --no-to-all`: Answer no to all upgrade prompts -* `--apply-to`: Apply upgrades to a specified file -* `--filter`: Filter the upgraded linters -* `--dry-run`: Detect available upgrades, but do not apply changes - -### trunk login - -`trunk login`: Login to trunk.io. - -#### **Usage** example - -``` -trunk login -``` - -### trunk logout - -`trunk logout`: Logout from trunk.io. - -#### **Usage** example - -``` -trunk logout -``` - -### trunk plugins add - -`trunk plugins add`: Add a plugin by URI. - -#### **Usage** example - -``` -trunk plugins [uri] [ref] [options] -``` - -### trunk tools - -`trunk tools`: Universal tool manager. 
- -#### **Usage** example - -``` -trunk tools [options] -``` - -### trunk daemon status - -Report the status of the daemon. - -#### **Usage** example - -``` -trunk daemon status -``` - -### trunk daemon start - -Start the trunk daemon in the background if it’s not already running. - -#### **Usage** example - -``` -trunk daemon start -``` - -### **trunk daemon shutdown** - -`trunk daemon shutdown`: Shutdown the trunk daemon if it is running. - -#### **Usage** example - -``` -trunk daemon shutdown -``` - -### **trunk daemon launch** - -`trunk daemon launch`: Start the trunk daemon in the foreground if it’s not already running. - -#### **Usage** example - -``` -trunk daemon launch -``` - -### trunk whoami - -`trunk whoami`: print who you're logged in as - -#### **Usage** example - -``` -trunk whoami -``` - -### trunk deinit - -`trunk deinit`: Deinitialize Trunk in your repo - -#### **Usage** example - -``` -trunk deinit [options] -``` - -#### **Options** - -* `-y`, `--yes`: Proceed unconditionally -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output - -### trunk config share - -`trunk config share`: Remove Trunk config files from your local git ignores. - -#### **Usage** example - -``` -trunk config share -``` - -### trunk config hide - -`trunk config hide`: Add Trunk config files to your local git ignores. - -#### **Usage** example - -``` -trunk config hide -``` - -### trunk config print - -`trunk config print`: Print the resolved trunk config. - -#### **Usage** example - -``` -trunk config print -``` - -### trunk cache clean - -`trunk cache clean`: Clean cached files used by Trunk. - -#### **Usage** Example - -``` -trunk cache clean -``` - -### trunk cache prune - -`trunk cache prune`: Prune unused cached files. - -#### **Usage** example - -``` -trunk cache clean -``` - -### trunk install - -`trunk install`: Download & install enabled runtimes/linters. 
- -#### **Usage** example - -``` -trunk install [options] -``` - -#### **Options** - -* `--version`: The version -* `--monitor`: Enable the trunk daemon to monitor file changes in your repo -* `--ci`: Run in continuous integration mode -* `--no-progress`: Don't show progress updates -* `--ci-progress`: Rate limit progress updates to every 30s (implied by `--ci`) -* `--action_timeout`: Timeout for downloads, lint runs, etc. -* `-v`, `--verbose`: Output details about what's happening under the hood -* `--color`: Enable/disable color output diff --git a/code-quality/overview/getting-started/compatibility.mdx b/code-quality/overview/getting-started/compatibility.mdx deleted file mode 100644 index 6d41400..0000000 --- a/code-quality/overview/getting-started/compatibility.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: "Compatibility" ---- -### Linux - -Trunk will run on most Linux flavors, including Ubuntu, Arch, and others. We do require glibc version 2.19 or later. Alpine Linux is not supported. - -### macOS - -Trunk will run on macOS version 10.15 or later. - -### Windows - -Trunk only supports Windows with the following versions and above: - -| Tool | Where to Modify | Minimum Required Version | -|---|---|---| -| CLI | `cli` `version` in `.trunk/trunk.yaml` | `1.13.0` | -| Plugins | `ref` for the `trunk` plugin in `.trunk/trunk.yaml` | `v1.0.0` | -| VSCode | Reload VSCode to update | `3.4.4` | - -You will also need to install [C and C++ runtime libraries](https://aka.ms/vs/17/release/vc_redist.x64.exe) to run some linters. - -#### Getting in touch - -Thank you for being a beta tester of Trunk Check on Windows! We are actively working to improve the experience. If you have any feedback or questions, please contact us directly on [Slack](https://slack.trunk.io/). - -If you want to override a repo-wide setting just for your Windows machine, you can modify your [`.trunk/user.yaml`](./configuration/per-user-overrides). 
- -#### Supported features - -We intend to bring full feature support to Windows for Trunk. Currently, the following features are supported: - -* [Trunk Code Quality](./code-quality) -* Non-interactive [Trunk Actions](./actions/) and [git-hooks](./actions/git-hooks) -* [VSCode](../ide-integration/vscode) - -### Plugin compatibility - - -This section was last updated for Plugins v1.2.0 - - -Trunk runs most linters on all platforms. However, some linters are not yet supported on Windows. For a full list of all linters, see our [Plugins repo](https://github.com/trunk-io/plugins). - -| Linter | Plans for Support | -|---|---| -| ansible-lint | Only supported on WSL | -| clang-format | Long-term plans for LLVM linter support | -| clang-tidy | Long-term plans for LLVM linter support | -| detekt-gradle | Long-term plans for support | -| include-what-you-use | Long-term plans for LLVM linter support | -| nixpkgs-fmt | Long-term plans for support | -| perlcritic | No immediate plans for support | -| perltidy | No immediate plans for support | -| scalafmt | No download available for Windows | -| semgrep | No download available for Windows | -| shellcheck | No download available for Windows | -| stringslint | Only supported on MacOS | -| swiftformat | Only supported on MacOS | -| swiftlint | Only supported on MacOS | -| taplo | No download available for Windows | - -### Backward compatibility - -We generally strive to maintain backward compatibility between the [Trunk Launcher](./install#the-trunk-launcher) and the Trunk binary, but you may need to occasionally upgrade the launcher to support the newest version of Trunk. 
diff --git a/code-quality/overview/getting-started/configuration/actions/index.mdx b/code-quality/overview/getting-started/configuration/actions/index.mdx deleted file mode 100644 index f28ca1d..0000000 --- a/code-quality/overview/getting-started/configuration/actions/index.mdx +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: "Actions" ---- -Actions are defined and enabled in the `actions` section of `trunk.yaml`. - -Here is an example of the actions section of `trunk.yaml`. If you are curious what your resolved configuration for actions looks like, run `trunk config print`. - -```yaml -actions: - enabled: - - trunk-announce - - trunk-upgrade-available - - npm-install - - seed-database - - custom-git-hook - - login - definitions: - - id: npm-install - triggers: - - files: [package.json] - run: npm install - - id: seed-database - triggers: - - schedule: 24h - run: python3 seed_database.py - runtime: python - run_from: utils - packages_file: requirements.txt - - id: custom-git-hook - triggers: - - git_hooks: [pre-push, pre-commit] - run: my_script.sh - - id: login - run: my_complicated_login_script.sh - interactive: true -``` - -### Action Definitions - -Now we'll walk through the process of creating your own action. - -Actions are required to have a `id` and `run` command. - -The command will implicitly run relative to your workspace, but you can also specify a `run_from` if you'd prefer to execute from a sub-directory. - -#### Runtime management - -We sandbox action executions and allow you to control the runtime. You can do this by specifying a `runtime` and `packages_file`. - -You can specify one of our built-in runtimes (`node`, `python`, ...) or a system runtime that you define. See the [runtimes documentation](../runtimes) for more information. - -For the `python` and `node` runtimes, we additionally provide the ability to install a requirements file like `requirements.txt` or `package.json`. 
- -### Triggers - -You can run actions manually, or you can also provide a set of triggers so that actions run in response to some event. They are documented below. - -#### Manual runs - -You may run an action manually by running `trunk run ` or `trunk actions run `. - -For manually triggered runs, we support the `${@}` and `${pwd}` variables for template resolution in the `run` declaration. `${@}` will be replaced with the arguments passed to the action, and `${pwd}` will be replaced with the directory the action is triggered from. - -```yaml -id: my-action -run: echo "The action was run from ${pwd} with arguments ${@}" -``` - -#### Time-based triggers - -We provide the ability to run actions in the background on a schedule. - -Under `triggers`, you can add one or more `schedule` entries. For example: - -```yaml -id: my-action -triggers: - - schedule: 1d -``` - -The `schedule` entry should be in the Duration format specified [here](https://pkg.go.dev/time#ParseDuration). The action will be run once per `duration`. - -This is a short-hand for specifying schedule as an object. You can also write: - -```yaml -id: my-action -triggers: - - schedule: - interval: 1d -``` - -The action may occasionally run more often than the specified duration depending on the Trunk daemon's lifetime. - -If you wish to stagger the execution of an action from others on a similar schedule, you may use the `delay` field: - -```yaml -id: my-action -triggers: - - schedule: - interval: 1d - delay: 1h -``` - -You may also use cron syntax: - -```yaml -nid: my-action - triggers: - # run every 2 hours - - schedule: "0 0 */2 * * ?" -``` - -or equivalently: - -```yaml - id: my-action - triggers: - # run every 2 hours - - schedule: - cron: "0 0 */2 * * ?" -``` - -#### File-based triggers - -We provide the ability to run actions automatically based on a file edit. - -You may provide exact filenames, or globs. 
- -```yaml -id: my-action -triggers: - - files: [foo.txt, bar/**] -``` - -In this case `my-action` will execute if either `foo.txt` is edited (or created), or if a file inside `bar` is edited or created. - -In case you need to know which file triggered the action, you can use the `${target}` variable in the `run` command. - -```yaml -id: my-action -triggers: - - files: [foo.txt, bar/**] -run: echo "The file ${target} was edited" -``` - -If you do a bulk file modification, the `${target}` template may resolve to a space-separated list of files that were simultaneously edited. - -> Note: We only provide file triggers for files inside of your workspace. - -#### Git hooks - -You can also configure Trunk to manage your git hooks. More detail is provided on this in our [git hooks reference](../../actions/git-hooks). - -### Interactivity - -Actions can read from `stdin` if they are marked as interactive (define `interactive: true` on the action). Note: this feature is only available for git hooks and manually run actions - since file-triggered and scheduled actions run in the background, you cannot interact with their execution. diff --git a/code-quality/overview/getting-started/configuration/actions/logging-and-troubleshooting.mdx b/code-quality/overview/getting-started/configuration/actions/logging-and-troubleshooting.mdx deleted file mode 100644 index b490a1a..0000000 --- a/code-quality/overview/getting-started/configuration/actions/logging-and-troubleshooting.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "Logging and Troubleshooting" -description: "Diagnosing problems with actions" ---- -We provide a number of tools for inspecting the results of actions that run in the background and wouldn't otherwise surface their errors. - -Every action execution is logged. We consider an action execution to have failed if it has a non-zero exit code. - -`trunk actions history ` gives a history of the recent runs of an action and whether it succeeded. 
You can control how many recent runs to show with the `--count` flag (for example, `trunk actions history trunk-upgrade-available --count=10`). When available, a full stacktrace is written to a file and made available. - -Failed action executions will also produce a notification so that background failures are periodically surfaced to the user. - -You can also inspect action logs at `.trunk/out/actions//`. - -We recommend running actions manually when you develop them to verify that they work correctly. - -### Output Level - -To see a more verbose output when running trunk actions, particularly from git-hooks, you can add the following to your `trunk.yaml`: - -```yaml -actions: - output_level: -``` diff --git a/code-quality/overview/getting-started/configuration/actions/notifications.mdx b/code-quality/overview/getting-started/configuration/actions/notifications.mdx deleted file mode 100644 index 5d78091..0000000 --- a/code-quality/overview/getting-started/configuration/actions/notifications.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Notifications" -description: "Trunk Actions can also produce notifications to display in your terminal or in the VSCode extension!" ---- -### Defining actions that produce notifications - -Typically, whatever actions write to stdout are stored in the log file and perhaps shown to the user. However, actions can also produce structured output if `output_type` is set on the Action Definition to be `notification_v1`. - -In this case, the action should print yaml to output with the following structure: - -```yaml -notifications: - - id: action-id - # Display-related fields - title: My action - message: some text about the notification - rendered: A rendered message string for color terminals - icon: https://uri/to/icon - commands: - - title: A button title - run: a run command - run_from: directory to run from - priority: high # Can be one of low, high (default low) -``` - -Some notes: - -1. 
The ID can be whatever you want it to be, but generally should be made to match the action ID. -2. You may emit multiple notifications per action. -3. `icon` and `commands` are used to control notifications display in VSCode. -4. High-priority notifications are immediately shown to the user in terminal. Low-priority notifications are only shown every 24 hours (These are configurable). - -### Deleting notifications - -Actions can also clear their own notifications. in this case, make the output looks like this: - -```yaml -notifications_to_delete: [action-id] -``` - -If actions produce a notification that is reflective of a current state or something actionable for the user to do, they may clear the notification once that state changes/when the user takes the requested action. - -### An example - -We illustrate the cycle of actions managing their own notifications with the following example. - -Consider the built-in action for `trunk upgrade` - a command that upgrades trunk and a repo's enabled linters to their most recent versions. We'd like to notify the user of new upgrades once a day. 
Thus our `trunk-upgrade-available` action definition looks like this: - -```yaml -id: trunk-upgrade-available -output_type: notification_v1 -run: trunk upgrade --notify -triggers: - - schedule: 1h - - files: [.trunk/trunk.yaml] -``` - -`trunk upgrade --notify` produces a notification that looks like this: - -```yaml -notifications: - - commands: - - run: trunk upgrade - title: Upgrade Trunk - id: trunk-upgrade - message: "Upgrades available\n\n Trunk version 0.17.0-beta\n 10 linter updates\n\nRun trunk upgrade to upgrade all\n or trunk upgrade trunk to just upgrade trunk" - priority: low - rendered: "\x1b[1m\x1b[90m\nUpgrades available\x1b[0m\n\x1b[90m\n\x1b[0m• \x1b[90mTrunk version\x1b[0m \x1b[92m0.17.0-beta\x1b[0m\x1b[90m\n\x1b[0m• \x1b[92m11 linter\x1b[0m \x1b[90mupdates\n\x1b[0m\n\x1b[90mRun\x1b[0m\x1b[96m trunk upgrade\x1b[0m\x1b[90m to upgrade all\x1b[0m\x1b[90m\n or\x1b[0m\x1b[96m trunk upgrade trunk\x1b[0m\x1b[90m to just upgrade trunk\x1b[0m\x1b[90m\n\x1b[0m" -``` - -If there are no upgrades available, `trunk upgrade --notify` will produce: - -```yaml -notifications_to_delete: [trunk-upgrade-available] -``` - -So in this scenario, the `trunk-upgrade-available` action runs in the background periodically and produces a notification. The user takes action by running `trunk upgrade`. Since `trunk upgrade` modifies `.trunk/trunk.yaml`, this will again trigger the `trunk-upgrade-available` action (due to the file trigger). Since there is nothing else to upgrade, `trunk upgrade --notify` will produce output telling Trunk to delete its notification. Now, the user is no longer shown a notification about available upgrades! 
diff --git a/code-quality/overview/getting-started/configuration/index.mdx b/code-quality/overview/getting-started/configuration/index.mdx deleted file mode 100644 index ab7b28b..0000000 --- a/code-quality/overview/getting-started/configuration/index.mdx +++ /dev/null @@ -1,293 +0,0 @@ ---- -title: "Configuration" ---- -The Trunk CLI has its top-level config defined in `.trunk/trunk.yaml`. - -``` -/your_repo -├── .trunk -│ └── trunk.yaml -└── src - ├── bar - └── foo -``` - -This is initially generated by `trunk init` and is the central source of truth for how Trunk operates inside your repository. As we build new services and features, we'll extend `trunk.yaml` to include configuration sections for them. We believe strongly in "configuration as code" and being able to guarantee that `trunk` can be run reproducibly. - -### Config format - -The Trunk configuration file is written in YAML and is meant to be self-descriptive. Below is a sample config file to help you understand how the pieces come together. Alternatively, you can also refer to [the `trunk.yaml` in our GitHub Action](https://github.com/trunk-io/trunk-action/blob/main/.trunk/trunk.yaml) as an example or [`trunk-yaml-schema.json`](https://static.trunk.io/pub/trunk-yaml-schema.json). - -```yaml -version: 0.1 # the version of this config file. 
-cli: - version: 0.15.1 # the version of trunk you will run in your repository -runtimes: - enabled: - - ruby@>=2.7.1 - - python@3.9.1 -repo: - # main is the branch that everyone's work is merged into - # (this is usually inferred and not required to be set) - trunk_branch: main -lint: - definitions: - - name: my_custom_linter - files: [ALL] - commands: - output: sarif - run: ${workspace}/bin/foo --file ${target} - read_output_from: stdout - run_linter_from: workspace - success_codes: [0, 1] - enabled: - - ansible-lint@5.3.2 - - bandit@1.7.0 - - black@21.6b0 - - buf-lint@1.0.0-rc3 - - buildifier@5.1.0 - - cfnlint@0.51.0 - - eslint@7.30.0 - - gitleaks@7.6.1 - - gofmt@1.16.7 - - golangci-lint@1.41.1 - - hadolint@2.6.0 - - isort@5.8.0 - - markdownlint@0.28.1 - - mypy@0.910 - - prettier@2.3.2 - - pylint@2.8.1 - - rustfmt@1.55.0 - - semgrep@0.104.0 - - shellcheck@0.7.2 - - shfmt@3.3.1 - disabled: - - rufo - - tflint - ignore: - - linters: [ALL] - paths: - # Generated files - - a/proto/code_pb* - # Test data - - b/test_data/** - - linters: [eslint] - paths: - - c/run.ts - triggers: - - linters: - - ansible-lint - paths: - - ansible # A directory - targets: - - ansible # A directory -``` - -### `version` - -The `version field` is the schema version of `trunk.yaml.` - -### `cli` - -```yaml -cli: - version: 0.15.1 # the version of trunk you will run in your repository - options: - - commands: [ALL] # apply to all `trunk` commands - args: --monitor=true - - commands: [check, fmt] # apply only to `trunk check` and `trunk fmt` commands - args: -y -``` - -In addition to specifying `version`, `cli` allows you to specify default command line arguments using the `options` field. Specified `args` will be appended to strictly matched `commands` during `trunk` invocations. Specifying `ALL` as a `commands` element applies its options to all `trunk` subcommands. Any command line options will take precedence over these `args`. 
- -Some examples using the configuration above: - -* `trunk check` resolves to `trunk check -y --monitor=true` -* `trunk check -n` resolves to `trunk check -n --monitor=true` -* `trunk fmt` resolves to `trunk fmt -y --monitor=true` - -### `repo` - -```yaml -repo: - # main is the branch that everyone's work is merged into - # (this is usually inferred and not required to be set) - trunk_branch: main -``` - -Some Trunk features require Trunk to be aware of the canonical repository your organization uses, such as the repository that everyone pulls from and makes pull requests into. The Trunk CLI can infer this from your `origin` remote, but if you don't want your `origin` to be used for this purpose, you can explicitly specify your canonical repository. - -Other features - namely `trunk check` - need to be aware of the primary upstream branch that everyone branches from. If you use `main` or `master`, `trunk` can infer this; however, if you use some other primary branch, then you may want to consider setting this. - -The above configuration is how you would specify that [https://github.com/github/gitignore](https://github.com/github/gitignore) is your canonical repository and that `main` is the branch which `trunk` should always think of as your upstream branch. - -### `api` - -```yaml -api: - # name of your trunk organization on app.trunk.io - org: { your-org-name } -``` - -Some Trunk features, like the CI Debugger, require knowledge of the Trunk organization your repository is using. This information can be provided on the command line or hard-coded in the `trunk.yaml` file. - -### `trunk_remote_hint` - -```yaml -repo: - trunk_remote_hint: github.com/organization/my_repo -``` - -If this hint is set, Trunk will search all local remotes looking for the one that best matches `//` instead of defaulting to `origin`. It will then use this remote as the default upstream for computing changed files. 
- -### Stacked PR support - -```yaml -repo: - use_branch_upstream: true -``` - -By default, `trunk` will auto-detect all changed files relative to your main branch. If you would instead like it to compare against the upstream of your current git branch, you can enable this feature by setting `use_branch_upstream` to `true`. - -### Disable upgrade notifications - -Trunk will periodically tell you to upgrade to a newer version if one is available. If you prefer not to see these notifications, edit (or add) the section of your `.trunk/trunk.yaml` to include the following lines: - -```yaml -actions: - disabled: - - trunk-upgrade-available -``` - -### Overriding defaults - -Trunk ships with a default configuration which `trunk.yaml` is merged into to produce the actual configuration that Trunk runs with. You can view this merged configuration using `trunk print-config`. - -You may find while using Trunk that you want to modify one of these defaults: perhaps you want `clang-tidy` to not run on the upstream, or maybe you want the `node` runtime to include another environment variable. In these cases, you can specify the field in your `trunk.yaml` to override the default value. - -Let's take `clang-tidy` as an example, which ships with the following default configuration: - -```yaml -definitions: - ... - - name: clang-tidy - files: [c/c++-source] - type: llvm - commands: - - output: llvm - run: clang-tidy --export-fixes=- ${target} - success_codes: [0] - download: clang-tidy - direct_configs: [.clang-tidy] - disable_upstream: true - include_scanner_type: compile_command - environment: - - name: PATH - list: ["${linter}/bin"] - ... -``` - -If you wanted to flip the value of `disable_upstream` to `false`, you could, in your own `trunk.yaml`, specify: - -```yaml -definitions: - ... - - name: clang-tidy - disable_upstream: false - ... 
-``` - - -Some linters have multiple commands, such as [trivy](https://github.com/trunk-io/plugins/blob/main/linters/trivy/plugin.yaml), which can run in different ways. Similarly, some linters are configured to run differently on different platforms or at different versions. When overriding a command definition, overrides are applied on the tuple `[name, version, platforms]`. For example, if you wanted to disable batching when running [ktlint](https://github.com/trunk-io/plugins/blob/main/linters/ktlint/plugin.yaml) on Windows, you could consider its default configuration: - -```yaml -definitions: - ... - - name: ktlint - ... - commands: - - name: format - platforms: [windows] - run: java -jar ${linter}/ktlint.exe -F "${target}" - output: rewrite - cache_results: true - formatter: true - in_place: true - batch: true - success_codes: [0, 1] - - name: format - run: ktlint -F "${target}" - output: rewrite - cache_results: true - formatter: true - in_place: true - batch: true - success_codes: [0, 1] - ... -``` - -and override it as such: - -```yaml -definitions: - ... - - name: ktlint - ... - commands: - - name: format - platforms: [windows] - batch: false - ... -``` - -When executing linters, Trunk will execute the first matching command based on its compatible platforms and linter version. Note when overriding that new commands that don't match an existing tuple are prepended to the resulting commands list. 
- -Alternatively, consider the default `node` runtime: - -```yaml -runtimes: - definitions: - - type: node - download: node - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: ["${runtime}/bin"] - linter_environment: - - name: PATH - list: ["${linter}/node_modules/.bin"] - version: 16.14.2 - version_commands: - - run: "node --version" - parse_regex: ${semver} -``` - -If you wanted to add `${home}/my/special/node/path` to `PATH`, you could specify the following: - -```yaml -runtimes: - - type: node - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: ["${home}/my/special/node/path", "${runtime}/bin"] -``` - -### Validation - -Custom linter, download, and runtime configs must be defined in full and will be validated. Overrides of existing linter, download, and runtime configs can be partial overrides. They do not have to be full definitions. - -Merged configurations are subject to the same validation that custom linters are - they must all have a name, type, command, and either `success_codes` or `error_codes` set. - -### Known limitations - -1. Scalar values are overridden in a straightforward manner - the value specified in the override\ - takes the place of the default, and otherwise, default values are retained. -2. To override a sequence value in the default (ex. `environment` in the `node` runtime), it is\ - necessary to fully specify the new sequence. This is why the `environment` override above also defines `HOME`. If you just wanted to add a new value, you would have to copy in the existing\ - sequence to your overriding config, and add your new value to the end of the list. -3. It is not possible to set sequences of non-zero length to zero length. For example, if the\ - default config has `success_codes: [0]`, you may override this to `success_codes: [0, 1]`, but you cannot clear its value. 
diff --git a/code-quality/overview/getting-started/configuration/lint/auto-enable.mdx b/code-quality/overview/getting-started/configuration/lint/auto-enable.mdx deleted file mode 100644 index fde505f..0000000 --- a/code-quality/overview/getting-started/configuration/lint/auto-enable.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Auto-Enable" ---- -Simply defining a linter does not enable it. Trunk needs to know when to auto-enable the linter for certain projects (ex: all python projects) or if certain files are already present (ex: `.eslintrc`). - -## Auto Enabling - -The `direct_configs` property contains a list of config files that the underlying linter uses. The `suggest_if` property determines when `trunk check` should suggest this linter. If `suggest_if` is set to `config_present`, then trunk will search for the listed config files. If found, the linter will be enabled automatically when the user does `trunk init` or `trunk update`. - -For example: in the following yaml, the **flake8** linter sets `suggest_if` to `config_preset` and sets `direct_configs` to `[.flake8]`. If any `*.flake8` files are found, then trunk check will automatically enable flake8. - -**Flake8** linter definition. 
[full source](https://github.com/trunk-io/plugins/blob/main/linters/flake8/plugin.yaml) - -```yaml -version: 0.1 -tools: - definitions: - - name: flake8 - runtime: python - package: flake8 - shims: [flake8] - known_good_version: 4.0.1 -lint: - definitions: - - name: flake8 - files: [python] - tools: [flake8] - direct_configs: [.flake8] - suggest_if: config_present - affects_cache: - - setup.cfg - - tox.ini - # In case the user installs https://pypi.org/project/Flake8-pyproject/ - - pyproject.toml - issue_url_format: https://flake8.pycqa.org/en/latest/user/error-codes.html - known_good_version: 4.0.1 - version_command: - parse_regex: ${semver} - run: flake8 --version - -``` - -The **suggest\_if** field can be one of the following: - -* `config_present` will auto-enable a linter if Trunk sees any `direct_config` for it . -* `files_present` will auto-enable a linter if Trunk sees any file type that it operates on. -* `never` will never auto-enable this linter. - -Trunk curates the values of `suggest_if` for all linters in the [plugins](https://github.com/trunk-io/plugins) repo. - -## Manually enabling and disabling - -Setting the `lint.definitions[*].enabled` property to true will force the linter to be enabled. Setting the `lint.definitions[*].disabled` property to true will force the linter to never be enabled, even if the `enabled` property is true, and will never suggest this linter, even if `suggest_if` says it should. - -For additional information on the properties of Linters, see the [Linter Definition Reference](./definitions). diff --git a/code-quality/overview/getting-started/configuration/lint/commands.mdx b/code-quality/overview/getting-started/configuration/lint/commands.mdx deleted file mode 100644 index 8f04903..0000000 --- a/code-quality/overview/getting-started/configuration/lint/commands.mdx +++ /dev/null @@ -1,454 +0,0 @@ ---- -title: "Commands" ---- -A command is the fundamental unit of linters. 
It defines specifically _what binary and arguments_ are used to run the linter. A linter can have multiple commands in case it has multiple behaviors (ex: lint and format), but it must have at least one. - -## How Code Quality Runs Linters - -The `run` property is the command to actually run a linter. This command can use [variables](./commands#template-variables) provided by the runtime such as `${plugin}` and `${target}`. - -For example: this is the `run` field for **black**, one of our Python linters. The `run` field is set to `black -q ${target}`. - -```yaml -version: 0.1 -tools: - definitions: - - name: black - runtime: python - package: black[python2,jupyter] - shims: [black] - known_good_version: 22.3.0 -lint: - definitions: - - name: black - files: [python, jupyter, python-interface] - commands: - - name: format - output: rewrite - run: black -q ${target} - success_codes: [0] - batch: true - in_place: true - allow_empty_files: false - cache_results: true - formatter: true - tools: [black] - suggest_if: files_present - affects_cache: [pyproject.toml] - known_good_version: 22.3.0 - version_command: - parse_regex: black, version (.*) - run: black --version -``` - -This command template contains all the information Trunk needs to execute `black` in a way where Trunk will be able to understand `blacks`'s output. - -## Input Target - -The `target` field specifies what paths this linter will run on given an input file. It may be a string literal such as `.`, which will run the linter on the whole repository. It also supports various substitutions: - -| Variable | Description | -| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -| `${file}` | The input file. | -| `${parent}` | The folder containing the file. | -| `${parent_with()}` | Walks up toward the repository root looking for the first folder containing ``. 
If `` is not found, do not run any linter. | -| `${root_or_parent_with()}` | Walks up toward the repository root looking for the first folder containing ``. If `` is not found, evaluate to the repository root. | -| `${root_or_parent_with_regex()}` | Walks up toward the repository root looking for the first folder containing a name matching ``. If not found, evaluate to the repository root. | - -If `target` is not specified it will default to `${file}`. - -This target may be referenced in the `run` field as `${target}`, as in the example above for **black**, or this simple example. - -```yaml -lint: - definitions: - - name: noop - files: [ALL] - commands: - - name: format - output: rewrite - formatter: true - run: cat ${target} -``` - -or via `stdin`, by specifying `stdin: true`: - -```yaml -lint: - definitions: - - name: noop - files: [ALL] - commands: - - name: format - output: rewrite - formatter: true - run: cat - - stdin: true -``` - -> Note: Linters that take their input via `stdin` may still want to know the file's path so that they can, say, generate diagnostics with the file's path. In these cases you can still use `${target}` in `run`. - -## Exit codes - -Linters often use different exit codes to categorize the outcome. For instance, [`markdownlint`](https://github.com/igorshubovych/markdownlint-cli#exit-codes) uses `0` to indicate that no issues were found, `1` to indicate that the tool ran successfully but issues were found, and `2`, `3`, and `4` for tool execution failures. - -Trunk supports specifying either `success_codes` or `error_codes` for a linter: - -* if `success_codes` are specified, Trunk expects a successful linter invocation (which may or may not find issues) to return one of the specified `success_codes`; -* if `error_codes` are specified, Trunk expects a successful linter invocation to return any exit\ - code which is _not_ one of the specified `error_codes`. 
- -`markdownlint`, for example, has `success_codes: [0, 1]` in its configuration. - -**Note:** A linter command should set either success codes or error codes, but not both\*\*.\*\* - -## Working directory - -`run_from` determines what directory a linter command is run from. - -| run_from | Description | -|---|---| -| `` (`.` by default) | Explicit path to run from | -| `${parent}` | Parent of the target file; e.g. would be `foo/bar` for `foo/bar/hello.txt` | -| `${root_or_parent_with()}` | Nearest parent directory containing the specified file | -| `${root_or_parent_with_dir()}` | Nearest parent directory containing the specified directory | -| `${root_or_parent_with_regex()}` | Nearest parent directory containing a file or directory matching specified regex | -| `${root_or_parent_with_direct_config}` | Nearest parent directory containing a file from `direct_configs` | -| `${root_or_parent_with_any_config}` | Nearest parent directory containing a file from `affects_cache` or `direct_configs` | -| `${target_directory}` | Run the linter from the same directory as the target file, and change the target to be `.` | -| `${compile_command}` | Run from the directory where `compile_commands.json` is located | - -## Template Variables - -Note that some of the fields in this command template contain `${}` tokens: these tokens are why `command` is a template and are replaced at execution time with the value of that variable within the context of the lint action being executed. - -| Variable | Description | -| ----------------- | ----------------------------------------------------------------------------- | -| `${workspace}` | Path to the root of the repository | -| `${target}` | Path to the file to check, relative to `${workspace}` | -| `${linter}` | Path to the directory the linter was downloaded to | -| `${runtime}` | Path to the directory the runtime (e.g. 
`node`) was downloaded to | -| `${upstream-ref}` | Upstream git commit that is being used to calculate new/existing/fixed issues | -| `${plugin}` | Path to the root of the plugin's repository | - -## Limiting concurrency - -If you would like to limit the number of times trunk will invoke a linter concurrently, then you can use the `maximum_concurrency` option. For example, setting `maximum_concurrency: 1` will limit Trunk from running more than one instance of the linter simultaneously. - -## Environment variables - -Trunk by default runs linters _without_ environment variables from the parent shell; however, most linters need at least some such variables to be set, so Trunk allows specifying them using `environment`; for example, the `environment` for `ktlint` looks like this: - -```yaml -lint: - definitions: - name: ktlint - # ... - environment: - - name: PATH - list: ["${linter}"] - - name: LANG - value: en_US.UTF-8 -``` - -Most `environment` entries are maps with `name` and `value` keys; these become `name=value` environment variables. For `PATH`, we allow specifying `list`, in which case we concatenate the entries with `:`. - -We use the same template syntax for `environment` as we do for [`command`](./commands#commands). - -## Output Types and Parsing - -The output of a command should be in one of the supported output types like [SARIF](./output#sarif) or something that can be parsed with a [regex](./output#regex). See [See Output Types](./commands#output-types-and-parsing) for more details. If the standard output types do not meet your needs, you can also create a [custom parser](./output-parsing). - -## Full Reference - -The linter command definitions are defined in `lint.definitions.commands`. A single linter can have multiple commands if it is used in different ways. - -_Note:_. If you define the executable to run here (the command definition), then you should _not_ define it also in the linter definition. Defining it here as a command is preferred. 
- -## `allow_empty_files` - -`allow_empty_files`: _optional boolean_. Skip linting empty files for this linter. Trunk will assume there are no linters if the file is empty. - -## `batch` - -`batch`: _optional boolean_. Combine multiple files into the same execution. If true, the `${target}` template substitution in the `run` field may expand into multiple files. - -## `cache_ttl` - -`cache_ttl`, _duration string_. If this linter is not [idempotent](./commands#idempotent), this is how long cached results are kept before they expire. Defaults to 24hrs. See [Output Caching](../../caching) for more details. - -## `cache_results` - -`cache_results`: _optional boolean_. Indicates if this linter wants to cache results. See [Caching](./files-and-caching) for more details. - -## `disable_upstream` - -`disable_upstream`: _optional boolean_, Whether this linter supports comparing against the upstream version of this file. - -## `error_codes` - -`error_codes`: List of exit codes this linter will return when it hit an internal failure and couldn't generate results. **A linter should set either success codes or error codes, but not both.** See also [`success_codes`](./commands#success_codes). - -## `enabled` - -`enabled`: _optional boolean_. Whether the command is enabled to run when the linter is run. Allows some commands of a linter to be run by default without others. - -## `files` - -`files` is a list of file types listed in the `lint.files` section that this linter applies to. - -Example: **prettier** [full source](https://github.com/trunk-io/plugins/blob/main/linters/prettier/plugin.yaml) - -```yaml -lint: - definitions: - - name: prettier - files: - - typescript - - yaml - - css - - sass - - html - - markdown - - json - - javascript - - graphql - - prettier_supported_configs -``` - -## `fix_prompt` - -`fix_prompt`, _optional string._ e.g. 'Incorrect formatting' or 'Unoptimized image'. This string is used when prompting the user to use the linter interactively. 
- -## `fix_verb` - -`fix_verb`: _optional string_. This string is used when prompting the user to use the linter interactively. Example: `optimize`, `autoformat`, or `compress`. - -## `formatter` - -`formatter`: _optional boolean_. Whether this command is a formatter and should be included in `trunk fmt`. - -## `in_place` - -`in_place`: _optional boolean_. Indicates that this formatter will rewrite the file in place. **Only applies to formatters**. - -## `idempotent` - -`idempotent`: _optional boolean_. Indicates whether a linter is idempotent with config and source code inputs. For example, `semgrep` fetches rules from the Internet, so it is not idempotent . If set, will only cache results a duration of `cache_ttl`. See [Output Caching](./files-and-caching) for more details. - -## `is_security` - -`is_security`: _optional boolean_. Whether findings from this command should be considered "security" or not. Allows this linter to be run with `--scope==security`. [See Command Line Options](/merge-queue/using-the-queue/reference) - -## `maximum_file_size` - -`maximum_file_size`: _optional number_. The maximum file size in bytes for input files to the linter. If not specified, the [lint.default\_max\_file\_size](./#default_max_file_size) will be used. - -## `max_concurrency` - -`max_concurrency`: _optional integer_, The maximum number of processes that Trunk Code Quality will run concurrently for this linter. [See Limiting Concurrency](./commands#limiting-concurrency) - -## `name` - -`name`: _string_. A unique name for this command (some tools expose multiple commands, format, lint, analyze, etc.). - -## `no_issues_codes` - -`no_issues_codes`: List of exit codes that Trunk will use to assume there were no issues without parsing the output. - -## `output` - -`output`: _string_. which type of output this linter produces. [See Output Types](./commands#output-types-and-parsing). 
- -## `parser` - -`parser`: The definition of a parser that will transform the output of the linter into SARIF. Not needed if linter is already output SARIF. [See Output Types](./commands#output-types-and-parsing) - -## `parse_regex` - -`parse_regex`: _string_. A regular expression used to support regex parsing. [See Regex output type](./output#regex) - -## `platforms` - -`platforms`: A list of platforms this linter supports. (ex: `windows`, `macos`, `linux`). Linters using managed runtimes (node, python, etc.) can generally run cross-platform and do not need the `platforms` property set. For tools which _are_ platform specific or which have different configuration for each platform, this property can be used to distinguish between them. When multiple command definitions have the same name, Trunk Check will pick the first one that matches the `platforms` setting. - -For example, the `detekt` plugin has different exit codes for Windows than MacOS or Linux, and has two command definitions with different `success_codes` fields. [Full Source](https://github.com/trunk-io/plugins/blob/main/linters/detekt/plugin.yaml). - -```yaml -lint: - definitions: - - name: detekt - files: [kotlin] - download: detekt - commands: - - name: lint - platforms: [windows] - output: sarif - run: - detekt-cli --build-upon-default-config --config - .detekt.yaml --input ${target,} --report - sarif:${tmpfile} - success_codes: [0, 1, 2] - read_output_from: tmp_file - batch: true - cache_results: true - - name: lint - output: sarif - run: - detekt-cli --build-upon-default-config --config - .detekt.yaml --input ${target,} --report - sarif:${tmpfile} - success_codes: [0, 2] - read_output_from: tmp_file - batch: true - cache_results: true -``` - -## `prepare_run` - -`prepare_run`: An extra command to run before running a linter. - -## `read_output_from` - -`read_output_from`: Tell parser where to expect output from for reading. Should be one of `stdout`, `stderr`, and `tmp_file`. 
[See Output Sources](./output#output-sources) - -## `run` - -`run`: The command to run a linter. This command can use variables provided at runtime such as `$plugin}` and `$target}`. [Full list of variables](./commands#template-variables). See [Run](./commands#how-code-qualit-runs-linters) for more details. - -`dart` `format` command: [full source](https://github.com/trunk-io/plugins/blob/main/linters/dart/plugin.yaml) - -```yaml -lint: - files: - - name: dart - extensions: [dart] - definitions: - - name: dart - main_tool: dart - commands: - - name: format - output: rewrite - run: dart format ${target} -``` - -## `run_from` - -`run_from`: What current working directory to run the linter from. See [Working Directory](./commands#working-directory) for more details. - -## `run_when` - -`run_when`: When this command should be run. One of `cli`, `lsp`, `monitor`, or `ci`. - -## `std_in` - -`std_in`: _optional boolean_. Should the command be fed the file on standard input? - -## `success_codes` - -`success_codes:` List of exit codes that indicates linter ran successfully. **This is unrelated to whether or not there were issues reported by the linter**. - -**Note:** a linter should set either success codes or error codes, but not both. See also [`error_codes`](./commands#error_codes). - -## `target` - -`target`, _optional string_, What target does this run on. By default, the target is the modified source code file, `${file}`. Some linters operate on a whole repo or directory. See [Input Target](./commands#input-target) for more details. - -Examples: - -**nancy** uses `.` as the target. [full source](https://github.com/trunk-io/plugins/blob/main/linters/nancy/plugin.yaml) - -```yaml -# nancy uses . -definitions: - - name: nancy - files: [go-lockfile] - download: nancy - runtime: go - commands: - - output: sarif - run: sh ${plugin}/linters/nancy/run.sh - success_codes: [0, 1, 2] - target: . 
- read_output_from: stdout - is_security: true -``` - -**tflint** uses `${parent}` as the target. [full source](https://github.com/trunk-io/plugins/blob/main/linters/tflint/plugin.yaml) - -```yaml -lint: - definitions: - - name: tflint - files: [terraform] - commands: - - name: lint - output: sarif - prepare_run: tflint --init - run: tflint --format=sarif --force - success_codes: [0, 1, 2] - read_output_from: stdout - # tflint can only run on the current directory unless --recursive is passed - target: ${parent} - run_from: ${target_directory} - version: ">=0.47.0" -``` - -**Clippy** uses `${parent_with(Cargo.toml)}` as the target. [full source](https://github.com/trunk-io/plugins/blob/main/linters/clippy/plugin.yaml) - -```yaml -version: 0.1 -lint: - definitions: - # clippy has 3 lint severities: deny, warn, and allow. Unfortunately deny causes rustc to - # fail eagerly due to its implementation (https://github.com/rust-lang/rust/pull/87337), - # We use --cap-lints to downgrade "deny" severity lints to warn. So rustc will find all - # issues instead of hard stopping. There are currently only 70 of them, so we could hardcode - # the list to fix their severity levels correctly. - - name: clippy - files: [rust] - download: rust - commands: - - name: lint - # Custom parser type defined in the trunk cli to handle clippy's JSON output. - output: clippy - target: ${parent_with(Cargo.toml)} - run: cargo clippy --message-format json --locked -- --cap-lints=warn --no-deps - success_codes: [0, 101, 383] - run_from: ${target_directory} - disable_upstream: true -``` - -## `version` - -`version`: _optional string_, Version constraint. When a linter has multiple commands with the same name, Trunk Code Quality will select the first command that matches the version constraint. This is useful for when multiple incompatible versions of a tool need to be supported. - -Example: the `ruff` linter changed a command line argument from `--format` to `--output-format` in version `v0.1.0`. 
To handle both versions, the linter defines two commands with different version attributes. The first is for version `>=0.1.0`. If the first is not matched (because the install version of run is less that 0.1.0) then Trunk Code Quality will move on to the next command until it finds a match. [Full source](https://github.com/trunk-io/plugins/blob/main/linters/ruff/plugin.yaml). - -```yaml -lint: - definitions: - - name: ruff - files: [python] - commands: - - name: lint - # As of ruff v0.1.0, --format is replaced with --output-format - version: ">=0.1.0" - run: ruff check --cache-dir ${cachedir} --output-format json ${target} - output: sarif - parser: - runtime: python - run: python3 ${cwd}/ruff_to_sarif.py 0 - batch: true - success_codes: [0, 1] - - name: lint - run: ruff check --cache-dir ${cachedir} --format json ${target} - output: sarif - parser: - runtime: python - run: python3 ${cwd}/ruff_to_sarif.py 1 - batch: true - success_codes: [0, 1] - - -``` diff --git a/code-quality/overview/getting-started/configuration/lint/definitions.mdx b/code-quality/overview/getting-started/configuration/lint/definitions.mdx deleted file mode 100644 index 1973b3b..0000000 --- a/code-quality/overview/getting-started/configuration/lint/definitions.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: "Definitions" ---- -The definition of a particular linter is put under `lint.definitions`. The following properties define the settings of a _particular linter_, not for all linters. For global linter settings, see [Lint Config](./). - -## `affects_cache` - -`affects_cache`: The list of files that affect the cache results of this linter. [See Caching](../../caching). - -## `allow_empty_files` - -`allow_empty_files`: _optional boolean_. Indicates to skip linting empty files for this linter. - -## `batch` - -`batch`: _optional boolean_. Combine multiple files into the same execution. - -## `commands` - -`commands`: The list of commands exposed by this linter. 
See [Linter Command Definition](./commands). - -## `deprecated` - -`deprecated`: _string_. Indicates the linter is deprecated and should not be used. - -## `direct_configs` - -`direct_configs`: _string list_. Indicates config files used to auto-enable the linter. See [Auto Enabling](./auto-enable). - -## `disabled` - -`disabled`: _optional boolean_: Whether linter is actively disabled (and will not be recommended) and will not run (overrides enabled). - -## `download` - -`download`: _string_. The download URL. You must provide either runtime + packages or download, not both. Using runtimes is preferred. See [Runtimes](../runtimes). - -## `enabled` - -`enabled`: _optional boolean_. Whether this linter is enabled. - -## `environment` - -`environment`: a list of runtime variables used when running the linter. See [Command Environment Variables](./commands#environment-variables). - -## `extra_packages` - -`extra_packages`: list of strings, Extra packages to install, versions are optional. See [Linter Dependencies](./dependencies). - -## `formatter` - -`formatter`: _boolean_. Indicates whether this is a formatter and should be included in `trunk fmt`. - -## `good_without_config` - -`good_without_config`: _optional boolean_. Indicates whether this linter is recommended without the user tuning its configuration. Prefer [`suggest_if`](./definitions#suggest_if). - -## `hold_the_line` - -`hold_the_line`: _optional boolean_. Whether [hold-the-line will](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line) be done for this linter or not. - -## `include_lfs` - -`include_lfs`: _boolean_. Allow this linter to operate on files tracked using [git LFS](https://git-lfs.com/). - -## `include_scanner_type` - -`include_scanner_type`: which include scanner to use, if any. - -## `issue_url_format` - -`issue_url_format`: _string_, a format string that accepts issue codes for links to issues docs. - -## `known_good_version` - -`known_good_version`: _string_. 
A version to be used when Trunk cannot query the latest version. Currently, Trunk can query the latest version for all package managers and downloads hosted on GitHub. - -## `known_bad_versions` - -`known_bad_versions`: _string list_. Versions of a linter that are known to be broken and should not be run with Trunk. We will fall back to a `known_good_version` if init or upgrade chooses something in this set. - -## `main_tool` - -`main_tool`, _string_. If your linter depends on more than a single tool, and none of the tools has the same name as the linter, then you will need to specify which is the main tool here. It will be used to version the tool from the linter's enabled version. - -## `name` - -`name` _required string._ The name of the linter. This property will be used to refer to the linter in other parts of the config, for example, in the list of enabled linters. - -## `package` - -`package`: string, What primary package to install, if using a package manager runtime. The enabled version of the runtime for this linter will apply to this package. See [Linter Dependencies](./dependencies). - -## `path_format` - -`path_format`, Whether to use the platform-specific paths or generic "/". Default native. - -## `plugin_url` - -`plugin_url`: _string_, a plugin url for reporting issues. - -## `prepare_command` - -`prepare_command`. A command that is run once per session before linting any number of files using this linter. ex. `[tflint, --init]`. - -## `query_compile_commands` - -`query_compile_commands`, _optional boolean_. - -## `runtime` - -`runtime`: RuntimeType, Which package manager runtime, if any, to require to be setup for this linter. Ex: `node`, `ruby`, `python`. See [Linter Dependencies](./dependencies). - -## `run_timeout` - -`run_timeout`: _duration string_. Describes how long a linter can run before timing out. [See timeouts](../../../linters/configure-linters#timeout). 
- -## `suggest_if` - -How to determine if this linter should be auto-enabled/recommended. Possible values are `never`, `config_present`, and `files_present`. [See auto-enabling](./auto-enable) for more details. - -## `supported_platforms` - -Platform constraint. If incompatible, renders a notice. See also [Command `platforms`](./commands#platforms). - -## `tools` - -`tools`, _string list_. The list of tools used by this linter. See [Linter Dependencies](./dependencies). - -## `version_command` - -`version_command`: Version check commands. - -## `verbatim_message` - -`verbatim_message`: Do not try to truncate or reflow the output of this linter. diff --git a/code-quality/overview/getting-started/configuration/lint/dependencies.mdx b/code-quality/overview/getting-started/configuration/lint/dependencies.mdx deleted file mode 100644 index e118a17..0000000 --- a/code-quality/overview/getting-started/configuration/lint/dependencies.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Dependencies" ---- -Linters use the `tools` section of the `.trunk/trunk.yaml` to specify Trunk configured binaries that the linter uses to run. The `linter.definitions.tools` key specifies a list of tool names. There are two ways for a linter to depend on a tool: [Eponymous Tools](./dependencies#eponymous-tool-dependencies) and [Additional Tools](./dependencies#additional-tool-dependencies) - -## Eponymous Tool Dependencies - -When the name of the tool matches the name of a linter, it is called an _eponymous tool dependency_. - -In the example below the `pylint` linter depends on the `pylint` tool, which is defined as the package `pylint` running with the `python` runtime. - -Eponymous tools need to be defined _separately_ from the linter but implicitly enabled with the linter's version. You may explicitly enable the eponymous tool if you wish, but note that its version needs to be synced to that of the linter. 
See the [Tools Configuration](../tools) page for more details on how to set up Tools. - -```yaml -tools: - definitions: - - name: pylint - runtime: python - package: pylint - shims: [pylint] - known_good_version: 2.11.1 -lint: - definitions: - - name: pylint - files: [python] - commands: - - name: lint - # Custom parser type defined in the trunk cli to - # handle pylint's JSON output. - output: pylint - run: pylint --exit-zero --output - ${tmpfile} --output-format json ${target} - success_codes: [0] - read_output_from: tmp_file - batch: true - cache_results: true - tools: [pylint] - suggest_if: config_present - direct_configs: - - pylintrc - - .pylintrc - affects_cache: - - pyproject.toml - - setup.cfg - issue_url_format: http://pylint-messages.wikidot.com/messages:{} - known_good_version: 2.11.1 - version_command: - parse_regex: pylint ${semver} - run: pylint --version -``` - -## Additional Tool Dependencies - -You can also have a scenario where a linter depends on a tool that is not identically named - an _additional tool dependency_. We give an example below: - -```yaml -tools: - definitions: - - name: terragrunt - known_good_version: 0.45.8 - download: terragrunt - shims: - - name: terragrunt - target: terragrunt -lint: - definitions: - - name: terragrunt - tools: [terragrunt, terraform] - known_good_version: 0.45.8 - files: [hcl] - suggest_if: never - environment: - - name: PATH - list: ["${linter}"] - commands: - - name: format - output: rewrite - run: terragrunt hclfmt ${target} - success_codes: [0] - sandbox_type: copy_targets - in_place: true - formatter: true - batch: true - version_command: - parse_regex: terragrunt v${semver} - run: terragrunt -version -``` - -In this scenario, `terraform` is an additional tool dependency - `terragrunt` requires it to be in `$PATH`. If the tool is an additional dependency, it must be enabled explicitly and versioned independently of the linter - that is, it must be listed in the `tools.enabled` section. 
- -## Download via package manager - -If your linter can be downloaded via `gem install`, `go get`, `npm install`, or `pip install`, you can specify a `runtime` and the `package` key: - -```yaml -lint: - definitions: - - name: fizz-buzz - files: [javascript] - # npm install fizz-buzz - runtime: node - package: fizz-buzz -``` - -This will now create a hermetic directory in `~/.cache/trunk/linters/fizz-buzz` and `npm install fizz-buzz` there. You can refer to different versions of your package in `trunk.yaml` as normal, via `fizz-buzz@1.2.3`. - -> Note: Such downloads will use the _hermetic_ version of the specified runtime that `trunk` installs, not the one you've installed on your machine. - -See [Package-based Tools](../tools#package-based-tools) for more information. diff --git a/code-quality/overview/getting-started/configuration/lint/files-and-caching.mdx b/code-quality/overview/getting-started/configuration/lint/files-and-caching.mdx deleted file mode 100644 index 5712364..0000000 --- a/code-quality/overview/getting-started/configuration/lint/files-and-caching.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "Files and Caching" ---- -## Applicable filetypes - -To determine which linters to run on which files (i.e. compute the set of lint actions), Trunk requires that every linter define the set of filetypes it applies to in `lint.files`, then reference those files from `lint.definitions[*].files`. - -We have a number of pre-defined filetypes (e.g. `c++-header`, `gemspec`, `rust`; see our [plugins repo](https://github.com/trunk-io/plugins/blob/main/linters/plugin.yaml) for an up-to-date list), but you can also define your own filetypes. 
Here's how we define the `python` filetype: - -```yaml -lint: - files: - - name: python - extensions: - - py - - py2 - - py3 - shebangs: - - python - - python3 -``` - -This tells Trunk that files matching either of the following criteria should be considered `python` files: - -* the extension is any of `.py`, `.py2`, or `.py3` (e.g. `lib.py`) -* the shebang is any of `python` or `python3` (e.g. `#!/usr/bin/env python3`) - -The **flake8** linter definition uses python files, so it references the filetype above in its definition. - -```yaml -lint: - definitions: - - name: flake8 - files: [python] - commands: - ... - affects_cache: - - setup.cfg - - tox.ini - # In case the user uses https://pypi.org/project/Flake8-pyproject/ - - pyproject.toml -``` - -## Caching - -Trunk Code Quality automatically caches results from previous runs of linters to speed up development. To do this Trunk needs to know which files could potentially affect the cache, besides the source code files themselves. - -### Enabling caching - -If a linter wishes Trunk to cache the results it should set `cache_results` to true. - -## Files which affect caching - -The `lint.definitions[*].affects_cache` property is a list of files which could affect the cache. General these are files which would change the configuration of the linter, and therefore invalidate the current cached results. For example, the **flake8** tool tells trunk to invalidate the cache whenever the `setup.cfg`, `tox.ini`, or `pyproject.toml` files are changed. - -```yaml -lint: - definitions: - - name: flake8 - files: [python] - commands: - ... - affects_cache: - - setup.cfg - - tox.ini - # In case the user uses https://pypi.org/project/Flake8-pyproject/ - - pyproject.toml -``` - -### Idempotency - -Trunk Code Quality also needs to know if the linter command itself is idempotent, meaning the command will return the exact same results given the exact same inputs. 
Most linters are, however semgrep, for example, fetches rules from the internet so the output could be different each time. - -Setting the `linter.definitions[*].commands.idempotent` property to true will tell trunk to only cache the result for a duration of `cache_ttl`, which is set to 24hrs by default. diff --git a/code-quality/overview/getting-started/configuration/lint/index.mdx b/code-quality/overview/getting-started/configuration/lint/index.mdx deleted file mode 100644 index e2831a4..0000000 --- a/code-quality/overview/getting-started/configuration/lint/index.mdx +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: "Lint" ---- -### Lint Config - -The `lint` section of `.trunk/trunk.yaml` represents the configuration of all linters. This is where you can: - -* Define the linters (`lint.definitions`), -* List linters to enable and disable (`lint.enabled` and `lint.disabled`) -* Define file categories (`lint.files`) -* List required `runtimes` and `downloads`. -* And additional cross-linter settings. - -### `bazel` - -`bazel`: bazel configuration - -* `paths` locations to look for Bazel binary. [Example](../../../linters/supported/clang-tidy#using-bazel). - -### `comment_formats` - -`comment_formats`: Definitions of comment formats. Reused in linter definitions. Trunk Quality already defines many common comment format such as `hash` (`# comment`), `slashes-block` (`/* comment */`), and `slashes-inline` (`// comment`). For the full list [see the linters plugin.yaml](https://github.com/trunk-io/plugins/blob/main/linters/plugin.yaml). - -To create a new comment format provide the name and delimiters like this: - -```yaml -lint: - comment_formats: - - name: dashes-block - leading_delimiter: --[[ - trailing_delimiter: --] -``` - -### `compile_commands` - -`compile_commands`: compile commands for clang-tidy. Must be one of `json` or `bazel`. - -### `compile_commands_roots` - -`compile_commands_roots`: Directories to search for `compile_commands.json`. The default is `build/`. 
- -### `default_max_file_size` - -`default_max_file_size`: Default maximum filesize in bytes. Trunk Code Quality will not run linters on any files larger than this. Default value is 4 megabytes. - -### `definitions` - -`definitions`: Where you define or override linter settings. See [Linter Definition Config](./definitions). - -### `disabled` - -`disabled`: The list of linters to disable. Adding a linter here will prevent trunk from suggesting it as a new linter each time you upgrade. Linter names can be in the form of `` or `@`, the same format as the [enabled](./#enabled) property. - -### `downloads` - -`downloads`: Locations to download binary artifacts from. Using [tool definitions](../tools) instead is preferred. - -### `enabled` - -`enabled`: The list of linters to enable. Linter names can be in the form of `` or `@`. Examples: - -```yaml -lint: - enabled: - # Mutually exclusive, choose one: - - eslint # Use the system version of markdownlint - - eslint@9.0.0 # Use a hermetically managed version of eslint - - eslint@node # Use eslint from node_modules/.bin -``` - -### `exported_configs` - -`exported_configs`: Linter configs to export when another project is [importing this plugin](../../../linters/shared-configs) - -### `extra_compilation_flags` - -`extra_compilation_flags`: When running clang-tidy, this list will be appended to the compile command. - -### `files` - -`files`: Definitions of filetypes - -Every linter must define the set of filetypes it applies to in the `lint.files` section. - -New filetypes are defined with the name and extensions properties. They may also include the comments properties to describe what style of comments are used in these files. - -This is how the C++ source filetype is defined. See also [Files and Caching](./files-and-caching). 
- -```yaml -lint: - files: - - name: c++-source - extensions: - - C - - cc - - cpp - - cxx - comments: - - slashes-block - - slashes-inline -``` - -### `ignore` - -`ignore`: files to be ignored by linters. - -### `reuse_upstream` - -`reuse_upstream`: If enabled, Trunk will cache upstream sandboxes instead of creating a new one each time. Options are `true`, or `false`. - -### `runtimes` - -`runtimes`: Node, python, cargo, etc. Used to define or override a runtime environment for package management. [See Runtimes](../runtimes). - -### `skip_missing_compile_command` - -`skip_missing_compile_command`: For linters that depend on compile commands, setting this will cause Trunk to skip files without a compile command rather than report an error. - -### `threshold` - -`threshold`: where you specify the blocking behavior of linters. The [threshold](../../../linters/configure-linters#blocking-thresholds) for whether an error from a linter should block commits or not. - -### `upstream_mode` - -`upstream_mode`: How to generate the upstream sandbox used for generating lint results for revisions not currently checked out. Options are`symlink` (default), `hardlink`, or `copy`. If using `copy`, it can be slow without also enabling `reuse_upstream: true`. diff --git a/code-quality/overview/getting-started/configuration/lint/output-parsing.mdx b/code-quality/overview/getting-started/configuration/lint/output-parsing.mdx deleted file mode 100644 index a61c5d7..0000000 --- a/code-quality/overview/getting-started/configuration/lint/output-parsing.mdx +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: "Output Parsing" ---- -If you have a command or utility that you want to run pretty much as-is, but Trunk doesn't natively understand how to parse it, you can inject your own custom parser to translate its output into a format that Trunk does understand! - -For example, let's say that we want to use `grep` as a linter, but we want to add more context to the matches. 
We could define a custom linter like so: - -```yaml -lint: - definitions: - - name: todo-finder - files: [ALL] - commands: - - output: regex - # matches the parser run output - parse_regex: "((?P.*):(?P\\d+):(?P\\d+): - \\[(?P.*)\\] (?P.*) \\((?P.*)\\))" - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - run: - "sed -E 's/(.*):([0-9]+):(.*)/\\1:\\2:0: - [error] Found todo in \"\\3\" (found-todo)/'" -``` - -The execution model that `trunk` follows for a parser is that it will: - -* execute the linter's `run` field, asserting that either: - * the linter's exit code is in `success_codes`, or - * the linter's exit code is not in `error_codes`; -* execute `parser.run`, - * with the `read_output_from` of the linter execution fed to `parser.run` as `stdin`, - * assert that the exit code of the parser is 0, and then -* use `output` to determine how it should parse the parser's `stdout`. - -Note that you can also set `parser.runtime` to [`node`](./output-parsing#node) or [`python`](./output-parsing#python) so that you can write your parser in Javascript or Python instead, if you so prefer! You can find plenty of examples of python parsers in our [plugins repo](https://github.com/trunk-io/plugins). 
- - - -**Node** - -```yaml -lint: - definitions: - - name: todo-finder-node - files: [ALL] - commands: - - output: parsable - # parse_regex matches the parser run output - parse_regex: "((?P.*):(?P\\d+):(?P\\d+): - \\[(?P.*)\\] (?P.*) \\((?P.*)\\))" - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - runtime: node - run: ${workspace}/todo-finder-parser.js -``` - -```javascript -#!/usr/bin/env node -'use strict'; -let readline = require('readline'); -let rl = readline.createInterface({ input: process.stdin }); - -rl.on('line', function(line){ - let match = line.match(/(.*):([0-9]+):(.*)/); - - if (match) { - let [_, path, line_number, line_contents] = match; - console.log(`${path}:${line_number}:0: [error]` - +` Found todo in "${line_contents}" (found-todo)`); - } -``` - -Remember to run `chmod u+x todo-finder-parser.js` so that `trunk` can run it! - - -**Python** - -```yaml -lint: - definitions: - - name: todo-finder-python - files: [ALL] - commands: - - output: parsable - # parse_regex matches the parser run output - parse_regex: "((?P.*):(?P\\d+):(?P\\d+): - \\[(?P.*)\\] (?P.*) \\((?P.*)\\))" - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - runtime: python - run: ${workspace}/todo-finder-parser.js -``` - -```python -#!/usr/bin/env python -import re, sys - -for line in sys.stdin.readlines(): - match = re.match("(.*):([0-9]+):(.*)", line) - if match: - path, line_number, line_contents = match.groups() - print(f"{path}:{line_number}:0: [error] " - "Found todo in \"{line_contents}\" (found-todo)") - -``` - -Remember to run `chmod u+x todo-finder-parser.py` so that `trunk` can run it! 
- - diff --git a/code-quality/overview/getting-started/configuration/lint/output.mdx b/code-quality/overview/getting-started/configuration/lint/output.mdx deleted file mode 100644 index 9364527..0000000 --- a/code-quality/overview/getting-started/configuration/lint/output.mdx +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: "Output" ---- -## Output Sources - -The output format that Trunk expects from a linter is determined by its [`output`](./output#output-types) type. - -**`stdout`, `stderr` or `tmp_file`** - -`trunk` generally expects a linter to output its findings to `stdout`, but does support other output mechanisms: - -| `read_output_from` | Description | -| ------------------ | --------------------------------------------------------------------------------- | -| `stdout` | Standard output. | -| `stderr` | Standard error. | -| `tmp_file` | If `${tmpfile}` was specified in `command`, the path of the created `${tmpfile}`. | - -## Output Types - -Trunk supports several different generic output types. Most linters will use one of these output types, but if your linter doesn't conform well to any of these specifications, you can also write a [custom parser](./output-parsing). In general, SARIF should be preferred over other formats because it is the most flexible and battle tested. - -Trunk currently supports the following linter output types. - -| Linter Type | Autofix support | Description | -|---|---|---| -| [`sarif`](#sarif) | ✓ | Produces diagnostics as [Static Analysis Results Interchange Format](https://docs.oasis-open.org/sarif/sarif/v2.0/sarif-v2.0.html) JSON. | -| [`lsp_json`](#lsp-json) | | Produces diagnostics as [Language Server Protocol](https://microsoft.github.io/language-server-protocol/) JSON. | -| [`pass_fail`](#pass-fail-linters) | | Writes a single file-level diagnostic to `stdout`. | -| [`regex`](#regex) | | Produces diagnostics using a custom regex format. | -| [`arcanist`](#arcanist) | ✓ | Produces diagnostics as Arcanist JSON. 
| -| [`rewrite`](#formatters) | ✓ | Writes the formatted version of a file to `stdout`. | - -If your linter produces a different output type, you can also write a [parser](./output-parsing) to transform the linter's output into something Trunk can understand. - -### SARIF - -`output: sarif` linters produce diagnostics in the [Static Analysis Results Interchange Format](https://docs.oasis-open.org/sarif/sarif/v2.0/sarif-v2.0.html): - -```json -{ - "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json", - "version": "2.1.0", - "runs": [ - { - "results": [ - { - "level": "warning", - "locations": [ - { - "physicalLocation": { - "artifactLocation": { - "uri": "/dev/shm/sandbox/detekt_test_repo/example.kt" - }, - "region": { - "startColumn": 12, - "startLine": 18 - } - } - } - ], - "message": { - "text": "A class should always override hashCode when overriding equals and the other way around." - }, - "ruleId": "detekt.potential-bugs.EqualsWithHashCodeExist" - } - ], - "tool": { - "driver": { - "downloadUri": "https://github.com/detekt/detekt/releases/download/v1.19.0/detekt", - "fullName": "detekt", - "guid": "022ca8c2-f6a2-4c95-b107-bb72c43263f3", - "informationUri": "https://detekt.github.io/detekt", - "language": "en", - "name": "detekt", - "organization": "detekt", - "semanticVersion": "1.19.0", - "version": "1.19.0" - } - } - } - ] -} -``` - -### LSP JSON - -`output: lsp_json` linters output issues as [Language Server Protocol](https://microsoft.github.io/language-server-protocol/specification#diagnostic) JSON. - -```json -[ - { - "message": "Not formatted correctly. 
Missing owner", - "code": "missing-owner", - "severity": "Error", - "range": { - "start": { - "line": 12, - "character": 8 - }, - "end": { - "line": 12, - "character": 12 - } - } - }, - { - "message": "TODO is assigned to someone not listed in this project", - "code": "unknown-user", - "severity": "Warning", - "range": { - "start": { - "line": 37, - "character": 0 - }, - "end": { - "line": 37, - "character": 14 - } - } - } -] -``` - -### Pass/Fail Linters - -`output: pass_fail` linters find either: - -* no issues in a file, indicated by exiting with `exit_code=0`, or -* a single file-level issue in a file, whose message is the linter's `stdout`, indicated by exiting\ - with `exit_code=1`. - -> Note: Exiting with `exit_code=1` but writing nothing to `stdout` is considered to be a linter tool failure. -> -> Note: `pass_fail` linters are required to have `success_codes: [0, 1]` - -### Regex - -`output: regex` linters produce output that can be parsed with custom regular expressions and named capture groups. The regular expression is specified in the `parse_regex` field. - -`regex` supports capturing strings from a linter output for the following named capture groups: - -* `path`: file path (required) -* `line`: line number -* `col`: column number -* `severity`: one of `note`, `notice`, `allow`, `deny`, `disabled`, `error`, `info`, `warning` -* `code`: linter diagnostic code -* `message`: description - -For example, the output - -``` -.trunk/trunk.yaml:7:81: [error] line too long (82 > 80 characters) (line-length) -``` - -can be parsed with the regular expression - -``` -((?P.*):(?P\d+):(?P\d+): \[(?P.*)\] (?P.*) \((?P.*)\)) -``` - -and would result in a `trunk` diagnostic that looks like this: - -``` -7:81 high line too long (82 > 80 characters) regex-linter/line-length -``` - -In the event that multiple capture groups of the same name are specified, the nonempty capture will be preferred. If there are multiple non-empty captures, a linter error will be thrown. 
Adjust your regular expression accordingly to match the specifics of your output. - -> Note: For additional information on building custom regular expressions, see [re2](https://github.com/google/re2/wiki/Syntax). More complicated regex may require additional escape characters in yaml configuration. - -### Arcanist - -You can also output JSON using the Arcanist format. - -```json -[ - { - "Char": 1, - "Code": "missing_copyright", - "Description": "Message about things\nMaybe contain multiple lines and web\nlinks\nhttps://website.com/notice-about-stuff\n", - "Line": 1, - "Name": "Incorrect (or missing) copyright notice", - "OriginalText": "", - "Path": "somefile.py" - } -] -``` - -### Formatters - -`output: rewrite` linters write the formatted version of a file to `stdout`; this becomes an autofix which `trunk` can prompt you to apply (which is what `trunk check` does by default) or automatically apply for you (if you `trunk check --fix` or `trunk fmt`). - -For example, if you wanted a linter to normalize your line endings, you could do this: - -```yaml -lint: - definitions: - - name: no-carriage-returns - files: [ALL] - commands: - - output: rewrite - formatter: true - command: sed s/\r// ${target} - success_codes: [0] -``` - -Setting `formatter: true` will cause `trunk fmt` to run this linter. diff --git a/code-quality/overview/getting-started/configuration/merge.mdx b/code-quality/overview/getting-started/configuration/merge.mdx deleted file mode 100644 index 87bec5c..0000000 --- a/code-quality/overview/getting-started/configuration/merge.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Merge" ---- -Custom `required_statuses` defined in the `.trunk/trunk.yaml` file take precedence over the GitHub required status checks from branch protection. - -Use custom `required_statuses` when your checks don't match what you configure on GitHub one-to-one. 
- -```yaml -version: 0.1 -``` - -```yaml -cli: - version: 1.16.0 -merge: - required_statuses: - - Trunk Check - - Unit tests & test coverage - # Add more required statuses here -``` diff --git a/code-quality/overview/getting-started/configuration/per-user-overrides.mdx b/code-quality/overview/getting-started/configuration/per-user-overrides.mdx deleted file mode 100644 index 2e2d9c9..0000000 --- a/code-quality/overview/getting-started/configuration/per-user-overrides.mdx +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "Per User Overrides" ---- -## Per-User Customization - -Trunk can also be managed by the `.trunk/user.yaml` file in your repository. This file is optional, but it allows individual developers to customize how they want `trunk` to run on their machines. - -Simply configure `.trunk/user.yaml` as you would for `.trunk/trunk.yaml`. Now you can add additional linters, enable [actions](../actions/), or specify [default command options](./#cli), without impacting the way other developers run `trunk`. - -Be mindful that `.trunk/user.yaml` takes precedence over `.trunk/trunk.yaml`, so substantial modifications could violate hermeticity. - -## Identity Config - -Trunk also saves a user config in `$HOME/.cache/trunk/user.yaml`. This is auto-generated in order to manage [anonymous usage data](./telemetry) and persist login sessions. diff --git a/code-quality/overview/getting-started/configuration/plugins/exported-configs.mdx b/code-quality/overview/getting-started/configuration/plugins/exported-configs.mdx deleted file mode 100644 index a3ece1f..0000000 --- a/code-quality/overview/getting-started/configuration/plugins/exported-configs.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Exporting linter configs" -description: "Reusing linter configs across projects." ---- -Plugin repositories can also export their own linter config files to keep configuration synced across an organization. 
Simply add an `exported_configs` section to a `plugin.yaml`, with paths to all of the config files you want to export, relative to the repository root. For example: - -```yaml -lint: - exported_configs: - - configs: - - .eslintrc.yaml - - .trunk/configs/.shellcheckrc -``` - -These config files will be available for linters that enumerate them in `affects_cache`or `direct_configs` to reference. These files are automatically symlinked into the repository root during linter execution. The set of applicable config files can be viewed in the details yaml file listed when running `trunk check --verbose`. - -Plugin-exported configs are sourced in lockstep with the plugin itself, so you will need to update\ -the `ref` field to use the latest configs. - -Note that if you're using an IDE Extension like clangd with an LSP that relies on those configs being in the root, you will need to manually create a symlink to the plugin's config. You can do this by running `ln -s .trunk/plugins// `. - -For an example of a plugin repo with config files, see our own [configs](https://github.com/trunk-io/configs) repo. - -### Importing configs - -This process can also be reversed to import config files from a plugins repository which\ -does not explicitly export them. Given a plugin sourced with id `trunk`, the sourcing repository can\ -achieve the same effect by including the following in its `.trunk/trunk.yaml`. 
- -```yaml -lint: - exported_configs: - - plugin_id: trunk - configs: - - .eslintrc.yaml - - .trunk/configs/.shellcheckrc -``` diff --git a/code-quality/overview/getting-started/configuration/plugins/external-repositories.mdx b/code-quality/overview/getting-started/configuration/plugins/external-repositories.mdx deleted file mode 100644 index 490e606..0000000 --- a/code-quality/overview/getting-started/configuration/plugins/external-repositories.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: "Share config between codebases" -description: "Sharing configuration between codebases using public config repos" ---- -To standardize Trunk configuration across an organization, you can create and publish a public plugins repository. This repo can define new linter definitions, specify enabled linters and actions, and even [export linter configs](./exported-configs). - -Once you've created your plugin repository, you can source it in other repositories to adopt shared configuration across your organization. For an example of how we do this in our own org, check out our [configs repo](https://github.com/trunk-io/configs). - -Note that in order to keep linters and tools up to date in your plugin configs repo, you'll need to run `trunk upgrade --apply-to=plugin.yaml` to apply [upgrades](../../../linters/upgrades). After making a public GitHub release with your plugin changes, other dependent repos will pick up these changes automatically when running `trunk upgrade`. - -### Get started - -Let's walk through how to create a simple linter that warns about TODOs in your codebase. 
- -We'll start by creating a new Git repository: - -```bash -PLUGIN_PATH=~/my-first-trunk-plugin -mkdir "${PLUGIN_PATH}" && cd "${PLUGIN_PATH}" -git init -``` - -And then create a linter that can find TODOs in your codebase using `grep` and `sed`: - -```bash -cat >plugin.yaml < -trunk check enable todo-finder -``` - -And now, to demonstrate how this works, let's `trunk check` some files where we know we have TODOs: - -```bash -trunk check $(git grep -li todo | head -n 10) -``` - -which will show you something like this: - -``` -.eslintrc.yaml:19:0 - 19:0 high Found todo in " # TODO(chris): Figure out why this causes a massive slowdown ... .trunk/dev-out/O1F.txt local.todo-finder/found-todo - 101:0 high Found todo in " node/no-unpublished-import: off # TODO: do we want this?" local.todo-finder/found-todo -``` - -### Organizing your code - -In the example we gave above, we put the linter's source code in `plugin.yaml`, which is fine for an example, but not really great for anything more than that. We can take the `sed` command from the plugin we created earlier and push that into the shell script: - -```bash -#!/bin/bash -sed -E 's/(.*):([0-9]+):(.*)/\1:\2:0: [error] Found todo in \"\3\" (found-todo)/'" -``` - -> Tip: Remember to run `chmod u+x todo-finder-parser.sh` so that `trunk` can run it! 
- -and also point the definition of `todo-finder` at it: - -```bash -version: 0.1 -lint: - definitions: - - name: todo-finder - files: [ALL] - commands: - - output: parsable - run: grep --with-filename --line-number --ignore-case todo ${target} - success_codes: [0, 1] - read_output_from: stdout - parser: - run: ${plugin}/todo-finder-parser.sh -``` - -We can also go another step and push the entire linter definition into a shell script: - -```bash -#!/bin/bash -grep --with-filename --line-number --ignore-case todo "${1}" | \ - sed -E 's/(.*):([0-9]+):(.*)/\1:\2:0: [error] Found todo in \"\3\" (found-todo)/'" -``` - -```yaml -version: 0.1 -lint: - definitions: - - name: todo-finder - files: [ALL] - commands: - - output: parsable - run: ${plugin}/todo-finder.sh - success_codes: [0] -``` - -See our documentation on [custom linters](../../../linters/custom-linters) and [custom parsers](../lint/output-parsing) for more on what you can do, such as writing your parser in Javascript or Python! - -### Publishing your plugin - -To share your plugin with the world, all you have to do is tag a release and push it to GitHub, GitLab, or some other repository hosting service: - -```bash -git add . 
git commit -m "Create a TODO finder" -git tag -a v0.0.0 --message "Initial TODO finder release" -git remote add origin <repository-url> -git push origin main v0.0.0
- -Additionally, any files enumerated in the lint `exported_configs` section are symlinked from their relevant plugin into the root of the workspace when an applicable linter is run with `trunk check`. - -### Importing a plugin repository - -By default, trunk imports the trunk-io/plugins repository. To import a repo add it to the `plugins.sources` list. Each repo requires a URI and ref. - -```yaml -plugins: - sources: - - id: trunk - uri: https://github.com/trunk-io/plugins - ref: v1.2.6 -``` - -| Field | Description | -|---|---| -| `id` | unique identifier for this repository | -| `uri` | address used to clone the target repository | -| `ref` | commit id or tag to checkout. **Do not use branch names, as these can be unstable** | -| `local` | path to local (on-disk) repository. Takes precedence over uri/ref if defined | -| `import_to_global` (default: `true`) | import content into the global namespace. If set to false actions and linters defined in the plugin must be referenced by `.` | - -### Plugin capabilities - -Any configuration used in `trunk.yaml` can also be used in a plugin repository, with [some exceptions](./#excluded-fields). A plugin repository must have one root level `plugin.yaml` and can have any number of other `plugin.yaml` files in other subdirectories. These configuration files are then merged into one composite plugin configuration. - -The most common use for a plugin repository is to define custom linters, actions, or tools. But they can also be used to define a common set of shared tools across an organization. For more info, see [organization configs](./external-repositories). - -The root `plugin.yaml` file may also have a `required_trunk_version` field which governs compatibility when [upgrading](../../../linters/upgrades) between CLI versions. 
- -#### Add a plugin to your `trunk.yaml` file - -To add a plugin from GitHub: - -``` -trunk plugins add https://github.com/trunk-io/plugins --id=trunk -``` - -To add a plugin from GitHub at a specific version: - -``` -trunk plugins add https://github.com/trunk-io/plugins v1.2.6 --id=trunk -``` - -To add a plugin from a local repository: - -``` -trunk plugins add /home/user/self/hello-world --id=hello-world -``` - -Note that when specifying a remote plugin, the `ref` field must be a tag or SHA. - -### Plugins scope - -Plugins are merged serially, in the order that they are sourced, and can override almost any Trunk\ -configuration. This allows organizations to provide a set of overrides and definitions in one\ -central place. - -For instance, you can create your own `my-plugins` repository with `plugin.yaml`: - -```yaml -version: 0.1 -lint: - definitions: - - name: trufflehog - commands: - - name: lint - # override trufflehog to use '--only-verified' - run: trufflehog filesystem --json --fail --only-verified ${target} - enabled: - - ruff@0.0.256 -``` - -sourced in a `.trunk/trunk.yaml` file from another repository as follows: - -```yaml -version: 0.1 -plugins: - sources: - - id: trunk - uri: https://github.com/trunk-io/plugins - ref: v1.2.6 - - id: my-plugins - local: ../my-plugins -``` - -When a user runs `trunk` in the sourcing repository, they will already have `ruff` enabled, along with the `trufflehog` override from the `my-plugins` repository. - -Note that private GitHub plugin repositories are not currently supported. - -### Excluded fields - -Plugin `sources`, as well as the `cli` `version`, are not merged from plugin repositories to ensure\ -that config merging occurs in a predictable, stable fashion. 
diff --git a/code-quality/overview/getting-started/configuration/runtimes.mdx b/code-quality/overview/getting-started/configuration/runtimes.mdx deleted file mode 100644 index b277e7d..0000000 --- a/code-quality/overview/getting-started/configuration/runtimes.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: "Runtimes" ---- -Trunk manages the hermetic installation of all required runtimes. You can also specifically pin a version of a runtime you'd like Trunk to use, or tell Trunk to reuse an already-installed runtime on the system. - -Trunk makes it easy for you to run tools (such as linters and actions) because, under the hood, Trunk actually downloads everything a given tool depends on, and then executes said tool in the context of its dependencies. In other words, you can run tools like `golangci-lint` and `rubocop` without wasting hours figuring out how to install the right Go and Ruby versions on your machine, because Trunk will install a `go` and `ruby` runtime for those tools to depend on. - -Importantly, just like how Trunk by design requires you to version your tools, i.e. specify which version of `golangci-lint` and `rubocop` is enabled in your repository at a given commit, Trunk also versions your runtimes. This means that you can stop asking questions like "Wait, which version of Go are you using?" and "How do I choose a Ruby version to install on this new Jenkins runner?"; instead, all you have to do is look at the `runtimes` section in your `.trunk/trunk.yaml`, and you know which version of which runtime Trunk will use for a tool at any given moment: - -``` -runtimes: - enabled: - - go@1.18.3 - - node@16.14.2 - - python@3.10.3 - - ruby@3.1.0 -``` - -## How does this work? - -Runtimes are defined by a combination of configuration and native code inside Trunk itself. Let's walk through an example, `prettier`: - -```yaml -lint: - definitions: - - name: prettier - runtime: node - package: prettier - commands: - - run: prettier -w ${target} - ... 
-``` - -Since Prettier uses the `node` runtime, let's also look at that definition; specifically, the `runtime_environment` and `linter_environment`: - -```yaml -runtimes: - definitions: - - type: node - linter_environment: - - name: PATH - list: - - ${linter}/node_modules/.bin - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: - - ${runtime}/bin -``` - -Now we have all the config fields we need to understand what Trunk does in this example. - -### Installing `prettier` - -Before Trunk can run `prettier`, it needs to install `prettier`; this is done using the package manager associated with a given runtime, the mechanism for which is defined natively inside Trunk (i.e. Trunk has custom code for every runtime to manage how packages for said runtime are installed). - -For most runtimes, this is as simple as executing the runtime's package manager in the context of the `runtime_environment`; in this example, that means doing `npm install ${package}` with environment variables `HOME=${home}` and `PATH=${runtime}/bin`. - -### Running `prettier` - -Once `prettier` is installed, we combine its runtime's `linter_environment` with any other environment variables that might be defined in a given `lint.definitions` entry (in this case there are none), and then use that as the environment when we execute the command for a given linter. - -## Specifying a runtime version - -If you would like to use the system-installed runtime instead of the Trunk managed version you can always use the `runtimes.definitions.system_version` property in your `trunk.yaml` file. - -```yaml -runtimes: - enabled: - - go@x.y.z - -# or -runtimes: - enabled: - - go@>=x.y.z - definitions: - - type: go - system_version: allowed -``` - -If you choose to use a system-managed version, you will also need to specify a runtime version constraint in your enabled section, e.g. `python@>=3.0.0`. 
diff --git a/code-quality/overview/getting-started/configuration/telemetry.mdx b/code-quality/overview/getting-started/configuration/telemetry.mdx deleted file mode 100644 index 1397878..0000000 --- a/code-quality/overview/getting-started/configuration/telemetry.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Telemetry" ---- -Trunk sends basic usage metrics from our local tools ([CLI](/broken/pages/OJc6wVrAfc2SLQZlJ2m1) & [VS Code Extension](../../ide-integration/vscode)) to our analytics system to help us understand our usage and improve our tools over time. We do not send your code or codebase to our backend. - -## Why we collect usage data - -Our product team constantly works on feature enhancement and new areas to invest in. Usage data allows us best to understand the ergonomics and performance of our tools. For example, if we add a new subcommand to the command line interface - how often is it used? Additionally, usage data is gathered to track usage and compliance against our free and paid product offerings. - -To give concrete examples: we track our users' client version and operating system to understand backward compatibility requirements, and the time it takes our user base to upgrade to our latest releases. - -## Example usage data - -```json -{ - "anonymous_id": , - "command": "check --all", - "launcher_version": "1.2.3", - "os": "macOS", - "release": 1.4.1, - "source": "client", - "time": , - "exit_code": 0, - "duration_ms": 232, - "repository": -} -``` - -## Can I disable usage data? - -Yes. 
You can disable usage telemetry by setting the following environment variable: - -```bash -TRUNK_TELEMETRY=off -``` diff --git a/code-quality/overview/getting-started/configuration/tools.mdx b/code-quality/overview/getting-started/configuration/tools.mdx deleted file mode 100644 index 3d0228c..0000000 --- a/code-quality/overview/getting-started/configuration/tools.mdx +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: "Tools" ---- -Tool definitions - -Each tool definition shares a set of attributes: - -| Field | | -| --- | --- | -| `name` | The name of the tool. Must be unique. | -| `known_good_version` | The default version to initialize the tool at (required). | -| `shims` | A list of binaries exposed by the tool. Each of these will correspond to one identically named executable installed in `.trunk/tools.`In the most common case, there is exactly one shim matching the name of the tool. We'll discuss other cases below. | -| `environment` | You can specify an environment for the tool. We provide the `${tool}` template argument that resolves to the installation directory of the tool. By default, we prepend this to `$PATH` within the shim script, so this is used to locate the binary. For legacy reasons, `${linter}` also resolves to this directory. | - -> Note: If the tool has a `runtime` attribute, the runtime's environment is merged in to its environment (discussed in the examples below). - -Broadly speaking, there are 3 kinds of tools - download, package, and runtime-based tools. We'll look at each one in turn: - -#### Download-based tools - -Download-based tools are straightforward: They reference a named download configuration in the global `downloads` section. 
Here is an example: - -```yaml -downloads: - - name: gh - downloads: - - os: - linux: linux - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.tar.gz - strip_components: 1 - - os: - windows: windows - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.zip - strip_components: 1 - # macOS releases since 2.28.0 started using .zip instead of .tar.gz - - os: - macos: macOS - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.zip - strip_components: 1 - version: ">=2.28.0" - - os: - macos: macOS - cpu: - x86_64: amd64 - arm_64: arm64 - url: https://github.com/cli/cli/releases/download/v${version}/gh_${version}_${os}_${cpu}.tar.gz - strip_components: 1 -tools: - definitions: - - name: gh - download: gh - known_good_version: 2.27.0 - environment: - - name: PATH - list: ["${tool}/bin"] - shims: [gh] -``` - -Note that for the downloaded archive, the binary named `gh` is inside the `bin` directory, so we use the environment to point the `$PATH` there. - -#### Download fields - -`strip_components`: This number of leading directory components to remove from all files in an archive when extracting. - -`rename_single_file`: If an archive contains a single file, this will cause that file to be renamed to the name of the tool. This is most useful for downloads of gzip'd binaries with the platform name in the binary. - -#### Package-based tools - -Package-based tools depend on specified `package` and `runtime` attributes. 
Here is an example of configuring `mypy` as a tool: - -```yaml -tools: - definitions: - - name: mypy - runtime: python - package: mypy - shims: [mypy] - known_good_version: 0.931 - extra_packages: - - types-six@1.16.21 - - types-request -``` - -`extra_packages` behaves equivalently to a package file like `requirements.txt` for Python or `package.json` for Node. They can be optionally pinned at versions. - -The version of the primary package (in this case, `mypy`) is specified in the `tools.enabled`. So to enable the `mypy` tool at `1.4.0`, list it as `- mypy@1.4.0`. - -If you don't want to include additional packages in the tool definition, you can instead make them explicit in the enabled section of your `.trunk/trunk.yaml` as you would for [linters](../../linters/), for example: - -```yaml -tools: - enabled: - - mypy@1.4.0: - packages: - - types-six@1.16.21 -``` - -#### Runtime-based tools - -Runtime-based tools are a special case that are not explicitly defined. Rather, each runtime object exposes a set of `shims` (just like `tool` definitions). - -If the runtime is enabled and listed in `tools.runtimes`, then shims exposed by that runtime are automatically installed in the `.trunk/tools` directory alongside those of other tools (`trunk tools enable ` does that for you). Thus you can run `python`, `pip`, etc as `trunk`-managed tools. - -Example: - -```yaml -tools: - runtimes: - - python -``` - -If this is disruptive to your workflow, simply remove the runtime's name `(go, node, python,...)` from `tools.runtimes` section or run `trunk tools disable ` which will handle it for you. Runtimes cannot be enabled or versioned via the `tools.enabled` section, however, and runtimes must be enabled in the `runtimes` section to be available to have their shims installed. 
diff --git a/code-quality/overview/getting-started/index.mdx b/code-quality/overview/getting-started/index.mdx deleted file mode 100644 index 041487d..0000000 --- a/code-quality/overview/getting-started/index.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Code Quality CLI" ---- -Trunk provides command-line tools for different products. Choose your product below: - -* [Trunk Launcher Install](./install): Trunk uses a launcher to automatically install the appropriate CLI for your platform -* [Trunk Code Quality CLI](./commands-reference/): commands reference -* [Trunk Code Quality CLI Configuration](./configuration/): the Trunk CLI has its top-level config defined in `.trunk/trunk.yaml` -* [Trunk Tools CLI](./tools): manage tools used by your repo -* [Trunk Actions](./actions/): local workflow automation and githooks manager - -## Initializing Trunk in a git repo is as simple as running: - -```bash -trunk init -``` - -This will scan your repository and create a `.trunk/trunk.yaml` file which enables all the linters, formatters, and security analyzers that [Trunk C](./code-quality)[ode Quality ](./code-quality)recommends. - - -Security-conscious users may want to also record the signature of the CLI, which the [Trunk Launcher](./install#the-trunk-launcher) will use to verify the CLI's provenance: - -``` -trunk init --lock -``` - - -### Tweak the configuration - -Trunk is completely controlled through the `trunk.yaml` file. If for example you are not using the `check` tool you can safely remove the `lint` section from the file. - -[Learn more about CLI configuration](./configuration/) - -### Single-player mode - -If you want to run `trunk` inside your repository but are not ready to roll it out team-wide, you can run `trunk` in what we call single-player mode. - -When in single-player mode, the `.trunk` directory will be listed in `.git/info/exclude`, which will cause git to ignore its contents. 
When trunk is automatically initialized by the VSCode extension, you will be started in this mode. You can also initialize this way explicitly with the `trunk init --single-player-mode` command. If at any time you wish to toggle single-player mode on or off, it can be done with the following two commands: - -```bash -# Turn single-player mode on. -trunk config hide -``` - -```bash -# Turn single-player mode off. -trunk config share -``` - -### Only enabling detected tools - -`trunk init` supports the flags `--only-detected-formatters` and `--only-detected-linters`. Each of these flags limits `trunk init` to only enable tools that we detect you are already using. - -We provide support for running `trunk` in GitHub Codespaces. - -[GitHub Codespaces](https://github.com/features/codespaces) are fully configured virtual containers for developing your GitHub repositories. diff --git a/code-quality/overview/getting-started/install.mdx b/code-quality/overview/getting-started/install.mdx deleted file mode 100644 index 9b42e71..0000000 --- a/code-quality/overview/getting-started/install.mdx +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: "Install" ---- -### The Trunk launcher - -Trunk uses a launcher to automatically install the appropriate CLI for your platform. The launcher is a bash script that downloads the appropriate Trunk CLI version and runs it. The launcher invisibly runs the Trunk CLI version specified in a project's `.trunk/trunk.yaml` file. The actual Trunk CLI is a single binary that is cached locally in `~/.cache/trunk` and is updated automatically. - -### Install the launcher - -The Trunk CLI can be installed in many different ways, depending on your use case. - -#### Using NPM - -If your project uses a `package.json`, you can specify the Trunk Launcher as a dependency so your developers can start using Trunk after installing Node dependencies. 
- - -```sh npm -npm install -D @trunkio/launcher -``` -```sh pnpm -pnpm add -D @trunkio/launcher -``` -```sh yarn -yarn add -D @trunkio/launcher -``` -```sh bun -bun install -D @trunkio/launcher -``` - - -Then add Trunk Launcher in your `package.json` as a script: - -```json -{ - "scripts": { - "trunk": "trunk", - "lint": "trunk check", - "fmt": "trunk fmt" - } -} -``` - -#### Using cURL - -You can install the Trunk Launcher script directly by downloading it through cURL. The launcher script supports both macOS and Linux environments. - -To allow your teammates to use `trunk` without installing anything, the launcher can be committed directly into your repo: - -``` -curl -LO https://trunk.io/releases/trunk -chmod +x trunk -git commit ./trunk -m "Commit Trunk to our repo" -``` - -When the launcher is called for the first time by your teammates, the Trunk Launcher will download, manage, and run the appropriate binary for the environment. - -#### Using Homebrew - -You can run the following command if you prefer to install this tool via homebrew. Keep in mind that other developers on your team will also have to install manually. - -```bash -brew install trunk-io -``` - -#### Using Windows - -From **`git-bash` or `msys2`**, download the Bash launcher and add it to your `PATH`: - -```bash -curl https://get.trunk.io -fsSL | bash -``` - -From **`powershell`**, download the powershell launcher: - -``` -Invoke-RestMethod -Uri https://trunk.io/releases/trunk.ps1 -OutFile trunk.ps1 -``` - -Ensure you can execute powershell scripts: - -``` -Set-ExecutionPolicy Bypass -Scope CurrentUser -``` - -You can then execute trunk as `.\trunk.ps1`. 
- -#### Compatibility - -Trunk only supports Windows with the following versions and above: - -| Tool | Where to Modify | Minimum Required Version | -|---|---|---| -| CLI | `cli` `version` in `.trunk/trunk.yaml` | `1.13.0` | -| Plugins | `ref` for the `trunk` plugin in `.trunk/trunk.yaml` | `v1.0.0` | -| VSCode | Reload VSCode to update | `3.4.4` | - -You will also need to install [C and C++ runtime libraries](https://aka.ms/vs/17/release/vc_redist.x64.exe) in order to run some linters. - -### Uninstall instructions - -#### From your system - -Trunk has a very minimal installation, and therefore, there's not much to uninstall. The two system paths we use are: - -* `/usr/local/bin/trunk`: the [Trunk Launcher](./install#the-trunk-launcher) -* `~/.cache/trunk`: cached versions of the trunk cli, linters, formatters, etc. - -You can delete those two paths to uninstall. - -#### From a repo - -To cleanly remove Trunk from a particular repo, run: - -```bash -trunk deinit -``` - -#### VS Code extension - -To uninstall the Trunk VS Code extension, do so as you would any extension ([docs](https://code.visualstudio.com/docs/editor/extension-marketplace)). Then reload VS Code. - -### Binary download (not recommended) - -You can directly download the `trunk` binary. _We don't recommend this mode of operation because your ability to version the tool through_ `trunk.yaml` _will not function when launching_ `trunk` _directly from a downloaded binary._ Regardless you can bypass the launcher support by downloading the prebuilt binaries here: - -| variable | options | -|---|---| -| version | the semver of the binary you want to download | -| platform | 'darwin`, 'linux' | - -```bash -# for example https://trunk.io/releases/1.0.0/trunk-1.0.0-linux-x86_64.tar.gz -https://trunk.io/releases/${version}/trunk-${version}-${platform}-x86_64.tar.gz -``` - -### Pre-installing tools - -Trunk hermetically manages all the tools that it runs. 
To do this, it will download and install them into its cache folder only when they are needed. If you would like to ensure that all tools are installed ahead of time, then you can use the `trunk install` command. This may be useful if you want to prepare to work offline or if you would like to include the tools in a docker image. On Linux and macOS you may find the cache folder at `$HOME/.cache/trunk`. diff --git a/code-quality/overview/getting-started/tools.mdx b/code-quality/overview/getting-started/tools.mdx deleted file mode 100644 index c500aeb..0000000 --- a/code-quality/overview/getting-started/tools.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: "Tools" ---- -You can use the Trunk CLI to manage tools used by your repo. Trunk CLI can install the tools needed for a project according to what's configured in the `trunk.yaml` config file and let your teammates easily install the same versions of the tools. Trunk will also help you expose those installed tools by dynamically adding them to your `PATH` when you enter the project directory, but will not pollute your `PATH` outside of the project. - -### Command line - -| trunk tools \ | Description | -| -------------------------------- | ------------------------------------------------------------------------------ | -| `list` | list all available tools in the repository and whether they are enabled or not | -| `install` | install your enabled tools into `.trunk/tools` | -| `enable` `[@version]` | enable the provided tool, optionally at a specified version | -| `disable` `` | disable the provided tool | - -### Discovering tools - -The Trunk [plugins repo](https://github.com/trunk-io/plugins) ships with a collection of tools that can help supercharge your repository and provide examples for how to write your own. 
To see a list of tools that you can enable in your own repo run: - -```shell -trunk tools list -``` - - -![](/assets/image_(36).png) - - -### Configuring shell hooks - -Before running any tools managed by Trunk, enable shell hooks. With shell hooks, Trunk can manage your path variable dynamically, which lets you install tools used only in specific repos without polluting your shell by installing global tools. This is especially useful if you work on two repos using the same tool, but locked to different versions. - -You can enable shell hooks by running `trunk shellhooks install`, which will install the Trunk hooks to the config file of your $SHELL. You can also run `trunk shellhooks install ` to install a specific shell hook. - -Supported shells: - -* bash -* zsh -* tcsh -* fish -* elvish - -For organizations that want to require the use of the hooks, they can add to the config file: - -```yaml -# .trunk/trunk.yaml: -version: 0.1 -cli: - shell_hooks: - enforce: true -``` - -On the next Trunk command (like check or fmt), it will update your shell RC file to load our hooks. - -After reloading your shell, whenever you're inside your repo at the command line, you can just run shims installed by `trunk tools` directly by name. - -N.B. There is a known incompatibility with direnv when using PATH\_ADD. To use our hooks, remove PATH\_ADD from your .envrc and add them to your Trunk config as such: - -```yaml -version: 0.1 -cli: - shell_hooks: - path_add: - - "${workspace}/tools" -``` - -Paths can either be absolute, or relative to the workspace using the special `${workspace}` variable. - -### Running tools - -With shell hooks enabled, you can just run your tools by their name. For example, if you have run `trunk tools install grpcui` to install the GRPC UI tool, you can run it with: - -``` -grpcui -``` - -#### Running tools without shell hooks - -Trunk installs your enabled tools into the `.trunk/tools` directory. 
Each tool exposes a list of **shims** (these may or may not be identically named to the tool - most typically a tool has one shim matching the name of the tool). Each shim is installed into the `.trunk/tools` directory. - -You can run your tools by referring to the path `/.trunk/tools/` but this is unwieldy. We highly recommend using our shell hooks to manage your PATH. - -### Troubleshooting linters - -Tools enable you to run your linter binaries on the command line independent of `trunk check` and test and troubleshoot your integrations more easily. - -Tools are configured in the `tools` section of `trunk.yaml`. As with other settings, you can override these values in your [User YAML](./configuration/per-user-overrides). - -```yaml -tools: - auto_sync: false # whether shims should be hot-reloaded off config changes. - enabled: - - bazel@6.0.0 - - mypy@1.4.1 - - ibazel@0.22.0 - - helm@3.9.4 - - eksctl@0.74.0 - - asciinema@2.1.0 - disabled: - - gt - definitions: - - name: gh - download: gh - known_good_version: 2.27.0 - environment: - - name: PATH - list: ["${tool}/bin"] - shims: [gh] -``` - -Like with actions and linters, we have a (versioned) `enabled` section and a `disabled` section, which can be manipulated using `trunk tools enable/disable`. There is also a list of `definitions`, which are merged across your `trunk.yaml`, `user.yaml`, as well as any plugins that you use. - -`auto_sync` controls whether or not Trunk automatically installs your tools for you when your config changes. This defaults to `true`. Note that the daemon must be running with the monitor in order for this to function properly. 
diff --git a/code-quality/overview/ide-integration/github-codespaces.mdx b/code-quality/overview/ide-integration/github-codespaces.mdx deleted file mode 100644 index c2c0704..0000000 --- a/code-quality/overview/ide-integration/github-codespaces.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "GitHub Codespaces" ---- -We provide support for running `trunk` in GitHub Codespaces. - -[GitHub Codespaces](https://github.com/features/codespaces) are fully configured virtual containers for developing your GitHub repositories. - -## Installing the Trunk feature - -You can install the Trunk Launcher in your codespace by including the following line in your `devcontainer.json` file under `features`: - -```json - "features": { - "ghcr.io/trunk-io/devcontainer-feature/trunk": "latest", - }, -``` - -The feature is defined [here](https://www.github.com/trunk-io/devcontainer-feature). - -To have the launcher binary install the CLI tool and associated linters, you can add `trunk install` to `updateContentCommand` in `devcontainer.json`: - -```json -"updateContentCommand": "trunk install", -``` - -Read the [GitHub docs](https://docs.github.com/en/codespaces/prebuilding-your-codespaces/configuring-prebuilds#configuring-time-consuming-tasks-to-be-included-in-the-prebuild) to learn more about `updateContentCommand` . - -Note: You should only add `trunk install` if you have a Trunk-configured repository. - -You can then [configure pre-builds](https://docs.github.com/en/codespaces/prebuilding-your-codespaces/configuring-prebuilds) to run from GitHub workflows, ensuring the `trunk` CLI and needed linters are available and ready to go when you need to boot up your codespace. - -## Installing the Trunk extension - -If you are using the Trunk feature, we will automatically install the Trunk extension on your behalf. - -Note: We highly recommend turning off auto-save in your VSCode settings in your codespace (or set autosave to a longer timeout). 
Saving files triggers the extension to re-lint, which can quickly overload the extension for anything but the fastest linters. The auto-save setting is detailed [here](https://code.visualstudio.com/docs/editor/codebasics#_save-auto-save). - -Otherwise, You can add `trunk` to your list of extensions in `devcontainer.json` - - -```json - "customizations": { - "vscode": { - "extensions": [..., "trunk.io"] - } - }, -``` - -Then you're all set to run `trunk` in your Codespace! diff --git a/code-quality/overview/ide-integration/index.mdx b/code-quality/overview/ide-integration/index.mdx deleted file mode 100644 index 7731873..0000000 --- a/code-quality/overview/ide-integration/index.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "IDE integrations" ---- -Code Quality helps you shorten the feedback loop by integrating with your favorite IDEs and code editors. - -### How it works - -Code Quality runs a daemon that looks for files that change in real time and lints the changes using the same tools and configuration as running `trunk check`. With LSP support, you will get instant feedback on your code changes as you write. - -### Supported IDEs - - - - - - diff --git a/code-quality/overview/ide-integration/neovim.mdx b/code-quality/overview/ide-integration/neovim.mdx deleted file mode 100644 index f0a1c6d..0000000 --- a/code-quality/overview/ide-integration/neovim.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "Neovim" ---- - -📘 The Trunk Code Quality Neovim Plugin is available for beta! - -Try it out by following the instructions below. 
- - -### Prerequisites - -The Neovim Plugin needs the following prerequisites: - -| Tool | Minimum Required Version | -| ------ | ------------------------ | -| CLI | 1.17.0 | -| Neovim | v0.9.2 | - -### Get started - -Using the [lazy.nvim](https://github.com/folke/lazy.nvim#readme) plugin manager: - -```lua -require("lazy").setup({ - { - "trunk-io/neovim-trunk", - lazy = false, - -- optionally pin a version - tag = "v0.1.3", - -- these are optional config arguments (defaults shown) - config = { - -- trunkPath = "trunk", - -- lspArgs = {}, - -- formatOnSave = true, - -- formatOnSaveTimeout = 10, -- seconds - -- logLevel = "info" - }, - main = "trunk", - dependencies = {"nvim-telescope/telescope.nvim", "nvim-lua/plenary.nvim"} - } -}) -``` - -For other plugin managers and installation methods, see our [Neovim Plugin repo](https://github.com/trunk-io/neovim-trunk#installation). - -### Features - -The Neovim Plugin is designed to mirror the [VSCode extension](./vscode). Supported features include: - -* Provide inline diagnostics and auto-fixes -* Format files on save -* Run [Trunk Actions](../getting-started/actions/) notifications -* Display the linters that Trunk runs on a file - -### Limitations - -The Trunk Code Quality Neovim Plugin is in beta with limited support. If you encounter any issues, feel free to reach out on [Slack](https://slack.trunk.io). - -For other notes and configuration, see the [Neovim Plugins repo](https://github.com/trunk-io/neovim-trunk#trunk-check-neovim-plugin). diff --git a/code-quality/overview/ide-integration/openai-codex-support.mdx b/code-quality/overview/ide-integration/openai-codex-support.mdx deleted file mode 100644 index 1fdfba6..0000000 --- a/code-quality/overview/ide-integration/openai-codex-support.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "OpenAI Codex Support" -description: "Trunk Code Quality for OpenAI Codex" ---- -This document provides guidance for integrating Trunk Code Quality into OpenAI Codex environments. 
- -### Requirements - -Ensure you’re running the following minimum versions in your `.trunk/trunk.yaml` file: - -* Trunk CLI: v1.24.0 or later -* Trunk Plugins: v1.7.0 or later - -### Installation - -In your Codex environment setup script, include: - -``` -# Install Trunk CLI and dependent tools -curl https://get.trunk.io -fsSL | bash -trunk install -``` - -It's important to pre-install all trunk dependencies during the setup because codex environments are network-isolated post-setup. - -#### Debugging installation - -If the environment setup is slow, run the following to diagnose: - -``` -trunk install --debug -``` - -This command will detail installation timings and potential bottlenecks. - -### Handling network isolation - -Codex environments are network-isolated post-setup. Linters requiring network access must be excluded from running explicitly: - -Example: - -``` -trunk check --filter=-trufflehog,-semgrep -``` - -### Teaching Codex how to use Trunk - -Codex can automatically run trunk commands for you, by informing it to do so in your AGENTS.md file: - -``` -## AGENTS Instructions - -### Formatting and Linting -- Run `trunk check -y --filter=-trufflehog,-semgrep` after modifying code to format and fix linting issues. -- Review and verify changes before committing. -- If only formatting is required, run `trunk fmt`. -- Exclude linters requiring network access by adding them to the negative filter list as shown above. -``` diff --git a/code-quality/overview/ide-integration/vscode.mdx b/code-quality/overview/ide-integration/vscode.mdx deleted file mode 100644 index 04647b1..0000000 --- a/code-quality/overview/ide-integration/vscode.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "VSCode" ---- -Trunk Code Quality is available as a [VSCode extension](https://marketplace.visualstudio.com/items?itemName=trunk.io) that you can use to streamline your linting and formatting experience. 
- -### Get started - -By default, Trunk will try to automatically initialize itself in single-player mode. This means that it'll create a Trunk configuration that is hidden from git, which allows you to try it out [without Trunk's versioning powers](./vscode#single-player-mode). - -If Trunk has not initialized itself in single-player mode, then you will need to initialize it manually, either by pressing the 'Initialize Trunk' button in the Trunk side panel: - - -![initialize trunk](https://static.trunk.io/assets/vscode_init_trunk.png) - - -### Features - -#### Discovery - -Trunk will suggest tools that will supercharge your development, from `actionlint`, for your GitHub Actions, to`sql-formatter` and `sqlfluff` for your SQL, to`yamllint`, for your YAML files. - -We believe that everything in your repository not only can be, but should be, automatically formatted and linted. We recognize that part of this is making it easy for developers to discover tools that apply to their codebases. When Trunk is initialized, we turn on as many additional tools as we can, and periodically follow up with additional suggestions. - -#### Seamless user experience - -On the sidebar to the left, you'll see the Trunk icon which you can use to open the side panel to view issues. By default, issues are populated for every file you open as well as any modified files. - - -![side panel](https://static.trunk.io/assets/vscode_side_panel.png) - - -Trunk also shows Trunk Code Quality Issues in a panel in the File Explorer, but you can hide it if you wish: - - -![hide explorer panel](https://static.trunk.io/assets/vscode_hide_explorer_panel.jpg) - - -#### Single-player mode - -In single-player mode, Trunk creates a [configuration file](../linters/configure-linters) and hides it from Git, so that you can test out Trunk on your own and get familiar with how it works, without committing this file. - -Users normally check this file into your repository so that you can run Trunk reproducibly. 
It pins the version of trunk, as well as that of every runtime and linter that you've enabled, allowing your team to guarantee that everyone and your CI runners are always running the same checks on your code. - -To check it into your repository, all you have to do is run - -```bash -trunk config share -``` - -or click on the notification to "Share trunk config", which will commit `.trunk/trunk.yaml`, the Trunk configuration file. - -### Trunk as default formatter - -You can use Trunk as your default formatter in VSCode if you have Trunk configured for the project. - -You can set `trunk.io` as the default formatter for just one language as in the example, or as a default for all languages. - -In your `settings.json` like this: - -```json -"[markdown]": { - "editor.defaultFormatter": "trunk.io" -} -``` - -For manual formatting, open the command palette and use `Format Document With...` and select `Trunk` there. - -### Learn more - -Check out how to [install the CLI](../setup-and-installation/), [set it up in CI](../initialize-trunk), [ignore issues](../linters/ignoring-issues-and-files), and set up [Custom Linters](../linters/custom-linters). - - -![linter code docs](https://static.trunk.io/assets/vscode_doc_links.png) - - - -![trunk-ignore](https://static.trunk.io/assets/vscode_ignore_issue.gif) - - -### Configuration - -* `trunk.inlineDecorators` – allows you to disable inline decorators for diagnostics. -* `trunk.inlineDecoratorsForAllExtensions` – allows you to only render inline decorators for diagnostics that were generated by Trunk. - -### Debugging - -If you look at the "Window" output for the extension, you may find useful error logs. - -### Feature requests and bug reports - -Looking for another feature? Hit a bug? 
🐛 [Let us know!](https://slack.trunk.io/) diff --git a/code-quality/overview/index.mdx b/code-quality/overview/index.mdx deleted file mode 100644 index 59995d3..0000000 --- a/code-quality/overview/index.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: "Overview" -noRobotsIndex: true ---- -Trunk Code Quality is a metalinter and static analysis manager designed to unify linting, formatting, and security scanning across polyglot repositories. It consolidates tool management, runtime isolation, and execution logic into a single CLI and daemon. - -### Architecture - -Trunk consists of a C++ CLI that orchestrates the download, installation, and execution of third-party static analysis tools. - -#### Hermetic Tool Management - -Trunk manages tools and their runtimes hermetically. Instead of relying on the host system’s environment (e.g., `/usr/bin/python` or global `npm` packages), Trunk downloads and caches specific versions of runtimes required by the linters. - -* Isolation: A project requiring Python 3.10 for a specific linter will not conflict with a system installed Python 3.7. -* Consistency: All engineers and CI runners execute the exact same version of the linter and its runtime dependencies. -* Scope: Covers primary languages, Infrastructure as Code (IaC), build scripts, CI configurations (YAML), and documentation. - -#### The Trunk Daemon - -The CLI (`trunk check`) launches a background daemon. This process: - -1. Monitors file system events. -2. Triggers jobs to precompute linting results in the background. -3. Caches results to speed up subsequent checks. -4. Serves real-time annotations to IDE extensions (VSCode, Neovim). - -Users can override background execution behavior by modifying the `run_when` configuration for specific tools if they are too compute-intensive. - -### Execution Model - -#### Git-Aware Scanning - -Trunk optimizes execution by checking only modified files or lines. 
It relies on git diffs to determine the scope of analysis, preventing full-repo scans during standard development workflows. - -### Hold-the-line - -**Hold The Line** (HTL) is the principle that Trunk Code Quality will _only run on new changes_ in your codebase rather than every file in the whole repo. This allows you to use Check to improve your codebase **incrementally** rather than having to address all of the issues at once. HTL also runs checks much faster than scanning the entire codebase would. - -_Hold The Line_ **works at the line level** of your source code. For example, if a single line has multiple pre-existing issues and a new linter is added, which reports the new issue, then Trunk Code Quality will report just the new issue and not the previous ones. - -By default, Trunk runs in hold-the-line mode: - -``` -trunk check foo.file -``` - -You can still run on all files. - -``` -trunk check --all -``` - -_**Hold the Line**_ is built into Trunk Code Quality itself. This means existing linters that do not support line-by-line functionality will still work with _Hold the Line_. Even [custom linters](./linters/custom-linters) you write yourself. - -### Daemon - -The Trunk CLI, specifically `trunk check`, runs a daemon that monitors relevant file changes and triggers jobs to precompute in the background while you work. The daemon is used both to support real-time background checking in supported extensions such as [VSCode](./ide-integration/vscode) and [Neovim](./ide-integration/neovim), and to precompute check results for faster commits/pushes. - -Some native linters are more compute/memory intensive and `check` allows you to disable background linting for those tools. By default, linters run whenever a file is modified in the background. You can override this behavior by editing the [`run_when`](./getting-started/configuration/lint/commands#run_when) configuration for a tool. 
- -### Hermetic tools and runtime management - -Trunk hermetically installs the static analysis tools you run and their required runtimes. This means these tools are installed and managed by the Trunk CLI, and are unaffected by your system's environment. - -If a tool requires `python 3.10` but the projects you're working on require `python 3.7`, Trunk will manage that tool and its `python 3.10` runtime automatically and not affect the `python 3.7` environment. This means Trunk will not modify or pollute your machine. - -Trunk manages the hermetic installation of all required runtimes. You can also specifically pin a version of a runtime you'd like Trunk to use, or tell Trunk to reuse an already-installed runtime on the system. - -### Plugin system - -Trunk is fully extensible and configurable through the [Trunk Plugins Repo](https://github.com/trunk-io/plugins/). When installing a plugin through Trunk, the definition of a plugin's behavior, including install, run, and report instructions, is defined in the Plugins Repo. - -This can be overridden by defining your own plugin repo to import, overriding individual linter definitions locally, and even writing your own custom linters. - -[Learn more about the plugin system.](./getting-started/configuration/plugins/) - -### Run on every pull request - -Trunk works in CI. Trunk Code Quality provides [GitHub integration](./setup-and-installation/github-integration) and can run in any other CI environment. This lets you check Code Quality in every PR with consistent config and consistent results. - -[Learn more about Code Quality in CI.](./prevent-new-issues/) - -### Setup and installation - -Trunk Code Quality is easy to adopt for new and legacy projects alike. You can run Trunk Code Quality using your existing linter configurations, incrementally address existing problems, and prevent new issues from being committed to your repo. 
- - - - Initialize Trunk in your repo to generate Trunk config files and get linter recommendations based on your project's files. - - - Check for existing issues in your project. You can address problems up front, use hold-the-line to fix them incrementally, and configure ignores for irrelevant issues. - - - Set up automated runs on commits, before pushes, and on PRs to prevent new issues from appearing in your repo. - - diff --git a/code-quality/overview/initialize-trunk.mdx b/code-quality/overview/initialize-trunk.mdx deleted file mode 100644 index 422f2c8..0000000 --- a/code-quality/overview/initialize-trunk.mdx +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: "Initialize Trunk" ---- -Before you can start using Trunk Code Quality, you need to install and initialize Trunk in your repo. This page covers the initialization process. - -### Install the CLI - -The Trunk CLI can be installed in many different ways depending on your use case. - - -We recommend installing the CLI via **NPM** if you’re already using NPM, or using **cURL** and **committing the launcher to Git** for all other projects. Both methods allow your teammates to use Trunk without needing an additional install step. - - -#### The Trunk Launcher - -The easiest way to give everyone access to Trunk is to use the Trunk launcher. The Trunk launcher is a small script that will automatically install and run Trunk when invoked for the first time, similar to other command line tools like the [Gradle Wrapper](https://docs.gradle.org/current/userguide/gradle_wrapper.html). - -You can install the [Trunk Launcher](./getting-started/install#the-trunk-launcher) script directly by downloading it through cURL. The launcher script supports both macOS and Linux environments. 
- - -```bash bash -curl https://get.trunk.io -fsSL | bash -``` -```bash bash (no prompts) -curl https://get.trunk.io -fsSL | bash -s -- -y -``` - - -To allow your teammates to use `trunk` without installing anything, the launcher can be committed directly into your repo: - -``` -curl -fsSLO --retry 3 https://trunk.io/releases/trunk -chmod +x ./trunk -git commit ./trunk -m "Commit Trunk to our repo" -``` - -#### Other ways to install - - - -If your project uses a `package.json`, you can specify the Trunk Launcher as a dependency so your developers can start using Trunk after installing Node dependencies. - -```sh -# npm -npm install -D @trunkio/launcher -# pnpm -pnpm add -D @trunkio/launcher -# yarn -yarn add -D @trunkio/launcher -# bun -bun install -D @trunkio/launcher -``` - -Then add Trunk Launcher in your `package.json` as a script: - -```json -{ - "scripts": { - "trunk": "trunk", - "lint": "trunk check", - "fmt": "trunk fmt" - } -} -``` - - - -You can run the following command if you prefer to install this tool via [homebrew](https://brew.sh/). Keep in mind that other developers on your team will also have to install manually. - -```bash -brew install trunk-io -``` - - - -From **`git-bash` or `msys2`**, download the Bash launcher and add it to your `PATH`: - -```bash -curl https://get.trunk.io -fsSL | bash -``` - -From **`powershell`**, download the powershell launcher: - -``` -Invoke-RestMethod -Uri https://trunk.io/releases/trunk.ps1 -OutFile trunk.ps1 -``` - -Ensure you can execute powershell scripts: - -``` -Set-ExecutionPolicy Bypass -Scope CurrentUser -``` - -You can then execute trunk as `.\trunk.ps1`. - -**Compatibility** - -Only some versions of Trunk are compatible with Windows. See the compatibility page for [Windows](./getting-started/compatibility) to learn more. - -You will also need to install [C and C++ runtime libraries](https://aka.ms/vs/17/release/vc_redist.x64.exe) in order to run some linters. 
- - - -### Initializing Trunk - -Before you can use Trunk, you need to initialize Trunk in your repo. Initializing Trunk will generate the necessary config files, recommend linters based on your project files, and configure githooks. - -Initialize Trunk by running the `init` command. - -```bash -./trunk init -``` - -Follow the wizard. You'll be prompted with the following options: - -1. `Sign up or log in`: Connect the CLI with your Trunk account to enable all of Trunk's features. -2. Trunk will automatically [enable the most useful linters](#recommended-linters) based on the files in your repo. -3. `Trunk will manage your git hooks and enable some built-in hooks.`: This sets up Trunk to run automatically on commit and before you push, saving you time waiting for CI only to have it fail. -4. `Trunk will now run a local, one-time scan of your code and report any issues it finds`: This initial scan will give you a good overview of the problem areas in your code. Subsequent scans will only run on changed lines using hold-the-line. - - -**Trunk is Git aware** - -Trunk speeds up your linting process by running on only the files that have changed in your branch compared to upstream. This means if you're using a base/trunk branch that's not `master` or `main`, you will need to specify it in your `.trunk/trunk.yaml` - -```yaml -version: 0.1 -cli: - version: 1.22.2 -repo: - # develop is the branch that everyone's work is merged into - trunk_branch: develop -... rest of configs -``` - - -### Run Linters - -After initialization, you can run the [recommended set of linters](./initialize-trunk#recommended-linters) by running: - -``` -./trunk check -``` - -:tada: And just like that, you're ready to start using Trunk Code Quality. - -### The .trunk Directory - -After initialization, a new folder `.trunk` will be generated with the following content. 
- -``` -.trunk -├── actions/ -├── configs/ # This is where linter configs live -├── logs/ # Logs for debugging -├── notifications/ -├── out/ -├── plugins/ -├── tools/ -└── trunk.yaml # Top-level Trunk config -``` - -You will spend most of your time configuring Trunk Code Quality's linter definitions `trunk.yaml` and individual linter configurations in `configs`. - -### Recommended Linters - -During initialization, Trunk Code Quality will recommend some linters based on files found in your project. Trunk Code Quality will recommend common linters for your language, but the [full list of supported linters can be found here](./linters/supported/). - -You can enable and disable individual linters by running: - -```bash -trunk check enable -trunk check disable -``` - -You can also see all linters and whether they're enabled by running: - -```bash -trunk check list -``` - -### IDE Integration - -Trunk Code Quality supports [VSCode](./ide-integration/vscode) and [Neovim](./ide-integration/neovim) through extensions. Using VSCode and Neovim will provide inline linter annotations as you code. - -### Move Existing Configs - -If you have existing linter configs in your repo, you can move them into the `.trunk/configs` folder. These config files will be symlinked in during any `trunk check` run. - - -If you're using an IDE Extension like `clangd` with an LSP that relies on those configs being in the root, you must create an additional symlink from the hidden config to the workspace root. - - -### Next Steps - -After initializing Trunk Code Quality, you can check for issues and configure Code Quality. The [next steps](./deal-with-existing-issues) in Setup & Installation will walk you through this process. 
diff --git a/code-quality/overview/licensing.mdx b/code-quality/overview/licensing.mdx deleted file mode 100644 index 5fe8e64..0000000 --- a/code-quality/overview/licensing.mdx +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: "Licensing" ---- -### Introduction - -Trunk Code Quality is a powerful metalinter that simplifies linting, formatting, and static analysis across your entire codebase. By integrating over 100 supported tools like ESLint, Prettier, Ruff, and more, it enables you to manage code quality with unified configuration and consistent reporting. Trunk Code Quality helps you install tools hermetically, run them efficiently, and integrate seamlessly with pull requests and CI pipelines. - -### Licensing overview - -Trunk Code Quality is composed of a closed-source core complemented by open-source components that enhance extensibility and integration. Understanding the licensing terms for each part ensures compliance and optimal use. - -#### Closed-Source components - -* Trunk CLI: The core command-line tool is closed-source but free to use under specific conditions. -* VS Code Extension: Integrates Trunk Code Quality directly into your development environment. Under the hood, all code-checking by the VS Code extension is completed via the Trunk CLI, which drives the VS Code extension. - -#### Open-Source components - -* Plugin System and Configurations: An extensible plugin system that allows you to define, extend, and share linter configurations. These plugins are open-source under the MIT License, enabling you to modify them or create new ones to integrate additional tools or customize behavior. -* GitHub Action: Scripts that automate Trunk Code Quality checks in your GitHub workflows. GitHub Actions require the source code to be visible for transparency and security. Our GitHub Action is open-source under the MIT License, allowing you to review, modify, and ensure it meets your needs. 
- -By open-sourcing these components, we promote transparency, extensibility, and community collaboration. This approach encourages our community and customers to contribute to the ecosystem, enhancing Trunk Code Quality for everyone. - -#### Free usage - -You can use the Trunk CLI and access core functionalities for free under the following conditions: - -* Open-Source and Public Projects: Unlimited use in public repositories. -* Private Repositories - * Free for teams with up to 5 active non-bot committers. - * An active committer is a non-bot user who has committed in the last 30 days. - -#### Paid licensing - -For private repositories with over 5 active committers, a paid license is required to comply with Trunk Code Quality’s licensing agreement. While all features remain accessible, payment is necessary to meet licensing obligations and support the continued development of the product. - -Compliance and Support - -* Licensing Compliance: Payment ensures your use of Trunk Code Quality aligns with the licensing terms for larger teams. -* Dedicated Support: Paid customers receive prioritized support to help with integration, troubleshooting, and maximizing the benefits of Trunk Code Quality. - -#### How billing works - -Trunk Code Quality offers two billing options for paid licenses: - -**1. Team Plan - Monthly Self-Serve Billing** - -* Per-Seat Model: Billing is based on the monthly active committers in your private repositories. -* User Count Calculation: - * Counts non-bot users who have made commits in the last 30 days. - * Calculated at the end of each billing period to adjust the next invoice. -* Integration with GitHub App: Install the Trunk GitHub App to allow us to measure active monthly users. -* Billing Cycle: Month-to-month billing with invoices reflecting the latest user count. - -**2. Enterprise Plan - Annual Site License** - -* Fixed User Count: Based on the number of active committers at the beginning of the licensing term. 
-* Organization-Wide License: Provides a license for all users in the organization during the entire term without the need to purchase additional licenses for new employees. -* Simplified Billing: One annual payment covers all users for the year. -* Discount: Incentives available with annual plans for logo usage, case study, and/or scale. - -**Choosing the Right Option** - -* Team Plan: Appropriate for small teams that prefer flexibility and want to pay monthly with a credit card. -* Enterprise Plan: Best for organizations that prefer predictable costs, to avoid the administrative overhead of tracking monthly user counts, and wish to benefit from the discounted rate. - -### FAQs - -**What are the benefits of paying for Trunk Code Quality?** - -Paying for Trunk Code Quality offers several important benefits: - -* Licensing Compliance: For private repositories with 5 or more active committers, purchasing a license is required to comply with Trunk Code Quality’s licensing terms and continue using the product legally and effectively. -* Dedicated Support: Receive prioritized assistance to help integrate, troubleshoot, and maximize the product’s benefits in your production environment. -* Priority Feature Requests: Your requests for new features and plugin integrations receive high priority, allowing you to influence the product’s development to suit your needs better. -* Expert Consultation: Access advisory services from our team to optimize your code quality setup and linting processes. -* Onboarding Assistance: Receive support and best practices guidance during the integration of Trunk Code Quality into your workflows. - -Importantly, all features are available regardless of licensing status; you do not unlock additional features by purchasing a license. However, buying a license ensures compliance with the licensing terms, supports the continued development of Trunk Code Quality, and provides access to the dedicated support and benefits listed above. 
- -**Do you provide free Proofs of Concept (POCs)?** - -Yes, we are happy to provide 2–4 week free POCs for teams that want to evaluate our product's capabilities with their team and as part of their CI. We also provide dedicated support and guidance throughout the POC period. Email us to get started at: [sales@trunk.io](mailto:sales@trunk.io). - -**What happens if I exceed the free usage limits?** - -If you exceed the free tier limits (e.g., more than 5 active committers in a private repository), you must obtain a paid license to continue using Trunk Code Quality in compliance with the licensing agreement. - -**Is the Trunk CLI free to use?** - -The Trunk CLI is free for public repositories and private repositories with fewer than 5 active committers. For private repositories with 5 or more active committers, a paid license is required to comply with the licensing agreement. - -**Can I use Trunk Code Quality in CI/CD pipelines for free?** - -Yes, you can integrate the Trunk CLI into your CI/CD pipelines for free if you’re within the free usage limits (public repositories or private repositories with fewer than 5 active committers). Exceeding these limits requires a paid license. - -**Is support provided for free users?** - -Yes, free users can seek help through our community Slack channel at [slack.trunk.io](https://slack.trunk.io) or participate in discussions on our [GitHub page](https://github.com/orgs/trunk-io/discussions). Our community members and developer relations engineers regularly participate in discussions and answer questions. - -**Why are some components open-source while the core is closed-source?** - -* Core Functionality: The Trunk CLI provides the core functionality and is closed-source to protect proprietary technology and ensure a consistent, reliable experience. -* Open-Source Components: The plugin system and GitHub Action are open-source to promote transparency, security, and community-driven extensibility. 
This allows you to customize integrations and contribute to the development of plugins and workflows. - -**Do I need the Trunk CLI to use the open-source components?** - -Yes, the open-source components are designed to work with the Trunk CLI. They enhance and extend the functionality provided by the core tool but are not standalone applications. - -**How to Contribute to the Open-Source Components?** - -* Plugin Development: You can develop new plugins or improve existing ones by visiting our public GitHub repository at [github.com/trunk-io/plugins](https://github.com/trunk-io/plugins). -* GitHub Action: Modify or fork our GitHub Action to better suit your CI workflows. The source code is available at [github.com/trunk-io/trunk-action](https://github.com/trunk-io/trunk-action). -* Community Engagement: Join our community Slack channel at [slack.trunk.io](https://slack.trunk.io) to collaborate with other users and our development team. - -#### Contact us - -For licensing inquiries, to obtain a paid license, or to discuss which billing option is best for your organization, please contact [sales@trunk.io](mailto:sales@trunk.io). We’re here to help you ensure compliance and get the most out of Trunk Code Quality. diff --git a/code-quality/overview/linters/configure-linters.mdx b/code-quality/overview/linters/configure-linters.mdx deleted file mode 100644 index f1b5269..0000000 --- a/code-quality/overview/linters/configure-linters.mdx +++ /dev/null @@ -1,285 +0,0 @@ ---- -title: "Configure linters" ---- -Trunk Code Quality's linter integrations are fully configurable. This means that you can easily tune existing linters or leverage our caching and [hold-the-line](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line) solution with your own custom linters. - -Here's an overview of the ways you can configure linters. - -### Config hierarchy - -Linters can be configured at different places: - -1. The source plugin repo usually `https://github.com/trunk-io/plugins`. -2. 
The repo-wide Trunk config file overrides the definitions in the plugin repos, `.trunk/trunk.yaml` -3. Local, per-user configuration in `.trunk/user.yaml` which is used for local overrides of `.trunk/trunk.yaml` and doesn't get checked into your repository. -4. Per linter configuration in linter config files such as `eslint.config.js` or `.prettierrc`. - -### Plugin system - -Trunk defines linter configuration in a plugin system. By default, it'll point to the [Trunk plugin repo on GitHub](https://github.com/trunk-io/plugins). You can check if other custom plugin sources are specified in your `trunk.yaml` file for [shared-configs.md](./shared-configs.md "mention"). - -```yaml -version: 0.1 -cli: - version: 1.22.2 -# Trunk provides extensibility via plugins. (https://docs.trunk.io/cli/configuration/plugins) -plugins: - sources: - - id: trunk - ref: v1.6.1 - uri: https://github.com/trunk-io/plugins -``` - -### Linter definitions - -Each linter implemented in the Plugin Repo has its own linter definition. Let's take clang-tidy as an example, which ships with the following default configuration: - -```yaml -definitions: - ... - - name: clang-tidy - files: [c/c++-source] - type: llvm - commands: - - output: llvm - run: clang-tidy --export-fixes=- ${target} - success_codes: [0] - download: clang-tidy - direct_configs: [.clang-tidy] - disable_upstream: true - include_scanner_type: compile_command - environment: - - name: PATH - list: ["${linter}/bin"] - ... -``` - -#### Linter definition reference - -You can find the default definitions for linters in the [Plugin Repo](https://github.com/trunk-io/plugins/tree/main/linters) and find references for these fields on the [Linter Definitions](../getting-started/configuration/lint/definitions) page. - -### Overriding default linter definitions - -You may find while using Trunk that you want to modify one of these defaults: perhaps you want `clang-tidy` to not run on the upstream, or maybe you want the `node` runtime to include another environment variable. 
In these cases, you can specify the field in your `trunk.yaml` to override the default value. - -If you wanted to flip the value of `disable_upstream` to `false`, you could, in your own `trunk.yaml`, specify: - -```yaml -definitions: - ... - - name: clang-tidy - disable_upstream: false - ... -``` - - -Overriding definitions in your `trunk.yaml` file doesn't require you to specify the entire definition again. You only need to specify what's being overridden. - - -#### Configure linter commands - -Some linters have multiple commands, such as [Ruff](./supported/ruff), which can run in different ways. By default, Ruff is configured to only run as a linter: - -```yaml -lint: - enabled: - - ruff@: - commands: [lint] -``` - -You can configure ruff to also run the format command by adding it to the commands tuple: - -```yaml -lint: - enabled: - - ruff@: - commands: [lint, format] -``` - -#### Configure linter platforms - -Similarly, some linters are configured to run differently on different platforms or at different versions. When overriding a command definition, overrides are applied on the tuple `[name, version, platforms]`. - -For example, if you wanted to disable batching when running [ktlint](https://github.com/trunk-io/plugins/blob/main/linters/ktlint/plugin.yaml) on Windows, you could consider its default configuration: - -```yaml -definitions: - ... - - name: ktlint - ... - commands: - - name: format - platforms: [windows] - run: java -jar ${linter}/ktlint.exe -F "${target}" - output: rewrite - cache_results: true - formatter: true - in_place: true - batch: true - success_codes: [0, 1] - - name: format - run: ktlint -F "${target}" - output: rewrite - cache_results: true - formatter: true - in_place: true - batch: true - success_codes: [0, 1] - ... -``` - -And override it as such: - -```yaml -definitions: - ... - - name: ktlint - ... - commands: - - name: format - platforms: [windows] - batch: false - ... 
-``` - -When executing linters, Trunk will execute the first matching command based on its compatible platforms and linter version. Note when overriding that new commands that don't match an existing tuple are prepended to the resulting commands list. - -Alternatively, consider the default `node` runtime: - -```yaml -runtimes: - definitions: - - type: node - download: node - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: ["${runtime}/bin"] - linter_environment: - - name: PATH - list: ["${linter}/node_modules/.bin"] - version: 16.14.2 - version_commands: - - run: "node --version" - parse_regex: ${semver} -``` - -If you want to add `${home}/my/special/node/path` to `PATH`, you could specify the following: - -```yaml -runtimes: - - type: node - runtime_environment: - - name: HOME - value: ${home} - - name: PATH - list: ["${home}/my/special/node/path", "${runtime}/bin"] -``` - -### Blocking thresholds - -All issue severities low-high are considered blocking by default. In cases where you might want to slowly try out a new linter, we provide a mechanism to set specific thresholds for each linter. - -```yaml -lint: - threshold: - - linters: [clang-tidy] - level: high -``` - -Every entry in `threshold` defines a set of linters and the severity threshold that is considered blocking. In this example, we're saying that only `high` lint issues should be considered blocking for `clang-tidy`. - -| Key | Value | -|---|---| -| linters | List of linters (e.g. `[black, eslint]`) or the special `[ALL]` tag | -| level | Default `low`. Threshold at which issues are considered blocking. One of: `note`, `low`, `medium`, `high`, or `none` (this last option will result in issues never blocking) | - -### Trigger rules - -Some linters do not operate on individual files. Instead, you must lint your entire repo at once. The way this is handled in Trunk is to set up a trigger rule. Most linters will not require the use of a trigger rule. 
- -Trigger rules work on 3 principles: - -1. Input(s) that trigger the linters. These can be files, directories, or extended globs. -2. Linter(s) to run when a triggered file is modified. -3. Targets(s) to pass to the linters (can be files or directories). - -An example for ansible-lint: - -```yaml -lint: - enabled: - - ansible-lint@5.3.2 - - triggers: - - linters: - - ansible-lint - paths: - - ansible # A directory - targets: - - ansible # A directory -``` - -Triggered linters will also be run when executing trunk check with `--all` so long as a file exists that matches one of the listed paths. - -You may use `.` as a target to run on the entire repo instead of an isolated directory. - -### File size - -By default, Trunk only lints files up to 4 MiB in size. To override this globally, specify a `default_max_file_size` in `lint`: - -```yaml -lint: - default_max_file_size: 1048576 # Bytes -``` - -To override this for a specific linter, specify a `max_file_size` in its definition: - -```yaml -lint: - definitions: - - name: prettier - max_file_size: 2097152 # Bytes -``` - -### Timeout - -Each linter has a default timeout of 10 minutes. If its execution takes longer than this amount of time, Trunk Code Quality will terminate the process and return an error to the user. - -To override the timeout for a specific linter, specify a `run_timeout` in its definition: - -``` -lint: - definitions: - - name: clang-tidy - run_timeout: 5m -``` - -The `run_timeout` value can be specified in seconds (`s`), minutes (`m`), or hours (`h`). - -### Local linter overrides - -Trunk can also be managed by the `.trunk/user.yaml` file in your repository. This file is optional, but it allows individual developers to customize how they want `trunk` to run on their machines. - -Simply configure `.trunk/user.yaml` as you would for `.trunk/trunk.yaml`. Be mindful that `.trunk/user.yaml` takes precedence over `.trunk/trunk.yaml`, so substantial modifications could violate hermeticity. 
- -### Per linter definitions - -Trunk allows you to keep using your existing linter configs, and new linters recommended by Trunk will have their configs added in the `.trunk/configs` folder. These config files will be symlinked in during any `trunk check` run. - - -If you're using an IDE Extension like clangd with an LSP that relies on those configs being in the root, you will need to create an additional symlink from the hidden config to the workspace root. - - -#### Moving linters - -You can move existing linter config files into the `.trunk/config` folder. You can check which files are automatically symlinked by looking for the `direct_configs` of [each plugin's definition](https://github.com/trunk-io/plugins/). If there are config files not listed, you can add them by overriding the definition like this: - -```yaml -lint: - definitions: - - name: some_linter_name - direct_configs: - - .custom_config.file -``` diff --git a/code-quality/overview/linters/custom-linters.mdx b/code-quality/overview/linters/custom-linters.mdx deleted file mode 100644 index bf25d90..0000000 --- a/code-quality/overview/linters/custom-linters.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Custom linters" ---- -Trunk Code Quality allows you to define custom linters. If a linter is not within the [list of supported linters](./supported/) or you have a bespoke solution, you can define a custom linter. - -### Defining a custom linter - -You can define linters right in your `.trunk/trunk.yaml` file in your repo. These definitions have the same configurable parameters as in our [public plugins repo](https://github.com/trunk-io/plugins/blob/main/CONTRIBUTING.md) or [your own plugins repo](../getting-started/configuration/plugins/external-repositories). - -#### Pass-Fail linter script example - -For example, you can define a simple [pass-fail linter](./custom-linters#pass-fail-linter-script-example) that runs a custom script file. 
The linter passes or fails based on the status code returned. - -```yaml -version: 0.1 -cli: - version: 1.22.1 -lint: - enabled: - - SampleLinter - definitions: - - name: SampleLinter - files: [javascript, typescript] - commands: - - name: lint - run: sh ${workspace}/.trunk/myscript.sh ${target} - output: pass_fail - success_codes: [0, 1] -``` - -#### Inline grep command example - -You can also define simple linters inline using tools like `grep`. This linter will grep against your custom regex pattern, format the output using sed, and then parse the output into pattern groups using a [regex output](../getting-started/configuration/lint/output#regex) for Trunk Code Quality to report. - -```yaml -# This file controls the behavior of Trunk: https://docs.trunk.io/cli -# To learn more about the format of this file, see https://docs.trunk.io/cli/configuration -version: 0.1 -cli: - version: 1.22.1 -lint: - enabled: - - SampleGrepLinter - definitions: - - name: SampleGrepLinter - files: [ALL] - commands: - - name: lint - run: bash -c "grep -o -E '' --line-number --with-filename ${target}" - success_codes: [0, 1] - read_output_from: stdout - parser: - run: 'sed -E "s/([^:]*):([0-9]+):(.*)/\1:\2:0: [error] Found \3 in line (numeric-\3)/"' - output: regex - parse_regex: "(?P.*):(?P-?\\d+):(?P-?\\d+): \\[(?P[^\\]]*)\\] (?P[^\\(]*) \\((?P[^\\)]*)\\)" -``` - -To see the configurable fields available [Linter Definition Reference](../getting-started/configuration/lint/definitions). - -### Contributing a new linter - -The [Trunk Code Quality plugins repo](https://github.com/trunk-io/plugins/blob/main/CONTRIBUTING.md) is public and welcomes contributions. Feel free to open a PR if the new custom linter you defined could be useful to others. You can reach out to us [on Slack](https://slack.trunk.io/) if you need a hand. 
diff --git a/code-quality/overview/linters/ignoring-issues-and-files.mdx b/code-quality/overview/linters/ignoring-issues-and-files.mdx deleted file mode 100644 index 054e892..0000000 --- a/code-quality/overview/linters/ignoring-issues-and-files.mdx +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: "Ignoring issues and files" ---- -## Ignoring parts of a file - -Sometimes we want to deliberately tell a linter that, yes, I know what I'm doing, and yes, in any other situation I should _not_ do this, but in this specific case it's fine. Maybe there's a dummy private key you're using for a test stack, or fixing the lint issue will actually make your code less readable: whatever it is, you now need to figure out how to suppress a given lint issue. - -Trunk provides a simple, standardized mechanism to do this, saving you from having to look up the linter-specific syntax for doing so: - -```cpp -struct FooBar { - // trunk-ignore(clang-tidy/modernize-use-nullptr): load-bearing NULL, see ISSUE-832 - void *ptr = NULL; -}; -``` - -This tells Trunk that the `clang-tidy` linter found a `modernize-use-nullptr` issue on the highlighted line and that Trunk should suppress this linter issue. 
- -Comments may be omitted: - -```cpp -struct FooBar { - // trunk-ignore(clang-tidy/modernize-use-nullptr) - void *ptr = NULL; -}; -``` - -You can also omit the name of the check to simply tell Trunk that all issues from a given linter on a specific line should be suppressed: - -```cpp -struct FooBar { - // trunk-ignore(clang-tidy) - void *ptr = NULL; -}; -``` - -`trunk-ignore` directives can also be placed at the end of the line on which they're suppressing lint issues: - -```cpp -struct FooBar { - void *ptr1 = NULL; // trunk-ignore(clang-tidy/modernize-use-nullptr) - void *ptr2 = NULL; // trunk-ignore(clang-tidy) -}; -``` - -If you need to suppress issues from multiple linters, `trunk-ignore` supports that too: - -```cpp -struct FooBar { - // trunk-ignore(clang-tidy): ISSUE-914 explains why the `void *` type is needed - // trunk-ignore(gitleaks,my-custom-linter/do-not-hardcode-passwords): see ISSUE-915 - void *super_secret_password = (void *)("915dr~S$Pzqod~oR*CrQ$/SQ@hbtQBked:CL@z!y]"); -}; -``` - -`trunk-ignore` directives can also apply to other `trunk-ignore`s if need be: - -```ts -// trunk-ignore(eslint/max-line-length) -// trunk-ignore(eslint/@typescript-eslint/no-unsafe-member-access,eslint/@typescript-eslint/no-unsafe-assignment) -const version = parsedConfig.version; -``` - -### Ignoring all issues/formatting in a file - -You can also ignore all issues or formatting in a file: - -```cpp -// trunk-ignore-all(clang-tidy) -struct FooBar { - void *ptr1 = NULL; - void *ptr2 = NULL; -}; -``` - - -`trunk-ignore-all` is not required to be the first line of a file, because we recognize that other constructs (shebangs, front matter, docstrings) may need to take precedence. 
- - -### Ignoring all issues in a code block - -Alternatively, you can ignore all matching issues in a code block: - -```cpp -struct FooBar { - // trunk-ignore-begin(clang-tidy) - void *ptr1 = NULL; - void *ptr2 = NULL; - // trunk-ignore-end(clang-tidy) -}; -``` - -### Tracking unused ignores - -Trunk will alert you if your `trunk-ignore` directives are unused. This can happen due to user error or even innocuously over time, for example, if your internal APIs change or if a linter's output changes. - -``` -app/parse.ts:18:3 - 18:3 note trunk-ignore(eslint/@typescript-eslint/no-unsafe-member-access) trunk/ignore-does-nothing - is not suppressing a lint issue -``` - -Hold the Line will continue to only surface ignore issues that you have introduced, and these issues will have a `note` [severity](./configure-linters#blocking-thresholds), indicating they are non-blocking by default. - -If you need to, you can ignore issues from unused `trunk-ignore` directives, using `trunk-ignore(trunk)`: - -``` -// trunk-ignore(trunk): This error will resurface after our API migration. -// trunk-ignore(eslint/@typescript-eslint/no-unsafe-member-access) -``` - -### Specification - -The syntax of a trunk-ignore directive is as follows: - -``` - ::= "(" ")" - ::= "trunk-ignore" | "trunk-ignore-begin" | "trunk-ignore-end" | "trunk-ignore-all" - ::= - ::= "," - ::= - ::= "/" - ::= ": " -``` - -## Ignoring multiple files - -Some files are never meant to be checked, such as generated code. To ignore them, use the `ignore` key to your `.trunk/trunk.yaml` file: - -```yaml -lint: - ignore: - - linters: [ALL] - paths: - # Ignore generated files - - src/generated/** - # Except for files ending in .foo - - !src/generated/**/*.foo # Test data - - test/test_data -``` - -Every entry in `ignore` defines both a set of linters and a set of paths to ignore. - -| Key | Value | -|---|---| -| linters | List of linters (i.e. 
`[black, eslint]`) or the special `[ALL]` tag | -| paths | List of [glob paths](https://en.wikipedia.org/wiki/Glob_(programming)), relative to the root of the repo, to ignore. If a path begins with a `!` then it represents an inverse ignore. This means that any file matching that glob will not be ignored, even if matched by other globs. | - - -Trunk is `git`-aware, which means it ignores `gitignore'd` files by default. - - -### Known issues - -`trunk-ignore` does not currently support: - -* suppressing findings on lines 0 or 1 using inline/block directives - -If you need any of these to be supported, or you have another edge case, please reach out to us on the [Trunk community slack](https://slack.trunk.io). diff --git a/code-quality/overview/linters/index.mdx b/code-quality/overview/linters/index.mdx deleted file mode 100644 index 52f6b2b..0000000 --- a/code-quality/overview/linters/index.mdx +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "Linters" ---- -Trunk Code Quality supports over [100 different linters](./supported/) and formatters out of the box. This section covers how to run, manage, and configure these linters. - -### Supported Linters - -Trunk supports 100+ different linters and formatters. See the [Supported Linters](./supported/) page to find the linters you need to maintain code quality in your repos. - -### Run Linters - -Trunk Code Quality supports many flexible ways to run your installed linters, for every project and every occasion. - -[Learn the commands available ](./run-linters)for the Trunk CLI. - -### Manage Linters - -Find and enable the linters you need to keep your code base healthy. Trunk helps you manage your long list of static analysis tools and runtimes through hermetic installs. - -[Learn how to discover, install, and upgrade linters](./#manage-linters) in your projects. - -### Configure Linters - -Trunk Code Quality's linter integrations are fully configurable. 
This means that you can easily tune existing linters or leverage our caching and [hold-the-line](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line) solution with your custom linters. - -[Learn to configure your linters](./configure-linters) to get the most out of Trunk Code Quality. - -### Ignoring Issues and Files - -Trunk Code Quality lets you configure flexible ignore for your linters. You can ignore issues by line, by files, by path, by issue type, by severity level, by file extension, and more. - -[Learn to ignore irrelevant issues.](./ignoring-issues-and-files) - -### Custom Linters - -Trunk lets you turn simple scripts into fully-powered linters by running these linters and giving them support for features like ignores, [hold-the-line,](/broken/pages/U4nTQBazaodt2vJadyRw#hold-the-line) and other powerful configurable features, - -[Learn to create your own Custom Linters.](./custom-linters) - -### Shared Configs - -If your team has many repositories, many teams, and many languages, you would want to share a common set of config files to keep your **entire organization consistent**. - -[Learn to share configurations](./shared-configs) across your org. diff --git a/code-quality/overview/linters/manage-linters.mdx b/code-quality/overview/linters/manage-linters.mdx deleted file mode 100644 index 4450654..0000000 --- a/code-quality/overview/linters/manage-linters.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Manage linters" ---- -### Using the CLI - -List all of the available linters - -```sh -trunk check list -``` - -Enable a single linter - -```sh -trunk check enable -``` - -Disable a single linter - -```sh -trunk check disable -``` - -### Using Trunk config files - -Trunk only runs linters listed in the `enabled` section; linters which are defined in `lint.definitions` but are not listed in `enabled` are not run. 
- -When enabling a linter, you must specify a version for the linter: - -```yaml -lint: - enabled: - # enabling a version with a linter - - gitleaks@7.6.1 - - gofmt@1.16.7 - - golangci-lint@1.41.1 - - hadolint@2.6.0 -``` - -Custom linters are slightly different; see [those docs](./custom-linters) to learn more. - -### Disable linters - -Trunk will continuously monitor your repository and make recommendations of additional new tools to run on your codebase. You can tell Trunk not to recommend a specific linter by adding it to the disabled list. - -```yaml -lint: - disabled: - # disabled a linter tells trunk not to recommend it during upgrade scans - - rufo - - tflint -``` - -### Upgrading linters - -Run `trunk upgrade` to update the Trunk CLI and all your plugins, linters, tools, and runtimes. diff --git a/code-quality/overview/linters/run-linters.mdx b/code-quality/overview/linters/run-linters.mdx deleted file mode 100644 index 8bddd68..0000000 --- a/code-quality/overview/linters/run-linters.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "Run Linters" ---- -The main commands when running `trunk` from the command line are: - -```bash -trunk check # runs the universal linter on all applicable files -trunk fmt # runs all the enabled formatters and auto-applies changes -``` - -You can always find this list using `trunk check --help`. - - -Trunk is git-aware. When you run `trunk check` it will **only run on files you've modified according to git**. To run on a sampling in your repo, run: `trunk check --sample 5` - - -### check - -`trunk check` runs linters & formatters on your changed files, prompting you to apply fixes. Without additional args, `trunk check` will run all applicable linters on all files changed in the current branch. - -### fmt - -Run all applicable formatters as configured in `trunk.yaml`. `trunk fmt` is short-hand for running\ -`trunk check` with a `--fix --filter` set to all formatters enabled in your repository. 
- -## Options - -| options | | -|---|---| -| `--all` | Run on all the files in the repository. Useful if trying to assess a new linter in the system, or to find and fix pre-existing issues | -| `--fix` | Auto-apply all suggested fixes | -| `--no-fix` | Surface, but do not prompt for autofixes | -| `--filter` | List of comma-separated linters to run. Specify `--filter=-linter` to disable a linter. | -| `--sample=N` | Run check on a [sampling](#sample) of all files in the repo | -| `--help` | Output help information | - -### Recipes - -| Check | Command | -| ------------------------------------------------------------ | -------------------------------------------- | -| all files | `trunk check --all --no-fix` | -| a specific file | `trunk check some/file.py` | -| all applicable files with flake8 | `trunk check --all --no-fix --filter=flake8` | -| a selection of five files in the repo | `trunk check --sample 5` | -| a selection of five files in the repo with a specific linter | `trunk check --sample 5 --filter=flake8` | -| format the whole repo | `trunk fmt --all` | -| format a specific file | `trunk fmt some/file.py` | -| format all python code with `black` | `trunk fmt --all --filter=black` | - diff --git a/code-quality/overview/linters/shared-configs.mdx b/code-quality/overview/linters/shared-configs.mdx deleted file mode 100644 index ccb36af..0000000 --- a/code-quality/overview/linters/shared-configs.mdx +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "Shared configs" ---- -## Single repo - -Linters are automatically shared with all developers for a repository using the [`.trunk/trunk.yaml` file](../getting-started/configuration/). This file is committed to the repo, so whenever anyone checks out the code, they will get the same configuration and linters. See the [Trunk YAML guide](../getting-started/configuration/) for more details. 
- -## Per user config - -If you wish to customize a linter for just one developer (say, disable a slow linter on a slow machine), you can create a per-user config in the `.trunk/user.yaml` file, which should **not** be committed to the repo. - -## Multiple repos - -If you wish to share linters between different repos, copy the config manually or create a shared Plugin repo. This is a set of configuration and code that is imported into the `plugins` section of a project's `./trunk/trunk.yaml` . diff --git a/code-quality/overview/linters/supported/actionlint.mdx b/code-quality/overview/linters/supported/actionlint.mdx deleted file mode 100644 index 26d9bad..0000000 --- a/code-quality/overview/linters/supported/actionlint.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Actionlint" -description: "Explore our guide on Actionlint, the linter for GitHub Actions. Learn about its features, installation, and configuration." ---- -[**Actionlint**](https://github.com/rhysd/actionlint) is a linter for GitHub. - -You can enable the Actionlint linter with: - -```shell -trunk check enable actionlint -``` - -## Auto Enabling - -Actionlint will be auto-enabled if any _GitHub-workflow_ files are present. - -## Settings - -Actionlint supports the following config files: - -* `.github/actionlint.yaml` -* `.github/actionlint.yml` - -Unlike with most tools under `trunk check`, these files cannot be moved. 
- -## Links - -* [Actionlint site](https://github.com/rhysd/actionlint) -* Actionlint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/actionlint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/ansible-lint.mdx b/code-quality/overview/linters/supported/ansible-lint.mdx deleted file mode 100644 index 6c3c29f..0000000 --- a/code-quality/overview/linters/supported/ansible-lint.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Ansible-lint" -description: "Checks playbooks for practices and behavior that could potentially be improved and can fix some of the most common ones for you" ---- -[**Ansible-lint**](https://github.com/ansible/ansible-lint) is a linter for Ansible. - -You can enable the Ansible-lint linter with: - -```shell -trunk check enable ansible-lint -``` - -## Auto Enabling - -Ansible-lint will never be auto-enabled. It must be enabled manually. - -## Settings - -Ansible-lint supports the following config files: - -* `.ansible-lint` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -**Ansible-lint** must be configured with a trigger. See the [trigger rules](../#trigger-rules) documentation for more information. - -If your ansible setup is not contained within a single folder you would list all files and directories belonging to your ansible setup. 
- -## Links - -* [Ansible-lint site](https://github.com/ansible/ansible-lint) -* Ansible-lint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/ansible-lint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/autopep8.mdx b/code-quality/overview/linters/supported/autopep8.mdx deleted file mode 100644 index e826e81..0000000 --- a/code-quality/overview/linters/supported/autopep8.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "Autopep8" -description: "Autopep8 automatically formats Python code to meet PEP 8 standards, using pycodestyle to identify and correct formatting issues for cleaner code." ---- -[**Autopep8**](https://github.com/hhatto/autopep8#readme) is a formatter for Python. - -You can enable the Autopep8 formatter with: - -```shell -trunk check enable autopep8 -``` - - -![autopep8 example output](/assets/autopep8.gif) - - -## Auto Enabling - -Autopep8 will be auto-enabled if a `.pep8` config file is present. - -## Settings - -Autopep8 supports the following config files: - -* `.pep8` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [Autopep8 site](https://github.com/hhatto/autopep8#readme) -* Autopep8 Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/autopep8) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/bandit.mdx b/code-quality/overview/linters/supported/bandit.mdx deleted file mode 100644 index 8788cb6..0000000 --- a/code-quality/overview/linters/supported/bandit.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "Bandit" -description: "Bandit is a security linter for Python codebases. 
Bandit flags problems like hard-coded passwords, injection vulnerabilities, and the use of insecure libraries." ---- -[**Bandit**](https://github.com/PyCQA/bandit) is a linter for Python. - -You can enable the Bandit linter with: - -```shell -trunk check enable bandit -``` - - -![bandit example output](/assets/bandit.gif) - - -## Auto Enabling - -Bandit will be auto-enabled if any _Python_ files are present. - -## Settings - -Bandit supports the following config files: - -* `.bandit` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [Bandit site](https://github.com/PyCQA/bandit) -* Bandit Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/bandit) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/biome.mdx b/code-quality/overview/linters/supported/biome.mdx deleted file mode 100644 index 14fe8b6..0000000 --- a/code-quality/overview/linters/supported/biome.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Biome" -description: "Biome is a linter for JavaScript and TypeScript, improving code quality by automatically fixing issues, enforcing standards, and ensuring consistency." ---- -[**Biome**](https://biomejs.dev/) is a linter for JavaScript, TypeScript, jsx and json. - -You can enable the Biome linter with: - -```shell -trunk check enable biome -``` - -## Auto Enabling - -Biome will be auto-enabled if any of its config files are present: _`biome.json`, `rome.json`_. - -## Settings - -Biome supports the following config files: - -* `biome.json` -* `rome.json` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [Biome site](https://biomejs.dev/) -* Biome Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/biome) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/black.mdx b/code-quality/overview/linters/supported/black.mdx deleted file mode 100644 index 6aa6700..0000000 --- a/code-quality/overview/linters/supported/black.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "Black" -description: "Discover Black, the Python code formatter. Learn how to integrate it with Trunk Check for seamless coding style enforcement." ---- -[**Black**](https://pypi.org/project/black/) is a formatter for Python. - -You can enable the Black formatter with: - -```shell -trunk check enable black -``` - - -![black example output](/assets/black.gif) - - -## Auto Enabling - -Black will be auto-enabled if any _Python, Jupyter or Python-interface_ files are present. - -## Links - -* [Black site](https://pypi.org/project/black/) -* Black Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/black) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/brakeman.mdx b/code-quality/overview/linters/supported/brakeman.mdx deleted file mode 100644 index ddf844e..0000000 --- a/code-quality/overview/linters/supported/brakeman.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Brakeman" -description: "Brakeman is a static analysis tool designed for Ruby on Rails applications. It statically analyzes Rails application code to find security issues." ---- -[**Brakeman**](https://github.com/presidentbeef/brakeman) is a linter for Ruby. - -You can enable the Brakeman linter with: - -```shell -trunk check enable brakeman -``` - -## Auto Enabling - -Brakeman will be auto-enabled if any _Ruby_ files are present. 
- -## Settings - -Brakeman supports the following config files: - -* `brakeman.ignore` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [Brakeman site](https://github.com/presidentbeef/brakeman) -* Brakeman Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/brakeman) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/buf.mdx b/code-quality/overview/linters/supported/buf.mdx deleted file mode 100644 index 0bdbfa4..0000000 --- a/code-quality/overview/linters/supported/buf.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "buf" -description: "buf is a linter for Protobuf" ---- -[**buf**](https://github.com/bufbuild/buf#readme) is a linter for Protobuf. - -buf is composed of several linter commands. - -`buf-format` only runs the reformatting, not lint checking. - -You can enable the `buf-format` linter with: - -```shell -trunk check enable buf-format -``` - -`buf-lint` only runs the lint checking, not reformatting. - -You can enable the `buf-lint` linter with: - -```shell -trunk check enable buf-lint -``` - -`buf-breaking` only checks for breaking proto changes. - -You can enable the `buf-breaking` linter with: - -```shell -trunk check enable buf-breaking -``` - -## Auto Enabling - -buf will never be auto-enabled. It must be enabled manually. - -## Settings - -buf supports the following config files: - -* `buf.yaml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [buf site](https://github.com/bufbuild/buf#readme) -* buf Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/buf) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/buildifier.mdx b/code-quality/overview/linters/supported/buildifier.mdx deleted file mode 100644 index 3a7526f..0000000 --- a/code-quality/overview/linters/supported/buildifier.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Buildifier" -description: "Learn how to install, configure, and use buildifier effectively for Bazel build scripts." ---- -[**Buildifier**](https://github.com/rhysd/actionlint) is a linter for Bazel, Starlark. - -You can enable the Buildifier linter with: - -```shell -trunk check enable buildifier -``` - - -![buildifier example output](/assets/buildifier.gif) - - -## Auto Enabling - -Buildifier will be auto-enabled if any _Bazel or Starlark_ files are present. - -## Settings - -Buildifier supports the following config files: - -* `.buildifier.json` -* `.buildifier-tables.json` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [Buildifier site](https://github.com/rhysd/actionlint) -* Buildifier Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/buildifier) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/cfnlint.mdx b/code-quality/overview/linters/supported/cfnlint.mdx deleted file mode 100644 index b540f76..0000000 --- a/code-quality/overview/linters/supported/cfnlint.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "cfnlint" -description: "cfnlint is a linter for CloudFormation" ---- -[**cfnlint**](https://github.com/aws-cloudformation/cfn-lint#readme) is a linter for CloudFormation. - -You can enable the cfnlint linter with: - -```shell -trunk check enable cfnlint -``` - -## Auto Enabling - -cfnlint will be auto-enabled if any _CloudFormation_ files are present. - -## Links - -* [cfnlint site](https://github.com/aws-cloudformation/cfn-lint#readme) -* cfnlint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/cfnlint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/checkov.mdx b/code-quality/overview/linters/supported/checkov.mdx deleted file mode 100644 index 2d4bb4f..0000000 --- a/code-quality/overview/linters/supported/checkov.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Checkov" -description: "Checkov is a static code analysis tool for scanning infrastructure as code. It identifies misconfigurations in IaC files that could lead to security breaches." ---- -[**Checkov**](https://github.com/bridgecrewio/checkov) is a linter for CloudFormation, Security, Terraform and Docker. 
- -You can enable the Checkov linter with: - -```shell -trunk check enable checkov -``` - - -![checkov example output](/assets/checkov.gif) - - -## Auto Enabling - -Checkov will be auto-enabled if any _Terraform, CloudFormation, Docker, Yaml or Json_ files are present. - -## Settings - -Checkov supports the following config files: - -* `.checkov.yml` -* `.checkov.yaml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [Checkov site](https://github.com/bridgecrewio/checkov) -* Checkov Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/checkov) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/circleci.mdx b/code-quality/overview/linters/supported/circleci.mdx deleted file mode 100644 index 19c34cd..0000000 --- a/code-quality/overview/linters/supported/circleci.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "circleci" -description: "circleci is a linter for CircleCI Config" ---- -[**circleci**](https://github.com/CircleCI-Public/circleci-cli#readme) is a linter for CircleCI Config. - -You can enable the circleci linter with: - -```shell -trunk check enable circleci -``` - -## Auto Enabling - -circleci will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [circleci site](https://github.com/CircleCI-Public/circleci-cli#readme) -* circleci Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/circleci) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/clang-format.mdx b/code-quality/overview/linters/supported/clang-format.mdx deleted file mode 100644 index 197bbd9..0000000 --- a/code-quality/overview/linters/supported/clang-format.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: "ClangFormat" -description: "Clang Format is a set of tools to format code that is processed by the Clang compiler suite." ---- -[**ClangFormat**](https://clang.llvm.org/docs/ClangFormat.html) is a formatter for Protobuf and C, C++. - -You can enable the ClangFormat formatter with: - -```shell -trunk check enable clang-format -``` - -## Auto Enabling - -ClangFormat will be auto-enabled if a `.clang-format` config file is present. - -## Settings - -ClangFormat supports the following config files: - -* `.clang-format` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -By default, Trunk uses ClangFormat to additionally format `.proto` files. However, for this to work, you need to have told `clang-format` to do so in your `.clang-format` config file. 
You can do that by adding the following to the end of your `.clang-format file`: - -```yaml ---- -Language: Proto -``` - -For example, you might have this for your entire `.clang-format` file: - -```yaml -BasedOnStyle: Google -ColumnLimit: 100 ---- -Language: Cpp -DerivePointerAlignment: false ---- -Language: Proto -``` - -## Links - -* [ClangFormat site](https://clang.llvm.org/docs/ClangFormat.html) -* ClangFormat Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/clang-format) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/clang-tidy.mdx b/code-quality/overview/linters/supported/clang-tidy.mdx deleted file mode 100644 index 1d02384..0000000 --- a/code-quality/overview/linters/supported/clang-tidy.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "clang-tidy" -description: "A clang-based C++ linter tool to provide an extensible framework for diagnosing and fixing programming errors that can be deduced via static analysis." ---- -## clang-tidy - -[**clang-tidy**](https://clang.llvm.org/extra/clang-tidy/) is a linter for Protobuf and C, C++. - -You can enable the clang-tidy linter with: - -```shell -trunk check enable clang-tidy -``` - -### Auto Enabling - -clang-tidy will be auto-enabled if a `.clang-tidy` config file is present. - -### Settings - -clang-tidy supports the following config files: - -* `.clang-tidy` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.clang-tidy` if your project does not already have one. - -### Usage Notes - -We only support using clang-tidy from Bazel and CMake projects. 
- -In order to only see issues in your own code, not from library header files your code includes, add this to your `.clang-tidy` file: - -```yaml -HeaderFilterRegex: \./.+ -``` - -You may have to build your project first if you depend on any generated header files. - -## Linter Failures - -If a file you're linting does not compile, clang-tidy may fail to process it. In `trunk`, this will show up as a _Linter Failure_. The output you'll see will look like a compilation error. This can also happen if the pre-reqs to running clang-tidy haven't been met (see below). - -## Using Bazel - -By default Trunk will query `bazel` for compile commands used to run `clang-tidy`. This requires no configuration. - -Trunk will build needed compilation pre-requisites before invoking `clang-tidy` on each file (e.g. generated protobuf headers). - -You can generate a local compilation database by running `trunk generate-compile-commands`. - -**Finding the bazel binary** - -Trunk will search for the `bazel` binary in two ways. - -* Paths relative to the workspace root. -* Binaries in any of the directories in the PATH environment variable. - -First trunk will search all workspace root relative paths and then all system directories. If you override anything in `lint.bazel.paths` then we only search the paths you specify. By default the configuration is as follows. - -```yaml -lint: - bazel: - paths: - workspace: - - tools/bazel - - bazelisk - system: - - bazel - - bazelisk -``` - -## Using `compile_commands.json` generated by CMake - -Trunk supports using the `compile_commands.json` file generated by CMake. If you run `cmake` from a directory called `build` in the root of your project then Trunk will find the compile commands automatically. If you run it in some other directory then you will have to symlink the `compile_commands.json` in that directory to the root of your repo for trunk to find them. Note that Trunk does not currently support CMake out of tree builds. 
- -## Another tool claims I have clang-tidy issues, but not Trunk. What gives? - -Trunk runs `clang-tidy` with a compile commands database so that we can guarantee clang-tidy produces the correct diagnostics about your code. Other tools, such as `clangd`, may use best-effort heuristics to guess a compile command for a given clang-tidy input file (for example, see [this discussion)](https://github.com/clangd/clangd/issues/519) and consequently produce incorrect clang-tidy findings because they guessed the compile command wrong. - -### Links - -* [clang-tidy site](https://clang.llvm.org/extra/clang-tidy/) -* clang-tidy Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/clang-tidy) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/clippy.mdx b/code-quality/overview/linters/supported/clippy.mdx deleted file mode 100644 index 4b2ce36..0000000 --- a/code-quality/overview/linters/supported/clippy.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Clippy" -description: "A collection of lints to catch common mistakes and improve your Rust code." ---- -[**Clippy**](https://doc.rust-lang.org/clippy/) is a linter for Rust. - -You can enable the Clippy linter with: - -```shell -trunk check enable clippy -``` - -## Auto Enabling - -Clippy will be auto-enabled if any _Rust_ files are present. - -## Settings - -Clippy supports the following config files: - -* `clippy.toml` -* `.clippy.toml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -Clippy is distributed with rust itself, so specify your rust version for your clippy version (for example `clippy@1.61.0`). 
- -## Links - -* [Clippy site](https://doc.rust-lang.org/clippy/) -* Clippy Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/clippy) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/cmake-format.mdx b/code-quality/overview/linters/supported/cmake-format.mdx deleted file mode 100644 index 6c3b334..0000000 --- a/code-quality/overview/linters/supported/cmake-format.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "cmake-format" -description: "Learn how to install, configure, and run CMake-Format with Trunk Check to ensure consistent formatting and best practices for your CMake scripts." ---- -[**cmake-format**](https://github.com/cheshirekow/cmake_format) is a formatter for C, C++. - -You can enable the cmake-format formatter with: - -```shell -trunk check enable cmake-format -``` - -## Auto Enabling - -cmake-format will be auto-enabled if any of its config files are present: _`.cmake-format.json`, `.cmake-format.py`, `.cmake-format.yaml`_. - -## Settings - -cmake-format supports the following config files: - -* `.cmake-format.json` -* `.cmake-format.py` -* `.cmake-format.yaml` -* `cmake-format.json` -* `cmake-format.py` -* `cmake-format.yaml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [cmake-format site](https://github.com/cheshirekow/cmake_format) -* cmake-format Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/cmake-format) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/codespell.mdx b/code-quality/overview/linters/supported/codespell.mdx deleted file mode 100644 index 22cc3ef..0000000 --- a/code-quality/overview/linters/supported/codespell.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "codespell" -description: "Codespell fixes common misspellings in text files. It's designed primarily to check misspelled words in source code." ---- -[**codespell**](https://github.com/codespell-project/codespell#readme) is a linter for All. - -You can enable the codespell linter with: - -```shell -trunk check enable codespell -``` - - -![codespell example output](/assets/codespell.gif) - - -## Auto Enabling - -codespell will be auto-enabled if a `.codespellrc` config file is present. - -## Settings - -codespell supports the following config files: - -* `.codespellrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [codespell site](https://github.com/codespell-project/codespell#readme) -* codespell Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/codespell) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/cspell.mdx b/code-quality/overview/linters/supported/cspell.mdx deleted file mode 100644 index f134c22..0000000 --- a/code-quality/overview/linters/supported/cspell.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "cspell" -description: "CSpell is a linter for identifying and fixing spelling errors in source code, documentation, and configuration files, enhancing overall project quality." ---- -[**cspell**](https://github.com/streetsidesoftware/cspell#readme) is a linter for All. - -You can enable the cspell linter with: - -```shell -trunk check enable cspell -``` - - -![cspell example output](/assets/cspell.gif) - - -## Auto Enabling - -cspell will never be auto-enabled. It must be enabled manually. - -## Settings - -cspell supports the following config files: - -* `.cspell.json` -* `cspell.json` -* `.cSpell.json` -* `cSpell.json` -* `cspell.config.js` -* `cspell.config.cjs` -* `cspell.config.json` -* `cspell.config.yaml` -* `cspell.config.yml` -* `cspell.yaml` -* `cspell.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `cspell.yaml` if your project does not already have one. 
- -## Links - -* [cspell site](https://github.com/streetsidesoftware/cspell#readme) -* cspell Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/cspell) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/cue-fmt.mdx b/code-quality/overview/linters/supported/cue-fmt.mdx deleted file mode 100644 index b6c88d6..0000000 --- a/code-quality/overview/linters/supported/cue-fmt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "cue-fmt" -description: "cue-fmt is a formatter for CUE files that improves consistency and readability. Learn how to install, configure, and run cue-fmt." ---- -[**cue-fmt**](https://cuelang.org) is a formatter for Cue. - -You can enable the cue-fmt formatter with: - -```shell -trunk check enable cue-fmt -``` - -## Auto Enabling - -cue-fmt will be auto-enabled if any _Cue_ files are present. - -## Links - -* [cue-fmt site](https://cuelang.org) -* cue-fmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/cue-fmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/dart.mdx b/code-quality/overview/linters/supported/dart.mdx deleted file mode 100644 index 2964729..0000000 --- a/code-quality/overview/linters/supported/dart.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "dart" -description: "dart is a linter for Dart" ---- -[**dart**](https://dart.dev/tools/dart-format) is a linter for Dart. - -You can enable the dart linter with: - -```shell -trunk check enable dart -``` - -## Auto Enabling - -dart will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [dart site](https://dart.dev/tools/dart-format) -* dart Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/dart) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/deno.mdx b/code-quality/overview/linters/supported/deno.mdx deleted file mode 100644 index c4eb7be..0000000 --- a/code-quality/overview/linters/supported/deno.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "deno" -description: "deno is a linter for JavaScript, JSON, TypeScript and Markdown" ---- -[**deno**](https://deno.land/manual) is a linter for JavaScript, JSON, TypeScript and Markdown. - -You can enable the deno linter with: - -```shell -trunk check enable deno -``` - -## Auto Enabling - -deno will be auto-enabled if any of its config files are present: _`deno.json`, `deno.jsonc`_. - -## Settings - -deno supports the following config files: - -* `deno.json` -* `deno.jsonc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [deno site](https://deno.land/manual) -* deno Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/deno) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/detekt.mdx b/code-quality/overview/linters/supported/detekt.mdx deleted file mode 100644 index 6cbf562..0000000 --- a/code-quality/overview/linters/supported/detekt.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "Detekt" -description: "Static code analysis for Kotlin" ---- -[**Detekt**](https://github.com/detekt/detekt) is a linter for Kotlin. - -detekt is composed of several linter commands. - -`detekt` runs detekt with the built-in default config and any overrides in `.detekt.yaml`. 
- -You can enable the `detekt` linter with: - -```shell -trunk check enable detekt -``` - -`detekt-explicit` disables the default config and uses `.detekt.yaml` as the source of truth. - -You can enable the `detekt-explicit` linter with: - -```shell -trunk check enable detekt-explicit -``` - -`detekt-gradle` runs detekt using Gradle. Only use if you already are using Gradle for the rest of your build setup. - -You can enable the `detekt-gradle` linter with: - -```shell -trunk check enable detekt-gradle -``` - -## Auto Enabling - -Detekt will never be auto-enabled. It must be enabled manually. - -## Settings - -Detekt supports the following config files: - -* `.detekt.yaml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -Detekt is usually invoked through gradle, which allows specifying additional configuration in `build.gradle`. We do not yet automatically parse your Gradle scripts to infer your `detekt` configuration; instead, what we do is this: - -* `detekt` invokes [`detekt-cli`](https://detekt.github.io/detekt/cli.html) with the `--build-upon-default-config` flag (this appears to be [more common](https://cs.github.com/?q=%2FbuildUponDefaultConfig.*%28true%29%2F+detekt) than the alternative). -* `detekt-explicit` invokes [`detekt-cli`](https://detekt.github.io/detekt/cli.html) without the `--build-upon-default-config` flag. - -You will also need to provide a valid detekt config as `.detekt.yaml` (an empty `.detekt.yaml` is valid, if you don't want to configure `detekt`). If you already have a detekt config, then you can symlink it like so: - -```bash -ln -s path/to/existing/detekt-config.yml .detekt-config.yaml -``` - -To use `./gradlew detekt` to invoke Detekt, you can add `detekt-gradle@SYSTEM` to your `enabled` list. 
Note that since you're running Detekt via Gradle, you should also add the paths to your Detekt configurations to `direct_configs`, e.g. - -```undefined -direct_configs: ["lib/detekt.yaml"] -``` - -## Links - -* [Detekt site](https://github.com/detekt/detekt) -* Detekt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/detekt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/djlint.mdx b/code-quality/overview/linters/supported/djlint.mdx deleted file mode 100644 index d7dc334..0000000 --- a/code-quality/overview/linters/supported/djlint.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "djlint" -description: "djlint is a linter for HTML Templates" ---- -[**djlint**](https://github.com/Riverside-Healthcare/djlint#readme) is a linter for HTML Templates. - -You can enable the djlint linter with: - -```shell -trunk check enable djlint -``` - -## Auto Enabling - -djlint will be auto-enabled if a `.djlintrc` config file is present. - -## Settings - -djlint supports the following config files: - -* `.djlintrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.djlintrc` if your project does not already have one. 
- -## Links - -* [djlint site](https://github.com/Riverside-Healthcare/djlint#readme) -* djlint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/djlint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/dotenv-linter.mdx b/code-quality/overview/linters/supported/dotenv-linter.mdx deleted file mode 100644 index 9ddad88..0000000 --- a/code-quality/overview/linters/supported/dotenv-linter.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "dotenv-linter" -description: "dotenv-linter is a linter for Dotenv" ---- -[**dotenv-linter**](https://github.com/dotenv-linter/dotenv-linter#readme) is a linter for Dotenv. - -You can enable the dotenv-linter linter with: - -```shell -trunk check enable dotenv-linter -``` - -## Auto Enabling - -dotenv-linter will be auto-enabled if any _Dotenv_ files are present. - -## Links - -* [dotenv-linter site](https://github.com/dotenv-linter/dotenv-linter#readme) -* dotenv-linter Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/dotenv-linter) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/dotnet-format.mdx b/code-quality/overview/linters/supported/dotnet-format.mdx deleted file mode 100644 index d391808..0000000 --- a/code-quality/overview/linters/supported/dotnet-format.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "dotnet-format" -description: "dotnet-format is a linter for C#" ---- -[**dotnet-format**](https://github.com/dotnet/format#readme) is a linter for C#. - -You can enable the dotnet-format linter with: - -```shell -trunk check enable dotnet-format -``` - -## Auto Enabling - -dotnet-format will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [dotnet-format site](https://github.com/dotnet/format#readme) -* dotnet-format Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/dotnet-format) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/dustilock.mdx b/code-quality/overview/linters/supported/dustilock.mdx deleted file mode 100644 index 95d3921..0000000 --- a/code-quality/overview/linters/supported/dustilock.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "dustilock" -description: "dustilock is a linter for Security" ---- -[**dustilock**](https://github.com/Checkmarx/dustilock) is a linter for Security. - -You can enable the dustilock linter with: - -```shell -trunk check enable dustilock -``` - -## Auto Enabling - -dustilock will never be auto-enabled. It must be enabled manually. - -## Links - -* [dustilock site](https://github.com/Checkmarx/dustilock) -* dustilock Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/dustilock) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/eslint.mdx b/code-quality/overview/linters/supported/eslint.mdx deleted file mode 100644 index 4bbac00..0000000 --- a/code-quality/overview/linters/supported/eslint.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "ESLint" -description: "ESLint statically analyzes your code to quickly find problems." ---- -## ESLint - -[**ESLint**](https://eslint.org/) is a linter for JavaScript, JSON and TypeScript. - -You can enable the ESLint linter with: - -```shell -trunk check enable eslint -``` - -### Auto Enabling - -ESLint will be auto-enabled if any of its config files are present: _`eslint.config.js`, `eslint.config.mjs`, `eslint.config.cjs`_. 
- -### Settings - -ESLint supports the following config files: - -* `eslint.config.js` -* `eslint.config.mjs` -* `eslint.config.cjs` -* `.eslintrc` -* `.eslintrc.cjs` -* `.eslintrc.js` -* `.eslintrc.json` -* `.eslintrc.mjs` -* `.eslintrc.yaml` -* `.eslintrc.yml` - -Unlike with most tools under `trunk check`, these files cannot be moved. - -### Usage Notes - -## ESLint >= 9.x - -As of ESLint v9, all of the formatters have been removed. We suggest using [prettier](/code-quality/overview/linters/supported/prettier) to format Javascript and Typescript code. The extra package mentioned below is no longer needed for ESLint v9 and higher. - -## ESlint < 8.x - -Most ESLint users use several plugins, custom parsers, etc. Trunk has turned off sandboxing and caching for ESLint so it can use your repo's installed packages for ESLint plugins, and other required ESLint packages. Trunk controls the ESLint version, but otherwise, ESLint looks for all plugins, configs, etc. based on the path of the source file it is linting. **This all means you do need to have npm/yarn installed in your repo as a prerequisite before running ESLint via trunk**. - -We recommend you disable all Prettier rules in your ESLint config and let Trunk run Prettier automatically on your files. It's much nicer to just autoformat a file than to see a lint error for every missing space. 
- -You can easily do this by: - -* adding the `eslint-config-prettier` package -* adding `prettier` as the last element to the `extends` property in your ESLint config - -For example, your `extends` list might look like: - -```yaml -extends: - # Order matters, later configs purposefully override settings from earlier configs - - eslint:recommended - - airbnb - - plugin:@typescript-eslint/recommended - - plugin:import/recommended - - plugin:import/typescript - - plugin:node/recommended - - plugin:mocha/recommended - - plugin:react/recommended - - prettier # this actually turns OFF all Prettier rules running via ESLint -``` - -### Links - -* [ESLint site](https://eslint.org/) -* ESLint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/eslint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/flake8.mdx b/code-quality/overview/linters/supported/flake8.mdx deleted file mode 100644 index d3eb64a..0000000 --- a/code-quality/overview/linters/supported/flake8.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Flake8" -description: "Uncover Flake8, a versatile Python linter for code style and error checking. Flake 8 checks against PEP 8 and more, with plugin support for broader analysis." ---- -[**Flake8**](https://flake8.pycqa.org/en/latest/) is a linter for Python. - -You can enable the Flake8 linter with: - -```shell -trunk check enable flake8 -``` - - -![flake8 example output](/assets/flake8.gif) - - -## Auto Enabling - -Flake8 will be auto-enabled if a `.flake8` config file is present. - -## Settings - -Flake8 supports the following config files: - -* `.flake8` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.flake8` if your project does not already have one. 
- -## Usage Notes - -Flake8 has a plugin architecture where if you install a plugin, it gets used. You can enable Flake8 plugins via: - -```yaml -enabled: - - flake8@3.9.2: - packages: - - flake8-bugbear@21.4.3 -``` - -`flake8-bugbear` is probably the most popular **flake8** plugin, we recommend it!. Here are a few other popular flake8 plugins you should consider. - -* **flake8-comprehensions**: Helps in identifying unnecessary comprehensions in your code. -* **flake8-docstrings**: Checks for compliance with Python docstring conventions. -* **flake8-import-order**: Checks the order of your imports according to various configurable ordering styles. - -Here's an updated code snippet with the above Plugins enabled: - -```undefined -enabled: - - flake8@3.9.2: - packages: - - flake8-bugbear@21.4.3 - - flake8-docstrings@1.7.0 - - flake8-import-order@0.18.2 - - flake8-comprehensions@3.14.0 -``` - -## Links - -* [Flake8 site](https://flake8.pycqa.org/en/latest/) -* Flake8 Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/flake8) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/git-diff-check.mdx b/code-quality/overview/linters/supported/git-diff-check.mdx deleted file mode 100644 index a4f4467..0000000 --- a/code-quality/overview/linters/supported/git-diff-check.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "git-diff-check" -description: "git-diff-check is a linter for All" ---- -[**git-diff-check**](https://git-scm.com/docs/git-diff) is a linter for All. - -You can enable the git-diff-check linter with: - -```shell -trunk check enable git-diff-check -``` - -## Auto Enabling - -git-diff-check will be auto-enabled if any _all_ files are present. 
- -## Links - -* [git-diff-check site](https://git-scm.com/docs/git-diff) -* git-diff-check Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/git-diff-check) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/gitleaks.mdx b/code-quality/overview/linters/supported/gitleaks.mdx deleted file mode 100644 index 337dda2..0000000 --- a/code-quality/overview/linters/supported/gitleaks.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "Gitleaks" -description: "Explore Gitleaks, an open-source tool for identifying secrets in codebases. Learn about its file type support and integration with Trunk." ---- -[**Gitleaks**](https://gitleaks.io/) is a linter for All. - -You can enable the Gitleaks linter with: - -```shell -trunk check enable gitleaks -``` - - -![gitleaks example output](/assets/gitleaks.gif) - - -## Auto Enabling - -Gitleaks will be auto-enabled if any of its config files are present: _`.gitleaks.config`, `.gitleaks.toml`, `.gitleaksignore`_. - -## Settings - -Gitleaks supports the following config files: - -* `.gitleaks.config` -* `.gitleaks.toml` -* `.gitleaksignore` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -Gitleaks v7 only works with Go 1.16, not Go 1.18 while Gitleaks v8 works with 1.18. We recommend using v8, but if you specifically need to use v7 you can override the go runtime version like so: - -```yaml -runtimes: - enabled: - - go@1.16.7 -``` - -Again, this is not recommended. Just use Gitleaks v8 or later with go 1.18 or later. 
- -## Links - -* [Gitleaks site](https://gitleaks.io/) -* Gitleaks Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/gitleaks) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/gofmt.mdx b/code-quality/overview/linters/supported/gofmt.mdx deleted file mode 100644 index 5eff07b..0000000 --- a/code-quality/overview/linters/supported/gofmt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Gofmt" -description: "Gofmt simplifies Go coding by automatically formatting code to match Go's style guidelines, enhancing readability and teamwork without the manual hassle." ---- -[**Gofmt**](https://github.com/rhysd/actionlint) is a formatter for Go. - -You can enable the Gofmt formatter with: - -```shell -trunk check enable gofmt -``` - -## Auto Enabling - -Gofmt will be auto-enabled if any _Go_ files are present. - -## Links - -* [Gofmt site](https://github.com/rhysd/actionlint) -* Gofmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/gofmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/gofumpt.mdx b/code-quality/overview/linters/supported/gofumpt.mdx deleted file mode 100644 index 1f5c689..0000000 --- a/code-quality/overview/linters/supported/gofumpt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "gofumpt" -description: "gofumpt is a linter for Go" ---- -[**gofumpt**](https://pkg.go.dev/mvdan.cc/gofumpt) is a linter for Go. - -You can enable the gofumpt linter with: - -```shell -trunk check enable gofumpt -``` - -## Auto Enabling - -gofumpt will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [gofumpt site](https://pkg.go.dev/mvdan.cc/gofumpt) -* gofumpt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/gofumpt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/goimports.mdx b/code-quality/overview/linters/supported/goimports.mdx deleted file mode 100644 index 7934fb4..0000000 --- a/code-quality/overview/linters/supported/goimports.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "goimports" -description: "goimports is a linter for Go" ---- -[**goimports**](https://pkg.go.dev/golang.org/x/tools/cmd/goimports) is a linter for Go. - -You can enable the goimports linter with: - -```shell -trunk check enable goimports -``` - -## Auto Enabling - -goimports will never be auto-enabled. It must be enabled manually. - -## Links - -* [goimports site](https://pkg.go.dev/golang.org/x/tools/cmd/goimports) -* goimports Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/goimports) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/gokart.mdx b/code-quality/overview/linters/supported/gokart.mdx deleted file mode 100644 index b947fd9..0000000 --- a/code-quality/overview/linters/supported/gokart.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "gokart" -description: "gokart is a linter for Go" ---- -[**gokart**](https://github.com/praetorian-inc/gokart) is a linter for Go. - -You can enable the gokart linter with: - -```shell -trunk check enable gokart -``` - -## Auto Enabling - -gokart will be auto-enabled if a `analyzers.yml` config file is present. - -## Settings - -gokart supports the following config files: - -* `analyzers.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. 
See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `analyzers.yml` if your project does not already have one. - -## Links - -* [gokart site](https://github.com/praetorian-inc/gokart) -* gokart Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/gokart) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/golangci-lint.mdx b/code-quality/overview/linters/supported/golangci-lint.mdx deleted file mode 100644 index ba85387..0000000 --- a/code-quality/overview/linters/supported/golangci-lint.mdx +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "golangci-lint" -description: "Golangci-lint is a fast Go linters runner. Learn how to install, configure, and use golangci-lint effectively for Go projects." ---- -[**golangci-lint**](https://github.com/golangci/golangci-lint) is a linter for Go. - -You can enable the golangci-lint linter with: - -```shell -trunk check enable golangci-lint -``` - -## Auto Enabling - -golangci-lint will be auto-enabled if any _Go_ files are present. - -## Settings - -golangci-lint supports the following config files: - -* `.golangci.json` -* `.golangci.toml` -* `.golangci.yaml` -* `.golangci.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -Make sure your go version in `go.mod` matches Trunk's go runtime version. At the time of this writing, Trunk's default go runtime version is `1.21.0`. 
You can find out what it is via `trunk print-config`, and look for the `runtime` section, and you can override the default version in your `trunk.yaml` via: - -```yaml -runtimes: - enabled: - - go@1.21.0 -``` - -## Links - -* [golangci-lint site](https://github.com/golangci/golangci-lint) -* golangci-lint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/golangci-lint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/golines.mdx b/code-quality/overview/linters/supported/golines.mdx deleted file mode 100644 index 651022d..0000000 --- a/code-quality/overview/linters/supported/golines.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "golines" -description: "golines is a linter for Go" ---- -[**golines**](https://pkg.go.dev/github.com/segmentio/golines) is a linter for Go. - -You can enable the golines linter with: - -```shell -trunk check enable golines -``` - -## Auto Enabling - -golines will never be auto-enabled. It must be enabled manually. - -## Links - -* [golines site](https://pkg.go.dev/github.com/segmentio/golines) -* golines Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/golines) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/google-java-format.mdx b/code-quality/overview/linters/supported/google-java-format.mdx deleted file mode 100644 index d0b5400..0000000 --- a/code-quality/overview/linters/supported/google-java-format.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "google-java-format" -description: "google-java-format is a linter for Java" ---- -[**google-java-format**](https://github.com/google/google-java-format#readme) is a linter for Java. 
- -You can enable the google-java-format linter with: - -```shell -trunk check enable google-java-format -``` - -## Auto Enabling - -google-java-format will never be auto-enabled. It must be enabled manually. - -## Links - -* [google-java-format site](https://github.com/google/google-java-format#readme) -* google-java-format Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/google-java-format) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/graphql-schema-linter.mdx b/code-quality/overview/linters/supported/graphql-schema-linter.mdx deleted file mode 100644 index 67e6fd1..0000000 --- a/code-quality/overview/linters/supported/graphql-schema-linter.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "graphql-schema-linter" -description: "graphql-schema-linter is a linter for GraphQL" ---- -[**graphql-schema-linter**](https://github.com/cjoudrey/graphql-schema-linter#readme) is a linter for GraphQL. - -You can enable the graphql-schema-linter linter with: - -```shell -trunk check enable graphql-schema-linter -``` - -## Auto Enabling - -graphql-schema-linter will be auto-enabled if any of its config files are present: _`.graphql-schema-linter.config.js`, `.graphql-schema-linterrc`_. - -## Settings - -graphql-schema-linter supports the following config files: - -* `.graphql-schema-linter.config.js` -* `.graphql-schema-linterrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [graphql-schema-linter site](https://github.com/cjoudrey/graphql-schema-linter#readme) -* graphql-schema-linter Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/graphql-schema-linter) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/hadolint.mdx b/code-quality/overview/linters/supported/hadolint.mdx deleted file mode 100644 index 02c01d5..0000000 --- a/code-quality/overview/linters/supported/hadolint.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "hadolint" -description: "hadolint is a linter for Docker" ---- -[**hadolint**](https://github.com/hadolint/hadolint#readme) is a linter for Docker. - -You can enable the hadolint linter with: - -```shell -trunk check enable hadolint -``` - -## Auto Enabling - -hadolint will be auto-enabled if any _Docker_ files are present. - -## Settings - -hadolint supports the following config files: - -* `.hadolint.yaml` -* `.hadolint.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.hadolint.yaml` if your project does not already have one. 
- -## Links - -* [hadolint site](https://github.com/hadolint/hadolint#readme) -* hadolint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/hadolint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/haml-lint.mdx b/code-quality/overview/linters/supported/haml-lint.mdx deleted file mode 100644 index f5430e5..0000000 --- a/code-quality/overview/linters/supported/haml-lint.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "haml-lint" -description: "haml-lint is a linter for HAML" ---- -[**haml-lint**](https://github.com/sds/haml-lint#readme) is a linter for HAML. - -You can enable the haml-lint linter with: - -```shell -trunk check enable haml-lint -``` - -## Auto Enabling - -haml-lint will be auto-enabled if any _Haml_ files are present. - -## Settings - -haml-lint supports the following config files: - -* `.haml-lint.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [haml-lint site](https://github.com/sds/haml-lint#readme) -* haml-lint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/haml-lint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/index.mdx b/code-quality/overview/linters/supported/index.mdx deleted file mode 100644 index 92ed773..0000000 --- a/code-quality/overview/linters/supported/index.mdx +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: "Supported Linters" -description: "Trunk Code Quality supports over 100 linters and formatters" ---- - -#### 📘 Our linter integrations are open-source! - -You can find them at [`trunk-io/plugins`](https://github.com/trunk-io/plugins), contributions are welcome! 
- - -Enable any of the following tools with: - -``` -trunk check enable -``` - -| Technology | Linters | -| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| All | [codespell](./codespell), [cspell](./cspell), [git-diff-check](./git-diff-check), [gitleaks](./gitleaks), [pre-commit-hooks](./pre-commit-hooks) | -| Ansible | [ansible-lint](./ansible-lint) | -| Apex | [pmd](./pmd) | -| Bash | [shellcheck](./shellcheck), [shfmt](./shfmt) | -| Bazel, Starlark | [buildifier](./buildifier) | -| C# | [dotnet-format](./dotnet-format) | -| C, C++ | [clang-format](./clang-format), [clang-tidy](./clang-tidy), [cmake-format](./cmake-format), [iwyu](./iwyu), [pragma-once](./pragma-once) | -| CircleCI Config | [circleci](./circleci) | -| CloudFormation | [cfnlint](./cfnlint), [checkov](./checkov) | -| CSS, SCSS | [prettier](./prettier), [stylelint](./stylelint) | -| Cue | [cue-fmt](./cue-fmt) | -| Dart | [dart](./dart) | -| Docker | [checkov](./checkov), [hadolint](./hadolint) | -| Dotenv | [dotenv-linter](./dotenv-linter) | -| GitHub | [actionlint](./actionlint) | -| Go | [gofmt](./gofmt), [gofumpt](./gofumpt), [goimports](./goimports), [gokart](./gokart), [golangci-lint](./golangci-lint), [golines](./golines), [semgrep](./semgrep) | -| GraphQL | [graphql-schema-linter](./graphql-schema-linter), [prettier](./prettier) | -| HAML | [haml-lint](./haml-lint) | -| HTML Templates | [djlint](./djlint) | -| Java | [google-java-format](./google-java-format), [pmd](./pmd), [semgrep](./semgrep) | -| JavaScript | [biome](./biome), [deno](./deno), [eslint](./eslint), [prettier](./prettier), [rome](./rome), [semgrep](./semgrep) | -| JSON | [deno](./deno), [eslint](./eslint), [prettier](./prettier), [semgrep](./semgrep) | -| json | [biome](./biome) | -| jsx | [biome](./biome) 
| -| Kotlin | [detekt](./detekt), [ktlint](./ktlint) | -| Kubernetes | [kube-linter](./kube-linter) | -| Lua | [stylua](./stylua) | -| Markdown | [deno](./deno), [markdown-link-check](./markdown-link-check), [markdown-table-prettify](./markdown-table-prettify), [markdownlint](./markdownlint), [markdownlint-cli2](./markdownlint-cli2), [prettier](./prettier), [remark-lint](./remark-lint) | -| Nix | [nixpkgs-fmt](./nixpkgs-fmt) | -| package.json | [sort-package-json](./sort-package-json) | -| Perl | [perlcritic](./perlcritic), [perltidy](./perltidy) | -| PHP | [php-cs-fixer](./php-cs-fixer), [phpstan](./phpstan) | -| PNG | [oxipng](./oxipng) | -| PowerShell | [psscriptanalyzer](./psscriptanalyzer) | -| Prisma | [prisma](./prisma) | -| prose | [vale](./vale) | -| Protobuf | [buf](./buf), [clang-format](./clang-format), [clang-tidy](./clang-tidy) | -| Python | [autopep8](./autopep8), [bandit](./bandit), [black](./black), [flake8](./flake8), [isort](./isort), [mypy](./mypy), [pylint](./pylint), [pyright](./pyright), [ruff](./ruff), [semgrep](./semgrep), [sourcery](./sourcery), [yapf](./yapf) | -| Rego | [opa](./opa), [regal](./regal) | -| Renovate | [renovate](./renovate) | -| Ruby | [brakeman](./brakeman), [rubocop](./rubocop), [rufo](./rufo), [semgrep](./semgrep), [standardrb](./standardrb) | -| Rust | [clippy](./clippy), [rustfmt](./rustfmt) | -| Scala | [scalafmt](./scalafmt) | -| Security | [checkov](./checkov), [dustilock](./dustilock), [nancy](./nancy), [osv-scanner](./osv-scanner), [terrascan](./terrascan), [tfsec](./tfsec), [trivy](./trivy), [trufflehog](./trufflehog) | -| SQL | [sql-formatter](./sql-formatter), [sqlfluff](./sqlfluff), [sqlfmt](./sqlfmt), [squawk](./squawk) | -| SVG | [svgo](./svgo) | -| Swift | [stringslint](./stringslint), [swiftformat](./swiftformat), [swiftlint](./swiftlint) | -| Terraform | [checkov](./checkov), [terraform](./terraform), [terrascan](./terrascan), [tflint](./tflint), [tfsec](./tfsec), [tofu](./tofu) | -| Terragrunt | 
[terragrunt](./terragrunt) | -| Terrascan | [terrascan](./terrascan) | -| Textproto | [txtpbfmt](./txtpbfmt) | -| TOML | [taplo](./taplo) | -| TypeScript | [biome](./biome), [deno](./deno), [eslint](./eslint), [prettier](./prettier), [rome](./rome), [semgrep](./semgrep) | -| YAML | [prettier](./prettier), [semgrep](./semgrep), [yamllint](./yamllint) | - - -#### Can't find a linter you need? - -Chat with Trunk's engineers and suggest your ideas. Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/). - - diff --git a/code-quality/overview/linters/supported/isort.mdx b/code-quality/overview/linters/supported/isort.mdx deleted file mode 100644 index adfa5d8..0000000 --- a/code-quality/overview/linters/supported/isort.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "isort" -description: "isort is a Python utility for sorting imports alphabetically and automatically separating them into sections and by type." ---- -[**isort**](https://pycqa.github.io/isort/) is a formatter for Python. - -You can enable the isort formatter with: - -```shell -trunk check enable isort -``` - - -![isort example output](/assets/isort.gif) - - -## Auto Enabling - -isort will be auto-enabled if any _Python_ files are present. - -## Settings - -isort supports the following config files: - -* `.isort.cfg` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.isort.cfg` if your project does not already have one. 
- -## Links - -* [isort site](https://pycqa.github.io/isort/) -* isort Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/isort) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/iwyu.mdx b/code-quality/overview/linters/supported/iwyu.mdx deleted file mode 100644 index c124d8c..0000000 --- a/code-quality/overview/linters/supported/iwyu.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "iwyu" -description: "iwyu is a linter for C, C++" ---- -[**iwyu**](https://github.com/include-what-you-use/include-what-you-use#readme) is a linter for C, C++. - -You can enable the iwyu linter with: - -```shell -trunk check enable iwyu -``` - -## Auto Enabling - -iwyu will never be auto-enabled. It must be enabled manually. - -## Links - -* [iwyu site](https://github.com/include-what-you-use/include-what-you-use#readme) -* iwyu Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/iwyu) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/ktlint.mdx b/code-quality/overview/linters/supported/ktlint.mdx deleted file mode 100644 index 2f804a5..0000000 --- a/code-quality/overview/linters/supported/ktlint.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "ktlint" -description: "ktlint is a linter for Kotlin" ---- -[**ktlint**](https://github.com/pinterest/ktlint#readme) is a linter for Kotlin. - -You can enable the ktlint linter with: - -```shell -trunk check enable ktlint -``` - -## Auto Enabling - -ktlint will be auto-enabled if any _Kotlin_ files are present. 
- -## Links - -* [ktlint site](https://github.com/pinterest/ktlint#readme) -* ktlint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/ktlint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/kube-linter.mdx b/code-quality/overview/linters/supported/kube-linter.mdx deleted file mode 100644 index 31de63a..0000000 --- a/code-quality/overview/linters/supported/kube-linter.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "kube-linter" -description: "kube-linter is a linter for Kubernetes" ---- -[**kube-linter**](https://github.com/stackrox/kube-linter#readme) is a linter for Kubernetes. - -You can enable the kube-linter linter with: - -```shell -trunk check enable kube-linter -``` - -## Auto Enabling - -kube-linter will never be auto-enabled. It must be enabled manually. - -## Links - -* [kube-linter site](https://github.com/stackrox/kube-linter#readme) -* kube-linter Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/kube-linter) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/markdown-link-check.mdx b/code-quality/overview/linters/supported/markdown-link-check.mdx deleted file mode 100644 index 4af1212..0000000 --- a/code-quality/overview/linters/supported/markdown-link-check.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "markdown-link-check" -description: "markdown-link-check is a linter for Markdown" ---- -[**markdown-link-check**](https://github.com/tcort/markdown-link-check#readme) is a linter for Markdown. - -You can enable the markdown-link-check linter with: - -```shell -trunk check enable markdown-link-check -``` - -## Auto Enabling - -markdown-link-check will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [markdown-link-check site](https://github.com/tcort/markdown-link-check#readme) -* markdown-link-check Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/markdown-link-check) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/markdown-table-prettify.mdx b/code-quality/overview/linters/supported/markdown-table-prettify.mdx deleted file mode 100644 index adbd96f..0000000 --- a/code-quality/overview/linters/supported/markdown-table-prettify.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "markdown-table-prettify" -description: "markdown-table-prettify is a linter for Markdown" ---- -[**markdown-table-prettify**](https://github.com/darkriszty/MarkdownTablePrettify-VSCodeExt#readme) is a linter for Markdown. - -You can enable the markdown-table-prettify linter with: - -```shell -trunk check enable markdown-table-prettify -``` - -## Auto Enabling - -markdown-table-prettify will never be auto-enabled. It must be enabled manually. - -## Links - -* [markdown-table-prettify site](https://github.com/darkriszty/MarkdownTablePrettify-VSCodeExt#readme) -* markdown-table-prettify Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/markdown-table-prettify) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/markdownlint-cli2.mdx b/code-quality/overview/linters/supported/markdownlint-cli2.mdx deleted file mode 100644 index 176d425..0000000 --- a/code-quality/overview/linters/supported/markdownlint-cli2.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "markdownlint-cli2" -description: "markdownlint-cli2 is a linter for Markdown" ---- -[**markdownlint-cli2**](https://github.com/DavidAnson/markdownlint-cli2) is a linter for Markdown. 
- -You can enable the markdownlint-cli2 linter with: - -```shell -trunk check enable markdownlint-cli2 -``` - -## Auto Enabling - -markdownlint-cli2 will be auto-enabled if any of its config files are present: _`.markdownlint-cli2.jsonc`, `.markdownlint-cli2.yaml`, `.markdownlint-cli2.cjs`_. - -## Settings - -markdownlint-cli2 supports the following config files: - -* `.markdownlint-cli2.jsonc` -* `.markdownlint-cli2.yaml` -* `.markdownlint-cli2.cjs` -* `.markdownlint-cli2.mjs` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [markdownlint-cli2 site](https://github.com/DavidAnson/markdownlint-cli2) -* markdownlint-cli2 Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/markdownlint-cli2) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/markdownlint.mdx b/code-quality/overview/linters/supported/markdownlint.mdx deleted file mode 100644 index 27df7ac..0000000 --- a/code-quality/overview/linters/supported/markdownlint.mdx +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "Markdownlint" -description: "Markdownlint is a tool designed to enforce consistency for Markdown files. It can include checks for headings, lists, line length, and syntax preferences." ---- -[**Markdownlint**](https://github.com/DavidAnson/markdownlint) is a linter for Markdown. - -You can enable the Markdownlint linter with: - -```shell -trunk check enable markdownlint -``` - -## Auto Enabling - -Markdownlint will be auto-enabled if any _Markdown_ files are present. - -## Settings - -Markdownlint supports the following config files: - -* `.markdownlint.json` -* `.markdownlint.yaml` -* `.markdownlint.yml` -* `.markdownlintrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. 
See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.markdownlint.yaml` if your project does not already have one. - -## Usage Notes - -Older versions of `markdownlint` had a bug where it printed plaintext output even when run with `--json`. We rely on JSON output so we can parse and ingest the results from markdownlint. The package we use for markdownlint is actually [markdownlint-cli ](https://www.npmjs.com/package/markdownlint-cli)`>= 0.29.0` is verified to work. - -## Links - -* [Markdownlint site](https://github.com/DavidAnson/markdownlint) -* Markdownlint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/markdownlint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/mypy.mdx b/code-quality/overview/linters/supported/mypy.mdx deleted file mode 100644 index 8210488..0000000 --- a/code-quality/overview/linters/supported/mypy.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "mypy" -description: "mypy is a linter for Python" ---- -[**mypy**](https://github.com/python/mypy#readme) is a linter for Python. - -You can enable the mypy linter with: - -```shell -trunk check enable mypy -``` - - -![mypy example output](/assets/mypy.gif) - - -## Auto Enabling - -mypy will be auto-enabled if any of its config files are present: _`mypy.ini`, `.mypy.ini`_. - -## Settings - -mypy supports the following config files: - -* `mypy.ini` -* `.mypy.ini` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [mypy site](https://github.com/python/mypy#readme) -* mypy Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/mypy) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/nancy.mdx b/code-quality/overview/linters/supported/nancy.mdx deleted file mode 100644 index 10b2de1..0000000 --- a/code-quality/overview/linters/supported/nancy.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "nancy" -description: "nancy is a linter for Security" ---- -[**nancy**](https://github.com/sonatype-nexus-community/nancy#readme) is a linter for Security. - -You can enable the nancy linter with: - -```shell -trunk check enable nancy -``` - -## Auto Enabling - -nancy will never be auto-enabled. It must be enabled manually. - -## Links - -* [nancy site](https://github.com/sonatype-nexus-community/nancy#readme) -* nancy Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/nancy) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/nixpkgs-fmt.mdx b/code-quality/overview/linters/supported/nixpkgs-fmt.mdx deleted file mode 100644 index 0731e0f..0000000 --- a/code-quality/overview/linters/supported/nixpkgs-fmt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "nixpkgs-fmt" -description: "nixpkgs-fmt is a linter for Nix" ---- -[**nixpkgs-fmt**](https://github.com/nix-community/nixpkgs-fmt) is a linter for Nix. - -You can enable the nixpkgs-fmt linter with: - -```shell -trunk check enable nixpkgs-fmt -``` - -## Auto Enabling - -nixpkgs-fmt will be auto-enabled if any _Nix_ files are present. 
- -## Links - -* [nixpkgs-fmt site](https://github.com/nix-community/nixpkgs-fmt) -* nixpkgs-fmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/nixpkgs-fmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/opa.mdx b/code-quality/overview/linters/supported/opa.mdx deleted file mode 100644 index 2e1b3ca..0000000 --- a/code-quality/overview/linters/supported/opa.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "opa" -description: "opa is a linter for Rego" ---- -[**opa**](https://www.openpolicyagent.org/docs/latest/cli/#opa-fmt) is a linter for Rego. - -You can enable the opa linter with: - -```shell -trunk check enable opa -``` - -## Auto Enabling - -opa will never be auto-enabled. It must be enabled manually. - -## Links - -* [opa site](https://www.openpolicyagent.org/docs/latest/cli/#opa-fmt) -* opa Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/opa) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/osv-scanner.mdx b/code-quality/overview/linters/supported/osv-scanner.mdx deleted file mode 100644 index f99c778..0000000 --- a/code-quality/overview/linters/supported/osv-scanner.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "OSV-Scanner" -description: "OSV-Scanner is an open-source tool created by Google to detect vulnerabilities in projects by scanning dependencies against the OSV database." ---- -[**OSV-Scanner**](https://github.com/google/osv-scanner) is a linter for Security. - -You can enable the OSV-Scanner linter with: - -```shell -trunk check enable osv-scanner -``` - -## Auto Enabling - -OSV-Scanner will be auto-enabled if any _Lockfile_ files are present. 
- -## Settings - -OSV-Scanner supports the following config files: - -* `osv-scanner.toml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - - -Moving `osv-scanner.toml` to `.trunk/configs` can cause issues because `osv-scanner.toml` is only applied to projects in the root folder by default. This can cause issues with any projects in subfolders, such as in a multi-module repository. - - -To properly configure OSV scanner if you decide to move its config file, you can specify the path to `osv-scanner.toml` using the `--config` flag.\ -\ -Example override to add to `trunk.yaml` : - -```yaml -commands: - - name: scan - run: | - osv-scanner \ - --lockfile=${target} \ - --format json \ - --config=.trunk/configs/osv-scanner.toml -``` - -## Links - -* [OSV-Scanner site](https://github.com/google/osv-scanner) -* [OSV-Scanner Configuration](https://google.github.io/osv-scanner/configuration/) -* OSV-Scanner Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/osv-scanner) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/oxipng.mdx b/code-quality/overview/linters/supported/oxipng.mdx deleted file mode 100644 index 4fb4d07..0000000 --- a/code-quality/overview/linters/supported/oxipng.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Oxipng" -description: "Oxipng is an open-source, CLI utility designed for optimizing PNG files. It applies lossless compression techniques to reduce file size." ---- -[**Oxipng**](https://github.com/shssoichiro/oxipng) is a formatter for PNG. - -You can enable the Oxipng formatter with: - -```shell -trunk check enable oxipng -``` - -## Auto Enabling - -Oxipng will be auto-enabled if any _PNG_ files are present. 
- -## Links - -* [Oxipng site](https://github.com/shssoichiro/oxipng) -* Oxipng Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/oxipng) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/perlcritic.mdx b/code-quality/overview/linters/supported/perlcritic.mdx deleted file mode 100644 index 0fc09a5..0000000 --- a/code-quality/overview/linters/supported/perlcritic.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "perlcritic" -description: "perlcritic is a linter for Perl" ---- -[**perlcritic**](https://metacpan.org/pod/Perl::Critic) is a linter for Perl. - -You can enable the perlcritic linter with: - -```shell -trunk check enable perlcritic -``` - -## Auto Enabling - -perlcritic will be auto-enabled if a `.perlcriticrc` config file is present. - -## Settings - -perlcritic supports the following config files: - -* `.perlcriticrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.perlcriticrc` if your project does not already have one. - -## Links - -* [perlcritic site](https://metacpan.org/pod/Perl::Critic) -* perlcritic Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/perlcritic) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/perltidy.mdx b/code-quality/overview/linters/supported/perltidy.mdx deleted file mode 100644 index 04d19c6..0000000 --- a/code-quality/overview/linters/supported/perltidy.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "perltidy" -description: "perltidy is a linter for Perl" ---- -[**perltidy**](https://metacpan.org/dist/Perl-Tidy/view/bin/perltidy) is a linter for Perl. 
- -You can enable the perltidy linter with: - -```shell -trunk check enable perltidy -``` - -## Auto Enabling - -perltidy will be auto-enabled if a `.perltidyrc` config file is present. - -## Settings - -perltidy supports the following config files: - -* `.perltidyrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.perltidyrc` if your project does not already have one. - -## Links - -* [perltidy site](https://metacpan.org/dist/Perl-Tidy/view/bin/perltidy) -* perltidy Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/perltidy) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/php-cs-fixer.mdx b/code-quality/overview/linters/supported/php-cs-fixer.mdx deleted file mode 100644 index b7a8c5c..0000000 --- a/code-quality/overview/linters/supported/php-cs-fixer.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "php-cs-fixer" -description: "php-cs-fixer is a linter for PHP" ---- -[**php-cs-fixer**](https://github.com/PHP-CS-Fixer/PHP-CS-Fixer) is a linter for PHP. - -You can enable the php-cs-fixer linter with: - -```shell -trunk check enable php-cs-fixer -``` - -## Auto Enabling - -php-cs-fixer will be auto-enabled if a `.php-cs-fixer.dist.php` config file is present. - -## Settings - -php-cs-fixer supports the following config files: - -* `.php-cs-fixer.dist.php` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [php-cs-fixer site](https://github.com/PHP-CS-Fixer/PHP-CS-Fixer) -* php-cs-fixer Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/php-cs-fixer) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/phpstan.mdx b/code-quality/overview/linters/supported/phpstan.mdx deleted file mode 100644 index 1a00b50..0000000 --- a/code-quality/overview/linters/supported/phpstan.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "phpstan" -description: "phpstan is a linter for PHP" ---- -[**phpstan**](https://phpstan.org/) is a linter for PHP. - -You can enable the phpstan linter with: - -```shell -trunk check enable phpstan -``` - -## Auto Enabling - -phpstan will never be auto-enabled. It must be enabled manually. - -## Settings - -phpstan supports the following config files: - -* `phpstan.neon` -* `phpstan.neon.dist` -* `phpstan.dist.neon` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [phpstan site](https://phpstan.org/) -* phpstan Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/phpstan) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/pmd.mdx b/code-quality/overview/linters/supported/pmd.mdx deleted file mode 100644 index 2a2b861..0000000 --- a/code-quality/overview/linters/supported/pmd.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "pmd" -description: "pmd is a linter for Apex and Java" ---- -[**pmd**](https://pmd.github.io/) is a linter for Apex and Java. - -You can enable the pmd linter with: - -```shell -trunk check enable pmd -``` - - -![pmd example output](/assets/pmd.gif) - - -## Auto Enabling - -pmd will never be auto-enabled. 
It must be enabled manually. - -## Links - -* [pmd site](https://pmd.github.io/) -* pmd Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/pmd) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/pragma-once.mdx b/code-quality/overview/linters/supported/pragma-once.mdx deleted file mode 100644 index 6edbdd7..0000000 --- a/code-quality/overview/linters/supported/pragma-once.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "pragma-once" -description: "pragma-once is a linter for C, C++" ---- -[**pragma-once**](https://github.com/trunk-io/plugins/blob/main/linters/pragma-once/README.md) is a linter for C, C++. - -You can enable the pragma-once linter with: - -```shell -trunk check enable pragma-once -``` - - -![pragma-once example output](/assets/pragma-once.gif) - - -## Auto Enabling - -pragma-once will never be auto-enabled. It must be enabled manually. - -## Links - -* [pragma-once site](https://github.com/trunk-io/plugins/blob/main/linters/pragma-once/README.md) -* pragma-once Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/pragma-once) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/pre-commit-hooks.mdx b/code-quality/overview/linters/supported/pre-commit-hooks.mdx deleted file mode 100644 index 7046f47..0000000 --- a/code-quality/overview/linters/supported/pre-commit-hooks.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "pre-commit-hooks" -description: "pre-commit-hooks is a linter for All" ---- -[**pre-commit-hooks**](https://pre-commit.com/hooks.html) is a linter for All. - -You can enable the pre-commit-hooks linter with: - -```shell -trunk check enable pre-commit-hooks -``` - -## Auto Enabling - -pre-commit-hooks will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [pre-commit-hooks site](https://pre-commit.com/hooks.html) -* pre-commit-hooks Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/pre-commit-hooks) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/prettier.mdx b/code-quality/overview/linters/supported/prettier.mdx deleted file mode 100644 index 4010c67..0000000 --- a/code-quality/overview/linters/supported/prettier.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Prettier" -description: "Explore Prettier, the powerful code formatter. Learn how to install, configure, and effectively use Prettier to enhance your coding workflow." ---- -[**Prettier**](https://prettier.io/) is a formatter for CSS, SCSS, JavaScript, JSON, Markdown, TypeScript, GraphQL and YAML. - -You can enable the Prettier formatter with: - -```shell -trunk check enable prettier -``` - - -![prettier example output](/assets/prettier.gif) - - -## Auto Enabling - -Prettier will be auto-enabled if any _TypeScript, YAML, CSS, PostCSS, Sass, HTML, Markdown, JSON, JavaScript, GraphQL or Prettier\_supported\_configs_ files are present. - -## Settings - -Prettier supports the following config files: - -* `.prettierrc` -* `.prettierrc.json` -* `.prettierrc.yml` -* `.prettierrc.yaml` -* `.prettierrc.json5` -* `.prettierrc.js` -* `.prettierrc.cjs` -* `.prettierrc.mjs` -* `prettier.config.js` -* `prettier.config.cjs` -* `prettier.config.mjs` -* `.prettierrc.toml` -* `.prettierignore` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -By default, Trunk uses Prettier to autoformat many languages/config formats, including markdown. 
To line wrap within markdown, you need to set the following in your [Prettier config](https://prettier.io/docs/en/configuration.html) `.prettierrc.yaml`, etc. - -```yaml -proseWrap: always -``` - -You may also want to configure `printWidth` to your liking. - -## Links - -* [Prettier site](https://prettier.io/) -* Prettier Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/prettier) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/prisma.mdx b/code-quality/overview/linters/supported/prisma.mdx deleted file mode 100644 index 401bab0..0000000 --- a/code-quality/overview/linters/supported/prisma.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "prisma" -description: "prisma is a linter for Prisma" ---- -[**prisma**](https://github.com/prisma/prisma#readme) is a linter for Prisma. - -You can enable the prisma linter with: - -```shell -trunk check enable prisma -``` - -## Auto Enabling - -prisma will never be auto-enabled. It must be enabled manually. - -## Links - -* [prisma site](https://github.com/prisma/prisma#readme) -* prisma Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/prisma) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/psscriptanalyzer.mdx b/code-quality/overview/linters/supported/psscriptanalyzer.mdx deleted file mode 100644 index 6276afb..0000000 --- a/code-quality/overview/linters/supported/psscriptanalyzer.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "psscriptanalyzer" -description: "psscriptanalyzer is a linter for PowerShell" ---- -[**psscriptanalyzer**](https://github.com/PowerShell/PSScriptAnalyzer) is a linter for PowerShell. 
- -You can enable the psscriptanalyzer linter with: - -```shell -trunk check enable psscriptanalyzer -``` - -## Auto Enabling - -psscriptanalyzer will be auto-enabled if a `PSScriptAnalyzerSettings.psd1` config file is present. - -## Settings - -psscriptanalyzer supports the following config files: - -* `PSScriptAnalyzerSettings.psd1` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [psscriptanalyzer site](https://github.com/PowerShell/PSScriptAnalyzer) -* psscriptanalyzer Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/psscriptanalyzer) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/pylint.mdx b/code-quality/overview/linters/supported/pylint.mdx deleted file mode 100644 index 803ab85..0000000 --- a/code-quality/overview/linters/supported/pylint.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: "Pylint" -description: "Learn about Pylint, the versatile Python linter for error detection, code smell elimination, and PEP 8 enforcement." ---- -[**Pylint**](https://pypi.org/project/pylint/) is a linter for Python. - -You can enable the Pylint linter with: - -```shell -trunk check enable pylint -``` - - -![pylint example output](/assets/pylint.gif) - - -## Auto Enabling - -Pylint will be auto-enabled if any of its config files are present: _`pylintrc`, `.pylintrc`_. - -## Settings - -Pylint supports the following config files: - -* `pylintrc` -* `.pylintrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Usage Notes - -You may specify additional pylint plugins in your `.pylintrc`, using the line `load-plugins=...` - -If you want to run the plugin `pylint-django` as part of your setup, you would add the line `load-plugins=pylint_django` to your `.pylintrc`, but you **also** need to tell trunk to install the package: - -```yaml -- pylint@2.11.0: - packages: - - pylint-django@2.4.4 -``` - -## Links - -* [Pylint site](https://pypi.org/project/pylint/) -* Pylint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/pylint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/pyright.mdx b/code-quality/overview/linters/supported/pyright.mdx deleted file mode 100644 index a645e3c..0000000 --- a/code-quality/overview/linters/supported/pyright.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "pyright" -description: "pyright is a linter for Python" ---- -[**pyright**](https://github.com/microsoft/pyright) is a linter for Python. - -You can enable the pyright linter with: - -```shell -trunk check enable pyright -``` - -## Auto Enabling - -pyright will be auto-enabled if a `pyrightconfig.json` config file is present. - -## Settings - -pyright supports the following config files: - -* `pyrightconfig.json` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [pyright site](https://github.com/microsoft/pyright) -* pyright Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/pyright) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/regal.mdx b/code-quality/overview/linters/supported/regal.mdx deleted file mode 100644 index 95a75c9..0000000 --- a/code-quality/overview/linters/supported/regal.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "regal" -description: "regal is a linter for Rego" ---- -[**regal**](https://github.com/StyraInc/regal) is a linter for Rego. - -You can enable the regal linter with: - -```shell -trunk check enable regal -``` - -## Auto Enabling - -regal will be auto-enabled if a `.regal/config.yaml` config file is present. - -## Settings - -regal supports the following config files: - -* `.regal/config.yaml` - -Unlike with most tools under `trunk check`, these files cannot be moved. - -## Links - -* [regal site](https://github.com/StyraInc/regal) -* regal Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/regal) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/remark-lint.mdx b/code-quality/overview/linters/supported/remark-lint.mdx deleted file mode 100644 index b48e07c..0000000 --- a/code-quality/overview/linters/supported/remark-lint.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "remark-lint" -description: "remark-lint is a linter for Markdown" ---- -[**remark-lint**](https://github.com/remarkjs/remark-lint#readme) is a linter for Markdown. - -You can enable the remark-lint linter with: - -```shell -trunk check enable remark-lint -``` - -## Auto Enabling - -remark-lint will be auto-enabled if any of its config files are present: _`.remarkrc`, `.remarkrc.json`, `.remarkrc.cjs`_. 
- -## Settings - -remark-lint supports the following config files: - -* `.remarkrc` -* `.remarkrc.json` -* `.remarkrc.cjs` -* `.remarkrc.mjs` -* `.remarkrc.js` -* `.remarkrc.yaml` -* `.remarkrc.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.remarkrc.yaml` if your project does not already have one. - -## Links - -* [remark-lint site](https://github.com/remarkjs/remark-lint#readme) -* remark-lint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/remark-lint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/renovate.mdx b/code-quality/overview/linters/supported/renovate.mdx deleted file mode 100644 index 3b41d38..0000000 --- a/code-quality/overview/linters/supported/renovate.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "renovate" -description: "renovate is a linter for Renovate" ---- -[**renovate**](https://github.com/renovatebot/renovate#readme) is a linter for Renovate. - -You can enable the renovate linter with: - -```shell -trunk check enable renovate -``` - -## Auto Enabling - -renovate will be auto-enabled if any of its config files are present: _`renovate.json`, `renovate.json5`, `.github/renovate.json`_. - -## Settings - -renovate supports the following config files: - -* `renovate.json` -* `renovate.json5` -* `.github/renovate.json` -* `.github/renovate.json5` -* `.gitlab/renovate.json` -* `.gitlab/renovate.json5` -* `.renovaterc` -* `.renovaterc.json` -* `.renovaterc.json5` - -Unlike with most tools under `trunk check`, these files cannot be moved. 
- -## Links - -* [renovate site](https://github.com/renovatebot/renovate#readme) -* renovate Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/renovate) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/rome.mdx b/code-quality/overview/linters/supported/rome.mdx deleted file mode 100644 index 5a5f913..0000000 --- a/code-quality/overview/linters/supported/rome.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "rome" -description: "rome is a linter for JavaScript and TypeScript" ---- -[**rome**](https://github.com/rome/tools#readme) is a linter for JavaScript and TypeScript. - -You can enable the rome linter with: - -```shell -trunk check enable rome -``` - -## Auto Enabling - -rome will never be auto-enabled. It must be enabled manually. - -## Settings - -rome supports the following config files: - -* `rome.json` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [rome site](https://github.com/rome/tools#readme) -* rome Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/rome) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/rubocop.mdx b/code-quality/overview/linters/supported/rubocop.mdx deleted file mode 100644 index 6f15e2b..0000000 --- a/code-quality/overview/linters/supported/rubocop.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "rubocop" -description: "rubocop is a linter for Ruby" ---- -[**rubocop**](https://github.com/rubocop/rubocop#readme) is a linter for Ruby. - -You can enable the rubocop linter with: - -```shell -trunk check enable rubocop -``` - -## Auto Enabling - -rubocop will be auto-enabled if a `.rubocop.yml` config file is present. 
- -## Settings - -rubocop supports the following config files: - -* `.rubocop.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [rubocop site](https://github.com/rubocop/rubocop#readme) -* rubocop Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/rubocop) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/ruff.mdx b/code-quality/overview/linters/supported/ruff.mdx deleted file mode 100644 index 55e0938..0000000 --- a/code-quality/overview/linters/supported/ruff.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "Ruff" -description: "Discover Ruff, a speedy Python linter for large codebases. Integrates with CI/IDEs and supports .py, .pyi, and Jupyter Notebooks." ---- -[**Ruff**](https://github.com/astral-sh/ruff) is a linter for Python. - -ruff is composed of several linter commands. - -`ruff` is for formatting general python code. - -You can enable the `ruff` linter with: - -```shell -trunk check enable ruff -``` - -`ruff-nbqa` is for extra support for Jupyter notebooks. - -You can enable the `ruff-nbqa` linter with: - -```shell -trunk check enable ruff-nbqa -``` - -## Auto Enabling - -Ruff will be auto-enabled if any _Python, Python-interface, Jupyter, Python, Python-interface, Python, Python-interface, Python, Python-interface, Python or Python-interface_ files are present. - -## Settings - -Ruff supports the following config files: - -* `ruff.toml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `ruff.toml` if your project does not already have one. 
- -## Links - -* [Ruff site](https://github.com/astral-sh/ruff) -* Ruff Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/ruff) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/rufo.mdx b/code-quality/overview/linters/supported/rufo.mdx deleted file mode 100644 index b381efa..0000000 --- a/code-quality/overview/linters/supported/rufo.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "rufo" -description: "rufo is a linter for Ruby" ---- -[**rufo**](https://github.com/ruby-formatter/rufo#readme) is a linter for Ruby. - -You can enable the rufo linter with: - -```shell -trunk check enable rufo -``` - -## Auto Enabling - -rufo will be auto-enabled if a `.rufo` config file is present. - -## Settings - -rufo supports the following config files: - -* `.rufo` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [rufo site](https://github.com/ruby-formatter/rufo#readme) -* rufo Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/rufo) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/rustfmt.mdx b/code-quality/overview/linters/supported/rustfmt.mdx deleted file mode 100644 index baa78a4..0000000 --- a/code-quality/overview/linters/supported/rustfmt.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "rustfmt" -description: "Rustfmt is a code formatting tool for Rust that helps ensure your code adheres to the community-driven coding standards and style guidelines." ---- -[**rustfmt**](https://github.com/rust-lang/rustfmt) is a formatter for Rust. 
- -You can enable the rustfmt formatter with: - -```shell -trunk check enable rustfmt -``` - -## Auto Enabling - -rustfmt will be auto-enabled if any _Rust_ files are present. - -## Settings - -rustfmt supports the following config files: - -* `rustfmt.toml` -* `.rustfmt.toml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.rustfmt.toml` if your project does not already have one. - -## Usage Notes - -We currently use the version of `rustfmt` packaged with rust, so for `rustfmt` version, specify your Rust version (for example `rustfmt@1.61.0`). - -If you have `edition` in your `cargo.toml`, `rustfmt` also needs the same information in `.rustfmt.toml` in your repo root. For example, your `.rustfmt.toml` might contain: - -```toml -edition = "2021" -``` - -## Links - -* [rustfmt site](https://github.com/rust-lang/rustfmt) -* rustfmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/rustfmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/scalafmt.mdx b/code-quality/overview/linters/supported/scalafmt.mdx deleted file mode 100644 index bd3c557..0000000 --- a/code-quality/overview/linters/supported/scalafmt.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "scalafmt" -description: "scalafmt is a linter for Scala" ---- -[**scalafmt**](https://github.com/scalameta/scalafmt#readme) is a linter for Scala. - -You can enable the scalafmt linter with: - -```shell -trunk check enable scalafmt -``` - -## Auto Enabling - -scalafmt will be auto-enabled if a `.scalafmt.conf` config file is present. - -## Settings - -scalafmt supports the following config files: - -* `.scalafmt.conf` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. 
See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [scalafmt site](https://github.com/scalameta/scalafmt#readme) -* scalafmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/scalafmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/semgrep.mdx b/code-quality/overview/linters/supported/semgrep.mdx deleted file mode 100644 index b9e0ad9..0000000 --- a/code-quality/overview/linters/supported/semgrep.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "semgrep" -description: "semgrep is a linter for Go, Java, JavaScript, JSON, Python, Ruby, TypeScript and YAML" ---- -[**semgrep**](https://github.com/returntocorp/semgrep#readme) is a linter for Go, Java, JavaScript, JSON, Python, Ruby, TypeScript and YAML. - -You can enable the semgrep linter with: - -```shell -trunk check enable semgrep -``` - -## Auto Enabling - -semgrep will be auto-enabled if any of its config files are present: _`.semgrep.yaml`, `.semgrep.yml`_. - -## Settings - -semgrep supports the following config files: - -* `.semgrep.yaml` -* `.semgrep.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [semgrep site](https://github.com/returntocorp/semgrep#readme) -* semgrep Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/semgrep) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/shellcheck.mdx b/code-quality/overview/linters/supported/shellcheck.mdx deleted file mode 100644 index 70ffdb5..0000000 --- a/code-quality/overview/linters/supported/shellcheck.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "ShellCheck" -description: "ShellCheck is a static analysis tool designed to identify and report syntax errors and potential issues in shell scripts" ---- -[**ShellCheck**](https://www.shellcheck.net/) is a linter for Bash. - -You can enable the ShellCheck linter with: - -```shell -trunk check enable shellcheck -``` - - -![shellcheck example output](/assets/shellcheck.gif) - - -## Auto Enabling - -ShellCheck will be auto-enabled if any _Shell_ files are present. - -## Settings - -ShellCheck supports the following config files: - -* `.shellcheckrc` -* `shellcheckrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.shellcheckrc` if your project does not already have one. 
- -## Links - -* [ShellCheck site](https://www.shellcheck.net/) -* ShellCheck Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/shellcheck) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/shfmt.mdx b/code-quality/overview/linters/supported/shfmt.mdx deleted file mode 100644 index fc8e171..0000000 --- a/code-quality/overview/linters/supported/shfmt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "shfmt" -description: "shfmt is a linter for Bash" ---- -[**shfmt**](https://github.com/mvdan/sh#readme) is a linter for Bash. - -You can enable the shfmt linter with: - -```shell -trunk check enable shfmt -``` - -## Auto Enabling - -shfmt will be auto-enabled if any _Shell_ files are present. - -## Links - -* [shfmt site](https://github.com/mvdan/sh#readme) -* shfmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/shfmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/sort-package-json.mdx b/code-quality/overview/linters/supported/sort-package-json.mdx deleted file mode 100644 index c1bee65..0000000 --- a/code-quality/overview/linters/supported/sort-package-json.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "sort-package-json" -description: "sort-package-json is a linter for package.json" ---- -[**sort-package-json**](https://github.com/keithamus/sort-package-json#readme) is a linter for package.json. - -You can enable the sort-package-json linter with: - -```shell -trunk check enable sort-package-json -``` - -## Auto Enabling - -sort-package-json will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [sort-package-json site](https://github.com/keithamus/sort-package-json#readme) -* sort-package-json Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/sort-package-json) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/sourcery.mdx b/code-quality/overview/linters/supported/sourcery.mdx deleted file mode 100644 index 41b0bca..0000000 --- a/code-quality/overview/linters/supported/sourcery.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "sourcery" -description: "sourcery is a linter for Python" ---- -[**sourcery**](https://sourcery.ai/) is a linter for Python. - -You can enable the sourcery linter with: - -```shell -trunk check enable sourcery -``` - -## Auto Enabling - -sourcery will never be auto-enabled. It must be enabled manually. - -## Settings - -sourcery supports the following config files: - -* `.sourcery.yaml` -* `sourcery.yaml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [sourcery site](https://sourcery.ai/) -* sourcery Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/sourcery) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/sql-formatter.mdx b/code-quality/overview/linters/supported/sql-formatter.mdx deleted file mode 100644 index d8010de..0000000 --- a/code-quality/overview/linters/supported/sql-formatter.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "sql-formatter" -description: "sql-formatter is a linter for SQL" ---- -[**sql-formatter**](https://github.com/sql-formatter-org/sql-formatter#readme) is a linter for SQL. 
- -You can enable the sql-formatter linter with: - -```shell -trunk check enable sql-formatter -``` - -## Auto Enabling - -sql-formatter will never be auto-enabled. It must be enabled manually. - -## Links - -* [sql-formatter site](https://github.com/sql-formatter-org/sql-formatter#readme) -* sql-formatter Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/sql-formatter) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/sqlfluff.mdx b/code-quality/overview/linters/supported/sqlfluff.mdx deleted file mode 100644 index bdb0d76..0000000 --- a/code-quality/overview/linters/supported/sqlfluff.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "SQLFluff" -description: "SQLFluff is a dialect-flexible and configurable SQL linter." ---- -[**SQLFluff**](https://github.com/sqlfluff/sqlfluff) is a linter for SQL. - -You can enable the SQLFluff linter with: - -```shell -trunk check enable sqlfluff -``` - -## Auto Enabling - -SQLFluff will be auto-enabled if a `.sqlfluff` config file is present. - -## Settings - -SQLFluff supports the following config files: - -* `.sqlfluff` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.sqlfluff` if your project does not already have one. - -## Usage Notes - -Sqlfluff is only configured as a linter by default because its formatting capabilities are limited. 
To turn sqlfluff formatting on, enable its subcommand: - -```yaml -lint: - enabled: - - sqlfluff@: - commands: [lint, fix] -``` - -## Links - -* [SQLFluff site](https://github.com/sqlfluff/sqlfluff) -* SQLFluff Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/sqlfluff) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/sqlfmt.mdx b/code-quality/overview/linters/supported/sqlfmt.mdx deleted file mode 100644 index b0ba481..0000000 --- a/code-quality/overview/linters/supported/sqlfmt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "sqlfmt" -description: "sqlfmt is a linter for SQL" ---- -[**sqlfmt**](https://github.com/tconbeer/sqlfmt#readme) is a linter for SQL. - -You can enable the sqlfmt linter with: - -```shell -trunk check enable sqlfmt -``` - -## Auto Enabling - -sqlfmt will never be auto-enabled. It must be enabled manually. - -## Links - -* [sqlfmt site](https://github.com/tconbeer/sqlfmt#readme) -* sqlfmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/sqlfmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/squawk.mdx b/code-quality/overview/linters/supported/squawk.mdx deleted file mode 100644 index 859c881..0000000 --- a/code-quality/overview/linters/supported/squawk.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Squawk" -description: "squawk is a linter for SQL" ---- -[**Squawk**](https://github.com/sbdchd/squawk) is a linter for SQL. - -You can enable the Squawk linter with: - -```shell -trunk check enable squawk -``` - -## Auto Enabling - -Squawk will be auto-enabled if a `.squawk.toml` config file is present. 
- -## Settings - -Squawk supports the following config files: - -* `.squawk.toml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [Squawk site](https://github.com/sbdchd/squawk) -* Squawk Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/squawk) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/standardrb.mdx b/code-quality/overview/linters/supported/standardrb.mdx deleted file mode 100644 index 34758c4..0000000 --- a/code-quality/overview/linters/supported/standardrb.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "standardrb" -description: "standardrb is a linter for Ruby" ---- -[**standardrb**](https://github.com/testdouble/standard#readme) is a linter for Ruby. - -You can enable the standardrb linter with: - -```shell -trunk check enable standardrb -``` - -## Auto Enabling - -standardrb will be auto-enabled if a `.standard.yml` config file is present. - -## Settings - -standardrb supports the following config files: - -* `.standard.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [standardrb site](https://github.com/testdouble/standard#readme) -* standardrb Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/standardrb) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/stringslint.mdx b/code-quality/overview/linters/supported/stringslint.mdx deleted file mode 100644 index c2359bd..0000000 --- a/code-quality/overview/linters/supported/stringslint.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "stringslint" -description: "stringslint is a linter for Swift" ---- -[**stringslint**](https://github.com/dral3x/StringsLint#readme) is a linter for Swift. - -You can enable the stringslint linter with: - -```shell -trunk check enable stringslint -``` - -## Auto Enabling - -stringslint will be auto-enabled if any of its config files are present: _`.stringslint.yml`, `.stringslint.yaml`, `.stringslint`_. - -## Settings - -stringslint supports the following config files: - -* `.stringslint.yml` -* `.stringslint.yaml` -* `.stringslint` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [stringslint site](https://github.com/dral3x/StringsLint#readme) -* stringslint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/stringslint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/stylelint.mdx b/code-quality/overview/linters/supported/stylelint.mdx deleted file mode 100644 index 1750b1c..0000000 --- a/code-quality/overview/linters/supported/stylelint.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "stylelint" -description: "stylelint is a linter for CSS, SCSS" ---- -[**stylelint**](https://github.com/stylelint/stylelint#readme) is a linter for CSS, SCSS. - -You can enable the stylelint linter with: - -```shell -trunk check enable stylelint -``` - -## Auto Enabling - -stylelint will be auto-enabled if any of its config files are present: _`stylelint.config.js`, `.stylelintrc.js`, `stylelint.config.mjs`_. - -## Settings - -stylelint supports the following config files: - -* `stylelint.config.js` -* `.stylelintrc.js` -* `stylelint.config.mjs` -* `.stylelintrc.mjs` -* `stylelint.config.cjs` -* `.stylelintrc.cjs` -* `.stylelintrc.json` -* `.stylelintrc.yml` -* `.stylelintrc.yaml` -* `.stylelintrc` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [stylelint site](https://github.com/stylelint/stylelint#readme) -* stylelint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/stylelint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/stylua.mdx b/code-quality/overview/linters/supported/stylua.mdx deleted file mode 100644 index caeb14c..0000000 --- a/code-quality/overview/linters/supported/stylua.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "stylua" -description: "stylua is a linter for Lua" ---- -[**stylua**](https://github.com/JohnnyMorganz/StyLua/tree/main) is a linter for Lua. - -You can enable the stylua linter with: - -```shell -trunk check enable stylua -``` - -## Auto Enabling - -stylua will be auto-enabled if any of its config files are present: _`stylua.toml`, `.stylua.toml`_. - -## Settings - -stylua supports the following config files: - -* `stylua.toml` -* `.stylua.toml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `stylua.toml` if your project does not already have one. 
- -## Links - -* [stylua site](https://github.com/JohnnyMorganz/StyLua/tree/main) -* stylua Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/stylua) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/svgo.mdx b/code-quality/overview/linters/supported/svgo.mdx deleted file mode 100644 index c443117..0000000 --- a/code-quality/overview/linters/supported/svgo.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "SVGO" -description: "SVGO, or Scalable Vector Graphics Optimizer, is a tool designed to optimize SVG files, making them smaller and more efficient without compromising on quality." ---- -[**SVGO**](https://github.com/svg/svgo) is a linter for SVG. - -You can enable the SVGO linter with: - -```shell -trunk check enable svgo -``` - -## Auto Enabling - -SVGO will be auto-enabled if any _SVG_ files are present. - -## Settings - -SVGO supports the following config files: - -* `svgo.config.js` -* `svgo.config.mjs` -* `svgo.config.cjs` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `svgo.config.js` if your project does not already have one. 
- -## Links - -* [SVGO site](https://github.com/svg/svgo) -* SVGO Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/svgo) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/swiftformat.mdx b/code-quality/overview/linters/supported/swiftformat.mdx deleted file mode 100644 index 5346141..0000000 --- a/code-quality/overview/linters/supported/swiftformat.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "swiftformat" -description: "swiftformat is a linter for Swift" ---- -[**swiftformat**](https://github.com/nicklockwood/SwiftFormat#readme) is a linter for Swift. - -You can enable the swiftformat linter with: - -```shell -trunk check enable swiftformat -``` - -## Auto Enabling - -swiftformat will be auto-enabled if a `.swiftformat` config file is present. - -## Settings - -swiftformat supports the following config files: - -* `.swiftformat` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [swiftformat site](https://github.com/nicklockwood/SwiftFormat#readme) -* swiftformat Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/swiftformat) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/swiftlint.mdx b/code-quality/overview/linters/supported/swiftlint.mdx deleted file mode 100644 index c9928c2..0000000 --- a/code-quality/overview/linters/supported/swiftlint.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "swiftlint" -description: "swiftlint is a linter for Swift" ---- -[**swiftlint**](https://github.com/realm/SwiftLint#readme) is a linter for Swift. 
- -You can enable the swiftlint linter with: - -```shell -trunk check enable swiftlint -``` - -## Auto Enabling - -swiftlint will be auto-enabled if any of its config files are present: _`.swiftlint.yml`, `.swiftlint.yaml`, `.swiftlint`_. - -## Settings - -swiftlint supports the following config files: - -* `.swiftlint.yml` -* `.swiftlint.yaml` -* `.swiftlint` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [swiftlint site](https://github.com/realm/SwiftLint#readme) -* swiftlint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/swiftlint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/taplo.mdx b/code-quality/overview/linters/supported/taplo.mdx deleted file mode 100644 index 5f2e296..0000000 --- a/code-quality/overview/linters/supported/taplo.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "taplo" -description: "taplo is a linter for TOML" ---- -[**taplo**](https://github.com/tamasfe/taplo#readme) is a linter for TOML. - -You can enable the taplo linter with: - -```shell -trunk check enable taplo -``` - -## Auto Enabling - -taplo will be auto-enabled if any _TOML_ files are present. - -## Settings - -taplo supports the following config files: - -* `.taplo.toml` -* `taplo.toml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
- -## Links - -* [taplo site](https://github.com/tamasfe/taplo#readme) -* taplo Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/taplo) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/terraform.mdx b/code-quality/overview/linters/supported/terraform.mdx deleted file mode 100644 index f147f71..0000000 --- a/code-quality/overview/linters/supported/terraform.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Terraform" -description: "The command line interface to Terraform is the terraform command, which accepts a variety of subcommands such as terraform validate or terraform fmt" ---- -[**Terraform**](https://developer.hashicorp.com/terraform/cli/commands) is a formatter for Terraform. - -You can enable the Terraform formatter with: - -```shell -trunk check enable terraform -``` - -## Auto Enabling - -Terraform will never be auto-enabled. It must be enabled manually. - -## Usage Notes - -We currently support `terraform validate` and `terraform fmt`, but only `fmt` is enabled by default when you add `terraform` to your enabled list in `trunk.yaml`. To enable `validate`, add this to your `trunk.yaml`: - -```yaml -lint: - enabled: - - terraform@: - commands: [validate, fmt] -``` - -Note: you must run `terraform init` before running `trunk check` with `terraform validate` enabled (both locally, or on CI). 
- -## Links - -* [Terraform site](https://developer.hashicorp.com/terraform/cli/commands) -* Terraform Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/terraform) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/terragrunt.mdx b/code-quality/overview/linters/supported/terragrunt.mdx deleted file mode 100644 index e43e0ac..0000000 --- a/code-quality/overview/linters/supported/terragrunt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "terragrunt" -description: "terragrunt is a linter for Terragrunt" ---- -[**terragrunt**](https://terragrunt.gruntwork.io/docs/getting-started/quick-start/) is a linter for Terragrunt. - -You can enable the terragrunt linter with: - -```shell -trunk check enable terragrunt -``` - -## Auto Enabling - -terragrunt will never be auto-enabled. It must be enabled manually. - -## Links - -* [terragrunt site](https://terragrunt.gruntwork.io/docs/getting-started/quick-start/) -* terragrunt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/terragrunt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/terrascan.mdx b/code-quality/overview/linters/supported/terrascan.mdx deleted file mode 100644 index e0fde54..0000000 --- a/code-quality/overview/linters/supported/terrascan.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "terrascan" -description: "terrascan is a linter for Terrascan, Security and Terraform" ---- -[**terrascan**](https://github.com/tenable/terrascan#readme) is a linter for Terrascan, Security and Terraform. - -You can enable the terrascan linter with: - -```shell -trunk check enable terrascan -``` - -## Auto Enabling - -terrascan will never be auto-enabled. It must be enabled manually. 
- -## Links - -* [terrascan site](https://github.com/tenable/terrascan#readme) -* terrascan Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/terrascan) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/tflint.mdx b/code-quality/overview/linters/supported/tflint.mdx deleted file mode 100644 index 380f220..0000000 --- a/code-quality/overview/linters/supported/tflint.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "TFLint" -description: "TFLint is an essential linter designed for Terraform. It helps improve code quality, maintainability, and security in infrastructure as code (IaC) projects." ---- -[**TFLint**](https://github.com/rhysd/actionlint) is a linter for Terraform. - -You can enable the TFLint linter with: - -```shell -trunk check enable tflint -``` - -## Auto Enabling - -TFLint will be auto-enabled if any _Terraform_ files are present. - -## Settings - -TFLint supports the following config files: - -* `.tflint.hcl` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [TFLint site](https://github.com/rhysd/actionlint) -* TFLint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/tflint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/tfsec.mdx b/code-quality/overview/linters/supported/tfsec.mdx deleted file mode 100644 index ada8e68..0000000 --- a/code-quality/overview/linters/supported/tfsec.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "tfsec" -description: "tfsec is a linter for Security and Terraform" ---- -[**tfsec**](https://github.com/aquasecurity/tfsec) is a linter for Security and Terraform. 
- -You can enable the tfsec linter with: - -```shell -trunk check enable tfsec -``` - -## Auto Enabling - -tfsec will never be auto-enabled. It must be enabled manually. - -## Settings - -tfsec supports the following config files: - -* `tfsec.yml` -* `tfsec.yaml` -* `.tfsec/config.json` -* `.tfsec/config.yml` -* `.tfsec/config.yaml` - -Unlike with most tools under `trunk check`, these files cannot be moved. - -## Links - -* [tfsec site](https://github.com/aquasecurity/tfsec) -* tfsec Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/tfsec) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/tofu.mdx b/code-quality/overview/linters/supported/tofu.mdx deleted file mode 100644 index aa4e39e..0000000 --- a/code-quality/overview/linters/supported/tofu.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "tofu" -description: "tofu is a linter for Terraform" ---- -[**tofu**](https://github.com/opentofu/opentofu) is a linter for Terraform. - -You can enable the tofu linter with: - -```shell -trunk check enable tofu -``` - -## Auto Enabling - -tofu will never be auto-enabled. It must be enabled manually. - -## Links - -* [tofu site](https://github.com/opentofu/opentofu) -* tofu Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/tofu) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/trivy.mdx b/code-quality/overview/linters/supported/trivy.mdx deleted file mode 100644 index 2a6cf93..0000000 --- a/code-quality/overview/linters/supported/trivy.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Trivy" -description: "Explore our guide on Trivy, the comprehensive vulnerability scanner. Learn about its features, installation, and configuration." 
---- -[**Trivy**](https://github.com/aquasecurity/trivy) is a linter for Security. - -You can enable the Trivy linter with: - -```shell -trunk check enable trivy -``` - - -![trivy example output](/assets/trivy.gif) - - -## Auto Enabling - -Trivy will be auto-enabled if any of its config files are present: _`trivy.yaml`, `.trivyignore`, `.trivyignore.yaml`_. - -## Settings - -Trivy supports the following config files: - -* `trivy.yaml` -* `.trivyignore` -* `.trivyignore.yaml` -* `trivy-secret.yaml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Usage Notes - -Trivy has the following subcommands: - -* `config` -* Runs `trivy config` ([docs) ](https://aquasecurity.github.io/trivy/latest/docs/scanner/misconfiguration/))to scan for misconfigurations in infrastructure-as-code files. Enabled by default -* `fx-vuln` -* Runs `trivy fs --scanners vuln` ([docs](https://aquasecurity.github.io/trivy/latest/docs/target/filesystem/)) to scan for security vulnerabilities. Disabled by default. -* `fs-secret` -* Runs `trivy fs --scanners secret` ([docs](https://aquasecurity.github.io/trivy/latest/docs/target/filesystem/)) to scan for secrets. Disabled by default. 
- -To enable/disable these, add the subcommands you want enabled in your `.trunk/trunk.yaml` as such: - -```yaml -lint: - enabled: - - trivy@0.45.1: - commands: [config, fs-vuln] -``` - -## Links - -* [Trivy site](https://github.com/aquasecurity/trivy) -* Trivy Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/trivy) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/trufflehog.mdx b/code-quality/overview/linters/supported/trufflehog.mdx deleted file mode 100644 index 03386fa..0000000 --- a/code-quality/overview/linters/supported/trufflehog.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "Trufflehog" -description: "Discover Trufflehog with our detailed guide. Learn installation, configuration, usage, and how to integrate it with other linters for optimal code security." ---- -[**Trufflehog**](https://github.com/trufflesecurity/trufflehog) is a linter for Security. - -trufflehog is composed of several linter commands. - -`trufflehog` runs trufflehog normally. - -You can enable the `trufflehog` linter with: - -```shell -trunk check enable trufflehog -``` - -`trufflehog-git` also runs trufflehog on the git history. - -You can enable the `trufflehog-git` linter with: - -```shell -trunk check enable trufflehog-git -``` - -## Auto Enabling - -Trufflehog will be auto-enabled if any _all_ files are present. 
- -## Links - -* [Trufflehog site](https://github.com/trufflesecurity/trufflehog) -* Trufflehog Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/trufflehog) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/txtpbfmt.mdx b/code-quality/overview/linters/supported/txtpbfmt.mdx deleted file mode 100644 index 712859d..0000000 --- a/code-quality/overview/linters/supported/txtpbfmt.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "txtpbfmt" -description: "txtpbfmt is a linter for Textproto" ---- -[**txtpbfmt**](https://github.com/protocolbuffers/txtpbfmt/) is a linter for Textproto. - -You can enable the txtpbfmt linter with: - -```shell -trunk check enable txtpbfmt -``` - -## Auto Enabling - -txtpbfmt will be auto-enabled if any _Textproto_ files are present. - -## Links - -* [txtpbfmt site](https://github.com/protocolbuffers/txtpbfmt/) -* txtpbfmt Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/txtpbfmt) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/vale.mdx b/code-quality/overview/linters/supported/vale.mdx deleted file mode 100644 index 569716d..0000000 --- a/code-quality/overview/linters/supported/vale.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "vale" -description: "vale is a linter for prose" ---- -[**vale**](https://vale.sh/) is a linter for prose. - -You can enable the vale linter with: - -```shell -trunk check enable vale -``` - -## Auto Enabling - -vale will be auto-enabled if a `.vale.ini` config file is present. - -## Settings - -vale supports the following config files: - -* `.vale.ini` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. 
Trunk Code Quality provides a default `.vale.ini` if your project does not already have one. - -## Links - -* [vale site](https://vale.sh/) -* vale Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/vale) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/yamllint.mdx b/code-quality/overview/linters/supported/yamllint.mdx deleted file mode 100644 index 2a670c3..0000000 --- a/code-quality/overview/linters/supported/yamllint.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "Yamllint" -description: "Yamllint is a linter that checks for formatting discrepancies, key-value pair issues, and syntax errors, ensuring your YAML files are syntactically correct." ---- -[**Yamllint**](https://github.com/adrienverge/yamllint) is a linter for YAML. - -You can enable the Yamllint linter with: - -```shell -trunk check enable yamllint -``` - -## Auto Enabling - -Yamllint will be auto-enabled if any _Yaml_ files are present. - -## Settings - -Yamllint supports the following config files: - -* `.yamllint` -* `.yamllint.yaml` -* `.yamllint.yml` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. Trunk Code Quality provides a default `.yamllint.yaml` if your project does not already have one. 
- -## Links - -* [Yamllint site](https://github.com/adrienverge/yamllint) -* Yamllint Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/yamllint) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/supported/yapf.mdx b/code-quality/overview/linters/supported/yapf.mdx deleted file mode 100644 index 2a7d9b6..0000000 --- a/code-quality/overview/linters/supported/yapf.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "yapf" -description: "yapf is a linter for Python" ---- -[**yapf**](https://github.com/google/yapf#readme) is a linter for Python. - -You can enable the yapf linter with: - -```shell -trunk check enable yapf -``` - -## Auto Enabling - -yapf will be auto-enabled if any of its config files are present: _`.style.yapf`, `.yapfignore`_. - -## Settings - -yapf supports the following config files: - -* `.style.yapf` -* `.yapfignore` - -You can move these files to `.trunk/configs` and `trunk check` will still find them. See [Moving Linters](../configure-linters#moving-linters) for more info. - -## Links - -* [yapf site](https://github.com/google/yapf#readme) -* yapf Trunk Code Quality [integration source](https://github.com/trunk-io/plugins/tree/main/linters/yapf) -* Trunk Code Quality's [open source plugins repo](https://github.com/trunk-io/plugins/tree/main) diff --git a/code-quality/overview/linters/upgrades.mdx b/code-quality/overview/linters/upgrades.mdx deleted file mode 100644 index 7027f0e..0000000 --- a/code-quality/overview/linters/upgrades.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Upgrades" ---- -Run `trunk upgrade` to update the Trunk CLI and all your plugins, linters, tools, and runtimes. - -#### Upgrade scopes - -Upgrades can be filtered to different scopes by adding them to `trunk upgrade `. The scopes available are: - -| Scope | Description | -|---|---| -| cli | Only upgrade the Trunk CLI to the latest version. 
| -| plugins | Upgrade any that you have sourced to their latest public release. The latest version must be compatible with your current `cli` version in order for the upgrade to be applied. | -| check | Upgrade any linters that you have enabled. Linters will be upgraded to the latest validated version that have passed tests in our [plugins](https://github.com/trunk-io/plugins) repo. Additional recommended linters can also be enabled by running with `-y`. | -| tools | Upgrade any that you have enabled. Tools will be upgraded to their latest public release. Note that any enabled linters that share a name with an enabled tool must keep their versions synced. | -| runtimes | Upgrade any that you have enabled. Runtimes will be upgraded to their recommended version for running linters, as specified by Trunk. | - -#### Automatic upgrades - -When running locally, Trunk automatically checks for upgrades in the background on a regular cadence. You'll see notifications for these upgrades appear in the VSCode Extension or at the end of a `trunk check` run. To stop seeing these notifications, you can run `trunk actions disable trunk-upgrade-available`. - -When running in single-player mode, Trunk will automatically upgrade itself in the background and stay up to date. - -#### Automatic upgrades with GitHub Actions - -You can configure a GitHub workflow to create PRs with the latest Trunk and tool versions automatically. 
Here's a sample GitHub Action: - -```yaml -name: Nightly -on: - schedule: - - cron: 0 8 * * 1-5 - workflow_dispatch: {} -permissions: read-all -jobs: - trunk_upgrade: - name: Upgrade Trunk - runs-on: ubuntu-latest - permissions: - contents: write # For trunk to create PRs - pull-requests: write # For trunk to create PRs - steps: - - name: Checkout - uses: actions/checkout@v3 - # >>> Install your own deps here (npm install, etc) <<< - - name: Trunk Upgrade - uses: trunk-io/trunk-action/upgrade@v1 -``` - -Then, provide permissions for this GitHub Action to [create and approve pull requests](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/enabling-features-for-your-repository/managing-github-actions-settings-for-a-repository#preventing-github-actions-from-creating-or-approving-pull-requests) by navigating to your repo's **Settings** > **Actions** > **General** > **Workflow permissions** > **Allow GitHub Actions to create and approve pull requests**.\ -\ -You can also set the `arguments` field to filter particular scopes to upgrade and set `base` to define the branch to create a PR against (default `main`). - - -**Triggering further workflow runs** - -PRs created with this GitHub Action will not trigger further workflows by default. If you need the PRs created to trigger further GitHub Action Workflows, [follow the workarounds described here](https://github.com/peter-evans/create-pull-request/blob/main/docs/concepts-guidelines.md#triggering-further-workflow-runs). - - -#### Pinning versions - -If you don't want a linter, tool, or runtime to be upgraded, you can pin its version by appending `!` to the version in your `.trunk/trunk.yaml`. For example: - -```yaml -lint: - enabled: - - pylint@2.17.5! -``` - -#### Plugin repositories and user.yaml - -By default, upgrades are only applied to your repo's `.trunk/trunk.yaml`. 
If you're using a plugin repo that enables linters/tools, or if you would like upgrades to be applied to your `.trunk/user.yaml` file, you can run `trunk upgrade --apply-to ` to see upgrades applied there. diff --git a/code-quality/overview/prevent-new-issues/index.mdx b/code-quality/overview/prevent-new-issues/index.mdx deleted file mode 100644 index 61a96ac..0000000 --- a/code-quality/overview/prevent-new-issues/index.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: "Linting in CI" ---- -Trunk Code Quality can be run in CI to prevent new issues form being introduced by PRs and on a nightly/scheduled cadence to report on existing issues. - -### Configuring base branch - -Trunk operates in **hold-the-line** mode by default. This means Trunk will run linters only on the **files that have changed** according to Git, by comparing it to the appropriate upstream branch. - -If you're not using `main` or `master` as the base branch, make sure it's specified in `.trunk/trunk.yaml`. - -```yaml -version: 0.1 -cli: - version: 1.22.2 -repo: - # specify the base branch for hold-the-line - trunk_branch: develop -``` - -### Linting on pull requests - -```yaml -name: Trunk Code Quality -on: - push: - branches: main - pull_request: - branches: main -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # ... other setup steps - - name: Trunk Code Quality - uses: trunk-io/trunk-action@v1 - with: - post-annotations: true - # ... other CI steps -``` - -This step will automatically run Trunk Code Quality to reveal problems found when comparing the branch to `main` or another base branch you configured. 
- -If you want to run the `trunk check` command directly in your workflow, or you're not using GitHub, you can run the following commands: - -```sh -curl -fsSLO --retry 3 https://trunk.io/releases/trunk \ -chmod +x trunk \ -./trunk check --ci -``` - -Trunk Code Quality can be run in CI to prevent new issues form being introduced by PRs and on a nightly/scheduled cadence to report on existing issues. - -### Configuring base branch - -Trunk operates in **hold-the-line** mode by default. This means Trunk will run linters only on the **files that have changed** according to Git, by comparing it to the appropriate upstream branch. - -If you're not using `main` or `master` as the base branch, make sure it's specified in `.trunk/trunk.yaml`. - -```yaml -version: 0. -cli: - version: 1.22.2 -repo: - # specify the base branch for hold-the-line - trunk_branch: develop -``` - -#### Manual configuration and Non-GitHub CI - -If you want to run the `trunk check` command directly in your workflow, or you're not using GitHub, you can run the following commands: - -``` -curl -fsSLO --retry 3 https://trunk.io/releases/trunk \ -chmod +x trunk \ -./trunk check --ci -``` - -#### Skipping Trunk Code Quality on pull requests - -You can include `/trunk skip-check` in the body of a PR description (i.e. the first comment on a given PR) to mark Trunk Code Quality as "skipped". Trunk Code Quality will still run on your PR and report issues, but this will allow the PR to pass a GitHub-required status check on `Trunk Check`. - -This can be helpful if Code Quality is flagging known issues in a given PR that you don't want to ignore, which can come in handy if you're doing a large refactor. - -### Caching and persistence - -* Trunk caches the version of `trunk` itself, linters, formatters, and lint results in `~/.cache/trunk` -* If your build machines are persistent, make sure this directory is not wiped out between CI jobs for best performance. 
If Trunk has to re-download every linter for every job because this directory is wiped out, it will be very slow. -* If your build machines are ephemeral, there are a few options for caching: - * CI systems have support for caching between CI jobs on ephemeral runners: - * [GitHub Actions](https://github.com/actions/cache) - * [CircleCI](https://circleci.com/docs/caching/) - * [Travis CI](https://docs.travis-ci.com/user/caching/) - * You can include a seeded trunk cache in a regularly updated image used for CI by running `trunk check download`, which will download all requirements to `~/.cache/trunk` - -### Hourly and nightly builds - -If you'd like to set Code Quality to run on an hourly/nightly CI, you can run - -```yaml -name: Trunk Code Quality -on: - schedule: - # Run at 4 PM UTC daily (cron uses UTC time) - - cron: '0 16 * * *' - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # ... other setup steps - - name: Trunk Code Quality - uses: trunk-io/trunk-action@v1 - with: - check-mode: all - # ... other CI steps -``` - -You can do the same without Trunk's GitHub Action using the following command: - -```bash -curl -fsSLO --retry 3 https://trunk.io/releases/trunk \ -chmod +x trunk \ -./trunk check --all --ci-progress --monitor=false -``` - -`--ci-progress` will print out the tool's progress every 30 seconds, whereas `--no-progress` will suppress any progress reporting. - -You can also explicitly set the upstream branch if needed via `--upstream`, but we do detect your main branch by default. 
diff --git a/code-quality/overview/prevent-new-issues/migration-guide.mdx b/code-quality/overview/prevent-new-issues/migration-guide.mdx deleted file mode 100644 index 48909d8..0000000 --- a/code-quality/overview/prevent-new-issues/migration-guide.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: "Migration Guide" -hidden: true ---- -### What's deprecated - -The Code Quality dashboard in the Trunk web app has been deprecated, along with the ability to configure and manage CI workflows in the web app. - -### Breaking Changes - -⚠️ **The `--upload` flag has been removed from the Trunk CLI** - -If you're using `--upload` in your scripts or CI configurations, you must remove it. This flag was previously used to send data to our backend for the deprecated Code Quality dashboard but is no longer supported. - -**Action required:** Remove `--upload` from any `trunk check` commands in your CI pipelines or scripts. - -### Migrating nightly and PR jobs - -Nightly and PR jobs configured through the Trunk web app will no longer be supported. However, you can still run these checks by migrating these workflows to run as a step in your existing CI pipelines. - -#### Run on PRs on GitHub Actions - -Trunk provides a [GitHub action](https://github.com/trunk-io/trunk-action) to help you lint your code in CI. You add it as a step to your workflows. To run on pull requests or on a schedule, you can configure the appropriate triggers for your workflow. - -```yaml -name: Trunk Code Quality -on: - push: - branches: - - main - pull_request: - branches: - - main - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # ... other setup steps - - name: Trunk Code Quality - uses: trunk-io/trunk-action@v1 - # ... other CI steps -``` - -You will still receive inline comments about your errors if you run the action with the `post-annotations` argument. 
- -#### Run on PRs using other CI providers - -You can also set up checks on PR without using the provided GitHub action. Download the CLI in line and run the Code Quality CLI in CI mode to check a PR. Note that you will not receive inline arguments with this approach. - -```sh -curl -fsSLO --retry 3 https://trunk.io/releases/trunk -chmod +x trunk -./trunk check --ci -``` - -Here’s an example of the commands in a GitHub Actions workflow, but you can do the same in virtually any CI pipeline. - -```yaml -name: Trunk Code Quality -on: - push: - branches: - - main - pull_request: - branches: - - main - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # ... other setup steps - - run: | - curl -fsSLO --retry 3 https://trunk.io/releases/trunk - chmod +x trunk - ./trunk check --ci --ci-progress - # ... other CI steps -``` - -#### Run nightly on GitHub Actions - -To run Trunk’s [GitHub action](https://github.com/trunk-io/trunk-action) to lint your entire code base nightly or on a schedule, you can specify the \`check-mode: all\` argument when running the action. - -```yaml -name: Trunk Code Quality -on: - schedule: - # Run at 4 PM UTC daily (cron uses UTC time) - - cron: '0 16 * * *' - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # ... other setup steps - - name: Trunk Code Quality - uses: trunk-io/trunk-action@v1 - with: - check-mode: all - # ... other CI steps -``` - -#### Run nightly using other CI providers - -Specify the `--all` flag on your `trunk check` command to run on your entire codebase. Trunk is Git aware and checks only files changed in a PR by default. Specifying `--all` will instead check the whole code base. - -```sh -curl -fsSLO --retry 3 https://trunk.io/releases/trunk -chmod +x trunk -./trunk check --all --ci --ci-progress -``` - -Here’s an example of the command in a GitHub Actions workflow. This command will also work in any other CI provider. 
- -```yaml -name: Trunk Code Quality -on: - schedule: - # Run at 4 PM UTC daily (cron uses UTC time) - - cron: '0 16 * * *' - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - # ... other setup steps - - run: | - curl -fsSLO --retry 3 https://trunk.io/releases/trunk - chmod +x trunk - ./trunk check --all --ci --ci-progress - # ... other CI steps -``` - -### Caching - -You can cache Trunk’s binary and install tools to speed up your CI runs. Trunk caches the version of `trunk` itself, linters, formatters, and lint results in the `~/.cache/trunk` folder. Consult the documentation for your CI provider to learn about caching this folder. diff --git a/code-quality/overview/setup-and-installation/github-integration.mdx b/code-quality/overview/setup-and-installation/github-integration.mdx deleted file mode 100644 index 020f199..0000000 --- a/code-quality/overview/setup-and-installation/github-integration.mdx +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: "Nightly report (Deprecated)" -hidden: true ---- - -**Deprecation Warning**\ -The Trunk Code Quality web app, Code Quality on PRs, and Code Quality Nightly will soon be deprecated. Follow our [migration guide](../prevent-new-issues/migration-guide) to migrate off these features. - - -Trunk Code Quality can post its results to the [Trunk Code Quality web app](https://app.trunk.io/login?intent=code%20quality). This allows you to view your repository's Code Quality history over time, track quality trends, and browse issues to help prioritize fixes. - -### Connect your Trunk organization to GitHub - -Sign up at [app.trunk.io](https://app.trunk.io/signup?intent=code%20quality), create a Trunk organization, and connect it to your repositories. You will need to grant the following GitHub App permissions. - - - -### Set Up Trunk Code Quality - -Once your Trunk organization is connected to GitHub, create a .trunk repo in your account or organization and grant Trunk permissions to access the repo. 
The .trunk repo will hold the workflows to scan your codebase and pull requests. [Learn more about the .trunk repo](./github-integration#uploading-results). - - - -### Configure Slack Notifications (optional) - -If you would like to receive notifications for new issues Trunk finds in your repo, you can configure Trunk to be connected to Slack. - - - -### **How Trunk Uploads Results** - -The upload feature of Trunk Code Quality will upload all of the issues found by Trunk to the Trunk services. To get an accurate picture of the state of your repository, you'll want to upload all of the Trunk Code Quality issues for your whole repository. - -Generally, this is done within your Continuous Integration system (CI) automatically whenever **pull requests are filed or pushed to a specific branch** in your repo. Trunk Code Quality can also **run periodically** to check for new vulnerabilities in your dependencies. - -#### **How Does It Work?** - -Under the hood, the GitHub integration does the following for your organization to enable Trunk Code Quality in GitHub Actions Workflows: - -* An installation of the Trunk.io GitHub app in your GitHub organization -* A `.trunk` repository in your GitHub organization. - -#### **What is a `.trunk` repository?** - -The `.trunk` repository contains the workflows run to scan your codebase and pull requests. We recommend creating a `.trunk` repository in your GitHub organization using [this template repository](https://github.com/trunk-io/.trunk-template). - -Your `.trunk` repository must be added to your Trunk GitHub app installation. You can verify this by navigating to: `https://github.com/organizations//settings/installations`, clicking "configure" next to Trunk-io, and verifying that the repository access is either "All repositories" or that your `.trunk` repository is selected. 
- -To find Code Quality issues in your repositories and pull requests, we dispatch GitHub Actions workflows in your `.trunk` repository, which checks out your repositories and pull requests and then run `trunk check` in them. This strategy allows you to: - -* start using Trunk Code Quality in all your repositories without any configuration, and -* be in full control over the environment where we analyze your code, since we're running on your GitHub Actions runners. - - -🚧 `.trunk` should have private visibility - -Since we use workflow runs in `.trunk` to analyze any repository in your organization and record Code Quality findings, you should think carefully about who has permission to view workflow runs in your `.trunk` repository. For most organizations, simply making your `.trunk` repository private will be sufficient. - - -#### (optional) Custom setup logic - -If you need to do some setup before `trunk check` runs in `your-org/your-repo`, you can [define a GitHub composite action](https://docs.github.com/en/actions/creating-actions/creating-a-composite-action) in `.trunk/setup-ci/action.yaml` in `your-repo`. This can be important if, for example, a linter needs some generated code to be present before it can run: - -```yaml -name: Trunk Code Quality setup -description: Set up dependencies for Trunk Code Quality - -runs: - using: composite - steps: - - name: Build required trunk check inputs - shell: bash - run: bazel build ... --build_tag_filters=pre-lint - - - name: Install eslint dependencies - shell: bash - run: npm install -``` - -Read more in the documentation for [our GitHub Action](https://github.com/trunk-io/trunk-action#custom-setup). 
diff --git a/code-quality/overview/setup-and-installation/index.mdx b/code-quality/overview/setup-and-installation/index.mdx deleted file mode 100644 index 791509f..0000000 --- a/code-quality/overview/setup-and-installation/index.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "Setup and installation" -hidden: true ---- -Trunk Code Quality is easy to adopt for new and legacy projects alike. You can run Trunk Code Quality using your existing linter configurations, incrementally address existing problems, and prevent new issues from being committed to your repo. - -You can start using Code Quality in your repos in 4 steps. - - - - Initialize Trunk in your repo to generate Trunk config files and get linter recommendations based on your project's files. - - - Check for existing issues in your project. You can address problems up front, use hold-the-line to fix them incrementally, and configure ignores for irrelevant issues. - - - Set up automated runs on commits, before pushes, and on PRs to prevent new issues from appearing in your repo. 
- - diff --git a/docs.json b/docs.json index 2390efe..50d94fc 100644 --- a/docs.json +++ b/docs.json @@ -12,9 +12,6 @@ "light": "/logo/light.svg", "dark": "/logo/dark.svg" }, - "background": { - "decoration": "gradient" - }, "fonts": { "heading": { "family": "Neue", @@ -43,119 +40,88 @@ "navigation": { "tabs": [ { - "tab": "Overview", - "pages": [ - "index" + "tab": "Home", + "groups": [ + { + "group": "Platform", + "pages": ["introduction"] + } ] }, { "tab": "Merge Queue", "groups": [ { - "group": "Merge Queue", + "group": "Overview", + "pages": ["merge-queue/merge-queue"] + }, + { + "group": "Getting Started", + "pages": [ + "merge-queue/getting-started", + "merge-queue/getting-started/install-and-create-your-queue", + "merge-queue/getting-started/configure-branch-protection", + "merge-queue/getting-started/configure-ci-status-checks", + "merge-queue/getting-started/test-your-setup", + "merge-queue/migrating-from-github-merge-queue" + ] + }, + { + "group": "Optimizations", + "pages": [ + "merge-queue/optimizations", + "merge-queue/optimizations/predictive-testing", + "merge-queue/optimizations/anti-flake-protection", + "merge-queue/optimizations/batching", + "merge-queue/optimizations/priority-merging", + "merge-queue/optimizations/optimistic-merging", + "merge-queue/optimizations/pending-failure-depth", + "merge-queue/optimizations/direct-merge-to-main" + ] + }, + { + "group": "Parallel Queues", + "pages": [ + "merge-queue/optimizations/parallel-queues", + "merge-queue/optimizations/parallel-queues/bazel", + "merge-queue/optimizations/parallel-queues/nx", + "merge-queue/optimizations/parallel-queues/api" + ] + }, + { + "group": "Using the Queue", + "pages": [ + "merge-queue/using-the-queue", + "merge-queue/using-the-queue/reference", + "merge-queue/using-the-queue/monitor-queue-status", + "merge-queue/using-the-queue/handle-failed-pull-requests", + "merge-queue/using-the-queue/stacked-pull-requests", + "merge-queue/using-the-queue/emergency-pull-requests" + ] + 
}, + { + "group": "Integrations", "pages": [ - "merge-queue/merge-queue", - { - "group": "Getting Started", - "root": "merge-queue/getting-started/index", - "pages": [ - "merge-queue/getting-started/install-and-create-your-queue", - "merge-queue/getting-started/configure-branch-protection", - "merge-queue/getting-started/configure-ci-status-checks", - "merge-queue/getting-started/test-your-setup" - ] - }, - "merge-queue/migrating-from-github-merge-queue", - { - "group": "Optimizations", - "root": "merge-queue/optimizations/index", - "pages": [ - "merge-queue/optimizations/predictive-testing", - "merge-queue/optimizations/anti-flake-protection", - { - "group": "Parallel queues", - "root": "merge-queue/optimizations/parallel-queues/index", - "pages": [ - "merge-queue/optimizations/parallel-queues/bazel", - "merge-queue/optimizations/parallel-queues/nx", - "merge-queue/optimizations/parallel-queues/api" - ] - }, - "merge-queue/optimizations/batching", - "merge-queue/optimizations/priority-merging", - "merge-queue/optimizations/optimistic-merging", - "merge-queue/optimizations/pending-failure-depth", - "merge-queue/optimizations/direct-merge-to-main" - ] - }, - { - "group": "Using the Queue", - "root": "merge-queue/using-the-queue/index", - "pages": [ - "merge-queue/using-the-queue/reference", - "merge-queue/using-the-queue/monitor-queue-status", - "merge-queue/using-the-queue/handle-failed-pull-requests", - "merge-queue/using-the-queue/emergency-pull-requests", - "merge-queue/using-the-queue/force-merge" - ] - }, "merge-queue/integration-for-slack", - "merge-queue/chrome-extension", - "merge-queue/webhooks", - { - "group": "Administration", - "root": "merge-queue/administration/index", - "pages": [ - "merge-queue/administration/advanced-settings", - "merge-queue/administration/terraform", - "merge-queue/administration/metrics" - ] - }, - { - "group": "Reference", - "root": "merge-queue/reference/index", - "pages": [ - "merge-queue/reference/merge-queue-cli-reference", 
- { - "group": "API reference", - "root": "/merge-queue/reference/merge", - "openapi": { - "directory": "/merge-queue/reference/merge", - "source": "/openapi.json" - }, - "pages": [ - { - "group": "Pull Request Endpoints", - "pages": [ - "POST /cancelPullRequest", - "POST /getSubmittedPullRequest", - "POST /restartTestsOnPullRequest", - "POST /setImpactedTargets", - "POST /submitPullRequest", - "POST /getMergeQueueTestingDetails" - ] - }, - { - "group": "Metrics Endpoints", - "pages": [ - "GET /getMergeQueueMetrics" - ] - }, - { - "group": "Queue Endpoints", - "pages": [ - "POST /createQueue", - "POST /deleteQueue", - "POST /getQueue", - "POST /updateQueue" - ] - } - ] - }, - "merge-queue/reference/common-problems", - "merge-queue/reference/troubleshooting" - ] - } + "merge-queue/webhooks" + ] + }, + { + "group": "Administration", + "pages": [ + "merge-queue/administration", + "merge-queue/administration/advanced-settings", + "merge-queue/administration/metrics" + ] + }, + { + "group": "Reference", + "pages": [ + "merge-queue/reference", + "merge-queue/reference/merge-queue-cli-reference", + "merge-queue/reference/merge", + "merge-queue/reference/common-problems", + "merge-queue/reference/troubleshooting" ] } ] @@ -164,183 +130,119 @@ "tab": "Flaky Tests", "groups": [ { - "group": "Flaky Tests", + "group": "Overview", + "pages": [ + "flaky-tests/overview", + "flaky-tests/get-started", + "flaky-tests/managing-detected-flaky-tests", + "flaky-tests/dashboard" + ] + }, + { + "group": "Test Frameworks", + "pages": [ + "flaky-tests/get-started/frameworks", + "flaky-tests/get-started/frameworks/android", + "flaky-tests/get-started/frameworks/bazel", + "flaky-tests/get-started/frameworks/behave", + "flaky-tests/get-started/frameworks/cypress", + "flaky-tests/get-started/frameworks/dart-test", + "flaky-tests/get-started/frameworks/googletest", + "flaky-tests/get-started/frameworks/gotestsum", + "flaky-tests/get-started/frameworks/gradle", + 
"flaky-tests/get-started/frameworks/jasmine", + "flaky-tests/get-started/frameworks/jest", + "flaky-tests/get-started/frameworks/karma", + "flaky-tests/get-started/frameworks/kotest", + "flaky-tests/get-started/frameworks/maven", + "flaky-tests/get-started/frameworks/minitest", + "flaky-tests/get-started/frameworks/mocha", + "flaky-tests/get-started/frameworks/nightwatch", + "flaky-tests/get-started/frameworks/nunit", + "flaky-tests/get-started/frameworks/other-test-frameworks", + "flaky-tests/get-started/frameworks/pest", + "flaky-tests/get-started/frameworks/phpunit", + "flaky-tests/get-started/frameworks/playwright", + "flaky-tests/get-started/frameworks/pytest", + "flaky-tests/get-started/frameworks/robot-framework", + "flaky-tests/get-started/frameworks/rspec", + "flaky-tests/get-started/frameworks/rspec/manual-uploads", + "flaky-tests/get-started/frameworks/rust", + "flaky-tests/get-started/frameworks/swift-testing", + "flaky-tests/get-started/frameworks/testplan", + "flaky-tests/get-started/frameworks/vitest", + "flaky-tests/get-started/frameworks/xctest" + ] + }, + { + "group": "CI Providers", + "pages": [ + "flaky-tests/get-started/ci-providers", + "flaky-tests/get-started/ci-providers/atlassian-bamboo", + "flaky-tests/get-started/ci-providers/azure-devops-pipelines", + "flaky-tests/get-started/ci-providers/bitbucket-pipelines", + "flaky-tests/get-started/ci-providers/buildkite", + "flaky-tests/get-started/ci-providers/circleci", + "flaky-tests/get-started/ci-providers/droneci", + "flaky-tests/get-started/ci-providers/github-actions", + "flaky-tests/get-started/ci-providers/gitlab", + "flaky-tests/get-started/ci-providers/google-cloud-build", + "flaky-tests/get-started/ci-providers/jenkins", + "flaky-tests/get-started/ci-providers/otherci", + "flaky-tests/get-started/ci-providers/semaphoreci", + "flaky-tests/get-started/ci-providers/travisci", + "flaky-tests/get-started/multiple-repositories" + ] + }, + { + "group": "Detection & Operations", + "pages": [ + 
"flaky-tests/detection", + "flaky-tests/detection/pass-on-retry-monitor", + "flaky-tests/detection/threshold-monitor", + "flaky-tests/detection/flag-as-flaky", + "flaky-tests/infrastructure-failure-protection", + "flaky-tests/the-importance-of-pr-test-results", + "flaky-tests/quarantining", + "flaky-tests/quarantine-service-availability", + "flaky-tests/github-pull-request-comments" + ] + }, + { + "group": "Ticketing Integrations", "pages": [ - { - "group": "Overview", - "root": "flaky-tests/overview", - "pages": [ - "flaky-tests/dashboard" - ] - }, - { - "group": "Getting Started", - "root": "flaky-tests/get-started/index", - "pages": [ - { - "group": "Test frameworks", - "root": "flaky-tests/get-started/frameworks/index", - "pages": [ - "flaky-tests/get-started/frameworks/android", - "flaky-tests/get-started/frameworks/bazel", - "flaky-tests/get-started/frameworks/behave", - "flaky-tests/get-started/frameworks/rust", - "flaky-tests/get-started/frameworks/cypress", - "flaky-tests/get-started/frameworks/dart-test", - "flaky-tests/get-started/frameworks/gotestsum", - "flaky-tests/get-started/frameworks/googletest", - "flaky-tests/get-started/frameworks/gradle", - "flaky-tests/get-started/frameworks/jasmine", - "flaky-tests/get-started/frameworks/jest", - "flaky-tests/get-started/frameworks/karma", - "flaky-tests/get-started/frameworks/kotest", - "flaky-tests/get-started/frameworks/maven", - "flaky-tests/get-started/frameworks/minitest", - "flaky-tests/get-started/frameworks/mocha", - "flaky-tests/get-started/frameworks/nightwatch", - "flaky-tests/get-started/frameworks/nunit", - "flaky-tests/get-started/frameworks/pest", - "flaky-tests/get-started/frameworks/phpunit", - "flaky-tests/get-started/frameworks/playwright", - "flaky-tests/get-started/frameworks/pytest", - "flaky-tests/get-started/frameworks/robot-framework", - { - "group": "RSpec", - "root": "flaky-tests/get-started/frameworks/rspec/index", - "pages": [ - 
"flaky-tests/get-started/frameworks/rspec/manual-uploads" - ] - }, - "flaky-tests/get-started/frameworks/swift-testing", - "flaky-tests/get-started/frameworks/testplan", - "flaky-tests/get-started/frameworks/vitest", - "flaky-tests/get-started/frameworks/xctest", - "flaky-tests/get-started/frameworks/other-test-frameworks" - ] - }, - { - "group": "CI Providers", - "root": "flaky-tests/get-started/ci-providers/index", - "pages": [ - "flaky-tests/get-started/ci-providers/atlassian-bamboo", - "flaky-tests/get-started/ci-providers/azure-devops-pipelines", - "flaky-tests/get-started/ci-providers/bitbucket-pipelines", - "flaky-tests/get-started/ci-providers/buildkite", - "flaky-tests/get-started/ci-providers/circleci", - "flaky-tests/get-started/ci-providers/droneci", - "flaky-tests/get-started/ci-providers/github-actions", - "flaky-tests/get-started/ci-providers/gitlab", - "flaky-tests/get-started/ci-providers/google-cloud-build", - "flaky-tests/get-started/ci-providers/jenkins", - "flaky-tests/get-started/ci-providers/semaphoreci", - "flaky-tests/get-started/ci-providers/travisci", - "flaky-tests/get-started/ci-providers/otherci" - ] - }, - "flaky-tests/get-started/multiple-repositories" - ] - }, - { - "group": "Flaky test detection", - "root": "flaky-tests/detection/index", - "pages": [ - "flaky-tests/detection/pass-on-retry-monitor", - "flaky-tests/detection/failure-rate-monitor", - "flaky-tests/detection/failure-count-monitor", - "flaky-tests/detection/flag-as-flaky", - "flaky-tests/detection/the-importance-of-pr-test-results", - "flaky-tests/detection/infrastructure-failure-protection" - ] - }, - { - "group": "Flaky test management", - "root": "flaky-tests/management/index", - "pages": [ - "flaky-tests/management/managing-detected-flaky-tests", - "flaky-tests/management/test-labels", - "flaky-tests/management/github-pull-request-comments", - { - "group": "Ticketing", - "root": "flaky-tests/management/ticketing/index", - "pages": [ - 
"flaky-tests/management/ticketing/jira-integration", - "flaky-tests/management/ticketing/linear-integration", - "flaky-tests/management/ticketing/other-ticketing-platforms" - ] - } - ] - }, - { - "group": "Quarantining", - "root": "flaky-tests/quarantining/index", - "pages": [ - "flaky-tests/quarantining/quarantine-service-availability" - ] - }, - { - "group": "Webhooks", - "root": "flaky-tests/webhooks/index", - "pages": [ - "flaky-tests/webhooks/slack-integration", - "flaky-tests/webhooks/microsoft-teams-integration", - "flaky-tests/webhooks/github-issues-integration", - "flaky-tests/webhooks/linear-integration", - "flaky-tests/webhooks/jira-integration" - ] - }, - { - "group": "Agents", - "root": "flaky-tests/agents/index", - "pages": [ - "flaky-tests/agents/autofix-flaky-tests", - "flaky-tests/agents/autofix-ci-failures", - "links/mcp-reference" - ] - }, - { - "group": "Reference", - "root": "flaky-tests/reference/index", - "pages": [ - "flaky-tests/reference/cli-reference", - { - "group": "API reference", - "root": "flaky-tests/reference/api-reference", - "openapi": { - "directory": "/flaky-tests/reference/api-reference", - "source": "/openapi.json" - }, - "pages": [ - "POST /flaky-tests/get-test-details", - "POST /flaky-tests/link-ticket-to-test-case", - "POST /flaky-tests/list-failing-tests", - "POST /flaky-tests/list-unhealthy-tests", - "POST /flaky-tests/list-quarantined-tests" - ] - }, - { - "group": "MCP reference", - "root": "flaky-tests/reference/mcp-reference/index", - "pages": [ - { - "group": "Configuration", - "root": "flaky-tests/reference/mcp-reference/configuration/index", - "pages": [ - "flaky-tests/reference/mcp-reference/configuration/cursor-ide", - "flaky-tests/reference/mcp-reference/configuration/github-copilot-ide", - "flaky-tests/reference/mcp-reference/configuration/claude-code-cli", - "flaky-tests/reference/mcp-reference/configuration/claude-code-plugin", - "flaky-tests/reference/mcp-reference/configuration/gemini-cli", - 
"flaky-tests/reference/mcp-reference/configuration/bearer-auth" - ] - }, - "flaky-tests/reference/mcp-reference/search-test", - "flaky-tests/reference/mcp-reference/fix-flaky-test", - "flaky-tests/reference/mcp-reference/set-up-test-uploads", - "flaky-tests/reference/mcp-reference/investigate-ci-failure" - ] - } - ] - } + "flaky-tests/ticketing-integrations", + "flaky-tests/ticketing-integrations/jira-integration", + "flaky-tests/ticketing-integrations/linear-integration", + "flaky-tests/ticketing-integrations/other-ticketing-platforms" + ] + }, + { + "group": "Webhooks", + "pages": [ + "flaky-tests/webhooks", + "flaky-tests/webhooks/slack-integration", + "flaky-tests/webhooks/microsoft-teams-integration", + "flaky-tests/webhooks/github-issues-integration", + "flaky-tests/webhooks/linear-integration" + ] + }, + { + "group": "APIs & CLI", + "pages": ["flaky-tests/flaky-tests", "flaky-tests/uploader"] + }, + { + "group": "MCP Server", + "pages": [ + "flaky-tests/use-mcp-server", + "flaky-tests/use-mcp-server/configuration", + "flaky-tests/use-mcp-server/configuration/cursor-ide", + "flaky-tests/use-mcp-server/configuration/github-copilot-ide", + "flaky-tests/use-mcp-server/configuration/claude-code-cli", + "flaky-tests/use-mcp-server/configuration/gemini-cli", + "flaky-tests/use-mcp-server/mcp-tool-reference", + "flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis", + "flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads" ] } ] @@ -349,1452 +251,42 @@ "tab": "Setup & Administration", "groups": [ { - "group": "Setup & Administration", + "group": "Account Setup", + "pages": ["setup-and-administration/connecting-to-trunk"] + }, + { + "group": "Administration", "pages": [ - "setup-and-administration/connecting-to-trunk", "setup-and-administration/managing-your-organization", "setup-and-administration/github-app-permissions", - "setup-and-administration/trunk-sudo-app", "setup-and-administration/support", "setup-and-administration/billing", 
- "setup-and-administration/security", - { - "group": "API Reference", - "root": "setup-and-administration/apis/index", - "pages": [ - "links/flaky-tests-api", - "links/merge-queue-api", - "setup-and-administration/apis/webhooks" - ] - } + "setup-and-administration/security" ] - } - ] - }, - { - "tab": "Code Quality CLI", - "groups": [ + }, { - "group": "Code Quality CLI", + "group": "API Reference", "pages": [ - { - "group": "Overview", - "root": "code-quality/overview/index", - "pages": [ - "code-quality/overview/initialize-trunk", - "code-quality/overview/deal-with-existing-issues", - "code-quality/overview/prevent-new-issues/index", - { - "group": "IDE integrations", - "root": "code-quality/overview/ide-integration/index", - "pages": [ - "code-quality/overview/ide-integration/vscode", - "code-quality/overview/ide-integration/openai-codex-support", - "code-quality/overview/ide-integration/neovim", - "code-quality/overview/ide-integration/github-codespaces" - ] - }, - { - "group": "Code Quality CLI", - "root": "code-quality/overview/getting-started/index", - "pages": [ - "code-quality/overview/getting-started/code-quality", - "code-quality/overview/getting-started/install", - "code-quality/overview/getting-started/tools", - { - "group": "Actions", - "root": "code-quality/overview/getting-started/actions/index", - "pages": [ - "code-quality/overview/getting-started/actions/git-hooks" - ] - }, - "code-quality/overview/getting-started/announce", - "code-quality/overview/getting-started/compatibility", - "code-quality/overview/getting-started/caching", - { - "group": "Commands reference", - "root": "code-quality/overview/getting-started/commands-reference/index", - "pages": [ - "code-quality/overview/getting-started/commands-reference/code-quality", - "code-quality/overview/getting-started/commands-reference/actions" - ] - }, - { - "group": "Configuration", - "root": "code-quality/overview/getting-started/configuration/index", - "pages": [ - { - "group": "Plugins", - 
"root": "code-quality/overview/getting-started/configuration/plugins/index", - "pages": [ - "code-quality/overview/getting-started/configuration/plugins/external-repositories", - "code-quality/overview/getting-started/configuration/plugins/exported-configs" - ] - }, - "code-quality/overview/getting-started/configuration/runtimes", - "code-quality/overview/getting-started/configuration/tools", - { - "group": "Lint", - "root": "code-quality/overview/getting-started/configuration/lint/index", - "pages": [ - "code-quality/overview/getting-started/configuration/lint/definitions", - "code-quality/overview/getting-started/configuration/lint/commands", - "code-quality/overview/getting-started/configuration/lint/output", - "code-quality/overview/getting-started/configuration/lint/output-parsing", - "code-quality/overview/getting-started/configuration/lint/files-and-caching", - "code-quality/overview/getting-started/configuration/lint/dependencies", - "code-quality/overview/getting-started/configuration/lint/auto-enable" - ] - }, - { - "group": "Actions", - "root": "code-quality/overview/getting-started/configuration/actions/index", - "pages": [ - "code-quality/overview/getting-started/configuration/actions/notifications", - "code-quality/overview/getting-started/configuration/actions/logging-and-troubleshooting" - ] - }, - "code-quality/overview/getting-started/configuration/merge", - "code-quality/overview/getting-started/configuration/telemetry", - "code-quality/overview/getting-started/configuration/per-user-overrides" - ] - } - ] - }, - { - "group": "Linters", - "root": "code-quality/overview/linters/index", - "pages": [ - { - "group": "Supported linters", - "root": "code-quality/overview/linters/supported/index", - "pages": [ - "code-quality/overview/linters/supported/actionlint", - "code-quality/overview/linters/supported/ansible-lint", - "code-quality/overview/linters/supported/autopep8", - "code-quality/overview/linters/supported/bandit", - 
"code-quality/overview/linters/supported/biome", - "code-quality/overview/linters/supported/black", - "code-quality/overview/linters/supported/brakeman", - "code-quality/overview/linters/supported/buf", - "code-quality/overview/linters/supported/buildifier", - "code-quality/overview/linters/supported/cfnlint", - "code-quality/overview/linters/supported/checkov", - "code-quality/overview/linters/supported/circleci", - "code-quality/overview/linters/supported/clang-format", - "code-quality/overview/linters/supported/clang-tidy", - "code-quality/overview/linters/supported/clippy", - "code-quality/overview/linters/supported/cmake-format", - "code-quality/overview/linters/supported/codespell", - "code-quality/overview/linters/supported/cspell", - "code-quality/overview/linters/supported/cue-fmt", - "code-quality/overview/linters/supported/dart", - "code-quality/overview/linters/supported/deno", - "code-quality/overview/linters/supported/detekt", - "code-quality/overview/linters/supported/djlint", - "code-quality/overview/linters/supported/dotenv-linter", - "code-quality/overview/linters/supported/dotnet-format", - "code-quality/overview/linters/supported/dustilock", - "code-quality/overview/linters/supported/eslint", - "code-quality/overview/linters/supported/flake8", - "code-quality/overview/linters/supported/git-diff-check", - "code-quality/overview/linters/supported/gitleaks", - "code-quality/overview/linters/supported/gofmt", - "code-quality/overview/linters/supported/gofumpt", - "code-quality/overview/linters/supported/goimports", - "code-quality/overview/linters/supported/gokart", - "code-quality/overview/linters/supported/golangci-lint", - "code-quality/overview/linters/supported/golines", - "code-quality/overview/linters/supported/google-java-format", - "code-quality/overview/linters/supported/graphql-schema-linter", - "code-quality/overview/linters/supported/hadolint", - "code-quality/overview/linters/supported/haml-lint", - 
"code-quality/overview/linters/supported/isort", - "code-quality/overview/linters/supported/iwyu", - "code-quality/overview/linters/supported/ktlint", - "code-quality/overview/linters/supported/kube-linter", - "code-quality/overview/linters/supported/markdown-link-check", - "code-quality/overview/linters/supported/markdown-table-prettify", - "code-quality/overview/linters/supported/markdownlint", - "code-quality/overview/linters/supported/markdownlint-cli2", - "code-quality/overview/linters/supported/mypy", - "code-quality/overview/linters/supported/nancy", - "code-quality/overview/linters/supported/nixpkgs-fmt", - "code-quality/overview/linters/supported/opa", - "code-quality/overview/linters/supported/osv-scanner", - "code-quality/overview/linters/supported/oxipng", - "code-quality/overview/linters/supported/perlcritic", - "code-quality/overview/linters/supported/perltidy", - "code-quality/overview/linters/supported/php-cs-fixer", - "code-quality/overview/linters/supported/phpstan", - "code-quality/overview/linters/supported/pmd", - "code-quality/overview/linters/supported/pragma-once", - "code-quality/overview/linters/supported/pre-commit-hooks", - "code-quality/overview/linters/supported/prettier", - "code-quality/overview/linters/supported/prisma", - "code-quality/overview/linters/supported/psscriptanalyzer", - "code-quality/overview/linters/supported/pylint", - "code-quality/overview/linters/supported/pyright", - "code-quality/overview/linters/supported/regal", - "code-quality/overview/linters/supported/remark-lint", - "code-quality/overview/linters/supported/renovate", - "code-quality/overview/linters/supported/rome", - "code-quality/overview/linters/supported/rubocop", - "code-quality/overview/linters/supported/ruff", - "code-quality/overview/linters/supported/rufo", - "code-quality/overview/linters/supported/rustfmt", - "code-quality/overview/linters/supported/scalafmt", - "code-quality/overview/linters/supported/semgrep", - 
"code-quality/overview/linters/supported/shellcheck", - "code-quality/overview/linters/supported/shfmt", - "code-quality/overview/linters/supported/sort-package-json", - "code-quality/overview/linters/supported/sourcery", - "code-quality/overview/linters/supported/sql-formatter", - "code-quality/overview/linters/supported/sqlfluff", - "code-quality/overview/linters/supported/sqlfmt", - "code-quality/overview/linters/supported/squawk", - "code-quality/overview/linters/supported/standardrb", - "code-quality/overview/linters/supported/stringslint", - "code-quality/overview/linters/supported/stylelint", - "code-quality/overview/linters/supported/stylua", - "code-quality/overview/linters/supported/svgo", - "code-quality/overview/linters/supported/swiftformat", - "code-quality/overview/linters/supported/swiftlint", - "code-quality/overview/linters/supported/taplo", - "code-quality/overview/linters/supported/terraform", - "code-quality/overview/linters/supported/terragrunt", - "code-quality/overview/linters/supported/terrascan", - "code-quality/overview/linters/supported/tflint", - "code-quality/overview/linters/supported/tfsec", - "code-quality/overview/linters/supported/tofu", - "code-quality/overview/linters/supported/trivy", - "code-quality/overview/linters/supported/trufflehog", - "code-quality/overview/linters/supported/txtpbfmt", - "code-quality/overview/linters/supported/vale", - "code-quality/overview/linters/supported/yamllint", - "code-quality/overview/linters/supported/yapf" - ] - }, - "code-quality/overview/linters/run-linters", - "code-quality/overview/linters/manage-linters", - "code-quality/overview/linters/configure-linters", - "code-quality/overview/linters/ignoring-issues-and-files", - "code-quality/overview/linters/custom-linters", - "code-quality/overview/linters/shared-configs", - "code-quality/overview/linters/upgrades" - ] - }, - "code-quality/overview/debugging", - "code-quality/overview/licensing" - ] - } + "setup-and-administration/apis", + 
"setup-and-administration/apis/webhooks" ] } ] } - ], - "global": { - "anchors": [ - { - "href": "https://app.trunk.io", - "icon": "play", - "anchor": "Open app" - }, - { - "href": "https://slack.trunk.io", - "icon": "slack", - "anchor": "Slack community" - }, - { - "href": "https://trunk.io/changelog", - "icon": "bullhorn", - "anchor": "Changelog" - }, - { - "href": "https://features.trunk.io", - "icon": "circle-star", - "anchor": "Feature requests" - } - ] - } + ] }, "contextual": { - "options": [ - "copy", - "view", - "chatgpt", - "claude" - ] + "options": ["copy", "view", "chatgpt", "claude"] }, "footer": { "socials": { "x": "https://x.com/trunk_io", "github": "https://github.com/trunk-io" - }, - "links": [ - { - "items": [ - { - "href": "https://app.trunk.io", - "label": "Open app" - }, - { - "href": "https://slack.trunk.io", - "label": "Slack community" - }, - { - "href": "https://trunk.io/changelog", - "label": "Changelog" - }, - { - "href": "https://features.trunk.io", - "label": "Feature requests" - } - ] - } - ] + } }, "seo": { "metatags": { "robots": "noindex" } - }, - "redirects": [ - { - "source": "/adminstration", - "destination": "/setup-and-administration/managing-your-organization" - }, - { - "source": "/check", - "destination": "/code-quality/overview" - }, - { - "source": "/check/cli", - "destination": "/code-quality/overview/cli/getting-started" - }, - { - "source": "/check/configuration", - "destination": "/code-quality/overview/cli/getting-started/configuration" - }, - { - "source": "/check/get-started", - "destination": "/code-quality/overview/setup-and-installation" - }, - { - "source": "/check/usage", - "destination": "/code-quality/overview/cli/getting-started" - }, - { - "source": "/check/performance", - "destination": "/code-quality/overview" - }, - { - "source": "/check/supported-linters", - "destination": "/code-quality/overview/linters/supported" - }, - { - "source": "/check/tools/configuration", - "destination": 
"/code-quality/overview/cli/getting-started/configuration" - }, - { - "source": "/check/under-the-hood", - "destination": "/code-quality/overview" - }, - { - "source": "/check/github-integration", - "destination": "/code-quality/overview/setup-and-installation/github-integration" - }, - { - "source": "/check/configuration/supported", - "destination": "/code-quality/overview/linters/supported" - }, - { - "source": "/check/configuration/supported/ansible-lint", - "destination": "/code-quality/overview/linters/supported/ansible-lint" - }, - { - "source": "/check/configuration/supported/codespell", - "destination": "/code-quality/overview/linters/supported/codespell" - }, - { - "source": "/check/configuration/supported/cspell", - "destination": "/code-quality/overview/linters/supported/cspell" - }, - { - "source": "/check/configuration/supported/gitleaks", - "destination": "/code-quality/overview/linters/supported/gitleaks" - }, - { - "source": "/check/configuration/supported/git-diff-check", - "destination": "/code-quality/overview/linters/supported/git-diff-check" - }, - { - "source": "/check/configuration/supported/pre-commit-hooks", - "destination": "/code-quality/overview/linters/supported/pre-commit-hooks" - }, - { - "source": "/check/configuration/supported/pmd", - "destination": "/code-quality/overview/linters/supported/pmd" - }, - { - "source": "/check/configuration/supported/shellcheck", - "destination": "/code-quality/overview/linters/supported/shellcheck" - }, - { - "source": "/check/configuration/supported/shfmt", - "destination": "/code-quality/overview/linters/supported/shfmt" - }, - { - "source": "/check/configuration/supported/buildifier", - "destination": "/code-quality/overview/linters/supported/buildifier" - }, - { - "source": "/check/configuration/supported/iwyu", - "destination": "/code-quality/overview/linters/supported/iwyu" - }, - { - "source": "/check/configuration/supported/pragma-once", - "destination": 
"/code-quality/overview/linters/supported/pragma-once" - }, - { - "source": "/check/configuration/supported/dotnet-format", - "destination": "/code-quality/overview/linters/supported/dotnet-format" - }, - { - "source": "/check/configuration/supported/circleci", - "destination": "/code-quality/overview/linters/supported/circleci" - }, - { - "source": "/check/configuration/supported/cfnlint", - "destination": "/code-quality/overview/linters/supported/cfnlint" - }, - { - "source": "/check/configuration/supported/checkov", - "destination": "/code-quality/overview/linters/supported/checkov" - }, - { - "source": "/check/configuration/supported/stylelint", - "destination": "/code-quality/overview/linters/supported/stylelint" - }, - { - "source": "/check/configuration/supported/prettier", - "destination": "/code-quality/overview/linters/supported/prettier" - }, - { - "source": "/check/configuration/supported/cue-fmt", - "destination": "/code-quality/overview/linters/supported/cue-fmt" - }, - { - "source": "/check/configuration/supported/hadolint", - "destination": "/code-quality/overview/linters/supported/hadolint" - }, - { - "source": "/check/configuration/supported/dotenv-linter", - "destination": "/code-quality/overview/linters/supported/dotenv-linter" - }, - { - "source": "/check/configuration/supported/actionlint", - "destination": "/code-quality/overview/linters/supported/actionlint" - }, - { - "source": "/check/configuration/supported/gofmt", - "destination": "/code-quality/overview/linters/supported/gofmt" - }, - { - "source": "/check/configuration/supported/gofumpt", - "destination": "/code-quality/overview/linters/supported/gofumpt" - }, - { - "source": "/check/configuration/supported/goimports", - "destination": "/code-quality/overview/linters/supported/goimports" - }, - { - "source": "/check/configuration/supported/gokart", - "destination": "/code-quality/overview/linters/supported/gokart" - }, - { - "source": "/check/configuration/supported/golangci-lint", - 
"destination": "/code-quality/overview/linters/supported/golangci-lint" - }, - { - "source": "/check/configuration/supported/golines", - "destination": "/code-quality/overview/linters/supported/golines" - }, - { - "source": "/check/configuration/supported/semgrep", - "destination": "/code-quality/overview/linters/supported/semgrep" - }, - { - "source": "/check/configuration/supported/graphql-schema-linter", - "destination": "/code-quality/overview/linters/supported/graphql-schema-linter" - }, - { - "source": "/check/configuration/supported/haml-lint", - "destination": "/code-quality/overview/linters/supported/haml-lint" - }, - { - "source": "/check/configuration/supported/djlint", - "destination": "/code-quality/overview/linters/supported/djlint" - }, - { - "source": "/check/configuration/supported/google-java-format", - "destination": "/code-quality/overview/linters/supported/google-java-format" - }, - { - "source": "/check/configuration/supported/deno", - "destination": "/code-quality/overview/linters/supported/deno" - }, - { - "source": "/check/configuration/supported/eslint", - "destination": "/code-quality/overview/linters/supported/eslint" - }, - { - "source": "/check/configuration/supported/rome", - "destination": "/code-quality/overview/linters/supported/rome" - }, - { - "source": "/check/configuration/supported/detekt", - "destination": "/code-quality/overview/linters/supported/detekt" - }, - { - "source": "/check/configuration/supported/ktlint", - "destination": "/code-quality/overview/linters/supported/ktlint" - }, - { - "source": "/check/configuration/supported/kube-linter", - "destination": "/code-quality/overview/linters/supported/kube-linter" - }, - { - "source": "/check/configuration/supported/stylua", - "destination": "/code-quality/overview/linters/supported/stylua" - }, - { - "source": "/check/configuration/supported/markdownlint", - "destination": "/code-quality/overview/linters/supported/markdownlint" - }, - { - "source": 
"/check/configuration/supported/remark-lint", - "destination": "/code-quality/overview/linters/supported/remark-lint" - }, - { - "source": "/check/configuration/supported/markdown-link-check", - "destination": "/code-quality/overview/linters/supported/markdown-link-check" - }, - { - "source": "/check/configuration/supported/nixpkgs-fmt", - "destination": "/code-quality/overview/linters/supported/nixpkgs-fmt" - }, - { - "source": "/check/configuration/supported/sort-package-json", - "destination": "/code-quality/overview/linters/supported/sort-package-json" - }, - { - "source": "/check/configuration/supported/perlcritic", - "destination": "/code-quality/overview/linters/supported/perlcritic" - }, - { - "source": "/check/configuration/supported/perltidy", - "destination": "/code-quality/overview/linters/supported/perltidy" - }, - { - "source": "/check/configuration/supported/oxipng", - "destination": "/code-quality/overview/linters/supported/oxipng" - }, - { - "source": "/check/configuration/supported/prisma", - "destination": "/code-quality/overview/linters/supported/prisma" - }, - { - "source": "/check/configuration/supported/buf", - "destination": "/code-quality/overview/linters/supported/buf" - }, - { - "source": "/check/configuration/supported/clang-format", - "destination": "/code-quality/overview/linters/supported/clang-format" - }, - { - "source": "/check/configuration/supported/clang-tidy", - "destination": "/code-quality/overview/linters/supported/clang-tidy" - }, - { - "source": "/check/configuration/supported/autopep8", - "destination": "/code-quality/overview/linters/supported/autopep8" - }, - { - "source": "/check/configuration/supported/bandit", - "destination": "/code-quality/overview/linters/supported/bandit" - }, - { - "source": "/check/configuration/supported/black", - "destination": "/code-quality/overview/linters/supported/black" - }, - { - "source": "/check/configuration/supported/flake8", - "destination": 
"/code-quality/overview/linters/supported/flake8" - }, - { - "source": "/check/configuration/supported/isort", - "destination": "/code-quality/overview/linters/supported/isort" - }, - { - "source": "/check/configuration/supported/mypy", - "destination": "/code-quality/overview/linters/supported/mypy" - }, - { - "source": "/check/configuration/supported/pylint", - "destination": "/code-quality/overview/linters/supported/pylint" - }, - { - "source": "/check/configuration/supported/pyright", - "destination": "/code-quality/overview/linters/supported/pyright" - }, - { - "source": "/check/configuration/supported/yapf", - "destination": "/code-quality/overview/linters/supported/yapf" - }, - { - "source": "/check/configuration/supported/ruff", - "destination": "/code-quality/overview/linters/supported/ruff" - }, - { - "source": "/check/configuration/supported/sourcery", - "destination": "/code-quality/overview/linters/supported/sourcery" - }, - { - "source": "/check/configuration/supported/renovate", - "destination": "/code-quality/overview/linters/supported/renovate" - }, - { - "source": "/check/configuration/supported/brakeman", - "destination": "/code-quality/overview/linters/supported/brakeman" - }, - { - "source": "/check/configuration/supported/rubocop", - "destination": "/code-quality/overview/linters/supported/rubocop" - }, - { - "source": "/check/configuration/supported/rufo", - "destination": "/code-quality/overview/linters/supported/rufo" - }, - { - "source": "/check/configuration/supported/standardrb", - "destination": "/code-quality/overview/linters/supported/standardrb" - }, - { - "source": "/check/configuration/supported/clippy", - "destination": "/code-quality/overview/linters/supported/clippy" - }, - { - "source": "/check/configuration/supported/rustfmt", - "destination": "/code-quality/overview/linters/supported/rustfmt" - }, - { - "source": "/check/configuration/supported/scalafmt", - "destination": "/code-quality/overview/linters/supported/scalafmt" - 
}, - { - "source": "/check/configuration/supported/dustilock", - "destination": "/code-quality/overview/linters/supported/dustilock" - }, - { - "source": "/check/configuration/supported/nancy", - "destination": "/code-quality/overview/linters/supported/nancy" - }, - { - "source": "/check/configuration/supported/osv-scanner", - "destination": "/code-quality/overview/linters/supported/osv-scanner" - }, - { - "source": "/check/configuration/supported/tfsec", - "destination": "/code-quality/overview/linters/supported/tfsec" - }, - { - "source": "/check/configuration/supported/trivy", - "destination": "/code-quality/overview/linters/supported/trivy" - }, - { - "source": "/check/configuration/supported/trufflehog", - "destination": "/code-quality/overview/linters/supported/trufflehog" - }, - { - "source": "/check/configuration/supported/terrascan", - "destination": "/code-quality/overview/linters/supported/terrascan" - }, - { - "source": "/check/configuration/supported/sqlfluff", - "destination": "/code-quality/overview/linters/supported/sqlfluff" - }, - { - "source": "/check/configuration/supported/sqlfmt", - "destination": "/code-quality/overview/linters/supported/sqlfmt" - }, - { - "source": "/check/configuration/supported/sql-formatter", - "destination": "/code-quality/overview/linters/supported/sql-formatter" - }, - { - "source": "/check/configuration/supported/svgo", - "destination": "/code-quality/overview/linters/supported/svgo" - }, - { - "source": "/check/configuration/supported/stringslint", - "destination": "/code-quality/overview/linters/supported/stringslint" - }, - { - "source": "/check/configuration/supported/swiftlint", - "destination": "/code-quality/overview/linters/supported/swiftlint" - }, - { - "source": "/check/configuration/supported/swiftformat", - "destination": "/code-quality/overview/linters/supported/swiftformat" - }, - { - "source": "/check/configuration/supported/terraform", - "destination": 
"/code-quality/overview/linters/supported/terraform" - }, - { - "source": "/check/configuration/supported/tflint", - "destination": "/code-quality/overview/linters/supported/tflint" - }, - { - "source": "/check/configuration/supported/terragrunt", - "destination": "/code-quality/overview/linters/supported/terragrunt" - }, - { - "source": "/check/configuration/supported/txtpbfmt", - "destination": "/code-quality/overview/linters/supported/txtpbfmt" - }, - { - "source": "/check/configuration/supported/taplo", - "destination": "/code-quality/overview/linters/supported/taplo" - }, - { - "source": "/check/configuration/supported/yamllint", - "destination": "/code-quality/overview/linters/supported/yamllint" - }, - { - "source": "/check/configuration/supported/biome", - "destination": "/code-quality/overview/linters/supported/biome" - }, - { - "source": "/check/configuration/supported/markdown-table-prettify", - "destination": "/code-quality/overview/linters/supported/markdown-table-prettify" - }, - { - "source": "/check/configuration/supported/psscriptanalyzer", - "destination": "/code-quality/overview/linters/supported/psscriptanalyzer" - }, - { - "source": "/check/configuration/supported/cmake-format", - "destination": "/code-quality/overview/linters/supported/cmake-format" - }, - { - "source": "/check/configuration/supported/dart", - "destination": "/code-quality/overview/linters/supported/dart" - }, - { - "source": "/check/configuration/supported/opa", - "destination": "/code-quality/overview/linters/supported/opa" - }, - { - "source": "/check/configuration/supported/phpstan", - "destination": "/code-quality/overview/linters/supported/phpstan" - }, - { - "source": "/check/configuration/supported/regal", - "destination": "/code-quality/overview/linters/supported/regal" - }, - { - "source": "/check/configuration/supported/tofu", - "destination": "/code-quality/overview/linters/supported/tofu" - }, - { - "source": "/check/configuration/supported/vale", - "destination": 
"/code-quality/overview/linters/supported/vale" - }, - { - "source": "/check/configuration/supported/php-cs-fixer", - "destination": "/code-quality/overview/linters/supported/php-cs-fixer" - }, - { - "source": "/cli/windows-beta", - "destination": "/code-quality/overview/cli/getting-started/compatibility" - }, - { - "source": "/docs", - "destination": "/welcome" - }, - { - "source": "/docs/actions-config", - "destination": "/code-quality/overview/cli/getting-started/configuration/actions" - }, - { - "source": "/docs/actions-git-hooks", - "destination": "/code-quality/overview/cli/getting-started/configuration/actions" - }, - { - "source": "/docs/actions", - "destination": "/code-quality/overview/cli/getting-started/configuration/actions" - }, - { - "source": "/docs/check-cli", - "destination": "/code-quality/overview/cli/getting-started" - }, - { - "source": "/docs/check-config", - "destination": "/code-quality/overview/cli/getting-started/configuration" - }, - { - "source": "/docs/check-custom-linters", - "destination": "/code-quality/overview/linters/supported" - }, - { - "source": "/docs/check-debugging", - "destination": "/code-quality/overview/debugging" - }, - { - "source": "/docs/check-get-started", - "destination": "/code-quality/overview/setup-and-installation" - }, - { - "source": "/docs/check-github-integration", - "destination": "/code-quality/overview/deal-with-existing-issues" - }, - { - "source": "/docs/check-supported-linters", - "destination": "/code-quality/overview/linters/supported" - }, - { - "source": "/docs/check", - "destination": "/code-quality/overview" - }, - { - "source": "/docs/check/github-integration", - "destination": "/code-quality/overview/setup-and-installation/github-integration" - }, - { - "source": "/docs/cli", - "destination": "/code-quality/overview/cli/getting-started" - }, - { - "source": "/docs/code-quality", - "destination": "/code-quality/overview" - }, - { - "source": "/docs/get-started", - "destination": "/welcome" - 
}, - { - "source": "/docs/getting-started", - "destination": "/welcome" - }, - { - "source": "/docs/github-app-permissions", - "destination": "/setup-and-administration/github-app-permissions" - }, - { - "source": "/docs/github-codespaces", - "destination": "/code-quality/overview/ide-integration/github-codespaces" - }, - { - "source": "/docs/ignoring-issues", - "destination": "/code-quality/overview/deal-with-existing-issues" - }, - { - "source": "/docs/initialize-trunk-in-a-git-repo", - "destination": "/code-quality/overview/cli/getting-started" - }, - { - "source": "/docs/install", - "destination": "/code-quality/overview/cli/getting-started/install" - }, - { - "source": "/docs/merge-get-started", - "destination": "/merge-queue/getting-started" - }, - { - "source": "/docs/merge-getting-started", - "destination": "/merge-queue/getting-started" - }, - { - "source": "/docs/merge", - "destination": "/merge-queue/merge-queue" - }, - { - "source": "/docs/overview", - "destination": "/welcome" - }, - { - "source": "/docs/plugins", - "destination": "/code-quality/overview/linters" - }, - { - "source": "/docs/reference-trunk-yaml", - "destination": "/code-quality/overview/cli/getting-started/configuration" - }, - { - "source": "/docs/shell-hooks", - "destination": "/code-quality/overview/cli/getting-started/configuration/tools" - }, - { - "source": "/docs/tools", - "destination": "/code-quality/overview/cli/getting-started/configuration/tools" - }, - { - "source": "/docs/trunk-app-for-slack", - "destination": "/merge-queue/integration-for-slack" - }, - { - "source": "/docs/vscode", - "destination": "/code-quality/overview/ide-integration/vscode" - }, - { - "source": "/docs/what-is-trunk", - "destination": "/welcome" - }, - { - "source": "/docs/windows-beta", - "destination": "/code-quality/overview/cli/getting-started/compatibility" - }, - { - "source": "/flaky-tests/generating-junit-reports", - "destination": "/flaky-tests/get-started/frameworks" - }, - { - "source": 
"/get-started", - "destination": "/welcome" - }, - { - "source": "/merge-graph", - "destination": "/merge-queue/merge-queue" - }, - { - "source": "/merge-graph/configuration", - "destination": "/merge-queue/merge-queue" - }, - { - "source": "/merge-graph/set-up-trunk-merge", - "destination": "/merge-queue/merge-queue" - }, - { - "source": "/merge/getting-started", - "destination": "/merge-queue/getting-started" - }, - { - "source": "/reference/reference-trunk-yaml", - "destination": "/code-quality/overview/cli/getting-started/configuration" - }, - { - "source": "/reference/trunk-yaml", - "destination": "/code-quality/overview/cli/getting-started/configuration" - }, - { - "source": "/test-analytics", - "destination": "/flaky-tests/overview" - }, - { - "source": "/test-analytics/ci-providers", - "destination": "/flaky-tests/get-started/ci-providers" - }, - { - "source": "/test-analytics/ci-providers/buildkite-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/buildkite" - }, - { - "source": "/test-analytics/ci-providers/circleci-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/circleci" - }, - { - "source": "/test-analytics/ci-providers/github-actions-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/github-actions" - }, - { - "source": "/test-analytics/ci-providers/gitlab", - "destination": "/flaky-tests/get-started/ci-providers/gitlab" - }, - { - "source": "/test-analytics/ci-providers/other-ci-providers-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/otherci" - }, - { - "source": "/test-analytics/ci-providers/semaphore-ci-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/semaphoreci" - }, - { - "source": "/test-analytics/frameworks", - "destination": "/flaky-tests/get-started/frameworks" - }, - { - "source": "/test-analytics/frameworks/android", - "destination": "/flaky-tests/get-started/frameworks/android" - }, - { - "source": 
"/test-analytics/frameworks/cargo-nextest", - "destination": "/flaky-tests/get-started/frameworks/rust" - }, - { - "source": "/test-analytics/frameworks/cypress", - "destination": "/flaky-tests/get-started/frameworks/cypress" - }, - { - "source": "/test-analytics/frameworks/jasmine", - "destination": "/flaky-tests/get-started/frameworks/jasmine" - }, - { - "source": "/test-analytics/frameworks/jest", - "destination": "/flaky-tests/get-started/frameworks/jest" - }, - { - "source": "/test-analytics/frameworks/minitest", - "destination": "/flaky-tests/get-started/frameworks/minitest" - }, - { - "source": "/test-analytics/frameworks/mocha", - "destination": "/flaky-tests/get-started/frameworks/mocha" - }, - { - "source": "/test-analytics/frameworks/playwright", - "destination": "/flaky-tests/get-started/frameworks/playwright" - }, - { - "source": "/test-analytics/frameworks/pytest", - "destination": "/flaky-tests/get-started/frameworks/pytest" - }, - { - "source": "/test-analytics/frameworks/rspec", - "destination": "/flaky-tests/get-started/frameworks" - }, - { - "source": "/test-analytics/frameworks/swift-testing", - "destination": "/flaky-tests/get-started/frameworks/swift-testing" - }, - { - "source": "/test-analytics/frameworks/xctest", - "destination": "/flaky-tests/get-started/frameworks/xctest" - }, - { - "source": "/test-analytics/github-pull-request-comments", - "destination": "/flaky-tests/management/github-pull-request-comments" - }, - { - "source": "/runtimes", - "destination": "/code-quality/overview/cli/getting-started/configuration/runtimes" - }, - { - "source": "/plugins", - "destination": "/code-quality/overview/linters" - }, - { - "source": "/ci/get-started/github-integration", - "destination": "/code-quality/overview/setup-and-installation/github-integration" - }, - { - "source": "/code-quality/usage", - "destination": "/code-quality/overview/initialize-trunk" - }, - { - "source": "/code-quality/ci/get-started", - "destination": 
"/code-quality/overview/setup-and-installation" - }, - { - "source": "/code-quality/ci/get-started/github-integration", - "destination": "/code-quality/overview/setup-and-installation/github-integration" - }, - { - "source": "/code-quality/configuration/debugging", - "destination": "/code-quality/overview/debugging" - }, - { - "source": "/check/check-cloud-ci-integration/get-started/github-integration", - "destination": "/code-quality/overview/setup-and-installation/github-integration" - }, - { - "source": "/tools", - "destination": "/code-quality/overview/cli/getting-started/configuration/tools" - }, - { - "source": "/flaky-tests/ci-providers/buildkite-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/buildkite" - }, - { - "source": "/flaky-tests/ci-providers/circleci-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/circleci" - }, - { - "source": "/flaky-tests/ci-providers/droneci", - "destination": "/flaky-tests/get-started/ci-providers/droneci" - }, - { - "source": "/flaky-tests/ci-providers/github-actions-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/github-actions" - }, - { - "source": "/flaky-tests/ci-providers/gitlab", - "destination": "/flaky-tests/get-started/ci-providers/gitlab" - }, - { - "source": "/flaky-tests/ci-providers/other-ci-providers-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/otherci" - }, - { - "source": "/flaky-tests/ci-providers/semaphore-ci-quickstart", - "destination": "/flaky-tests/get-started/ci-providers/semaphoreci" - }, - { - "source": "/flaky-tests/ci-providers/travisci", - "destination": "/flaky-tests/get-started/ci-providers/travisci" - }, - { - "source": "/flaky-tests/frameworks/android", - "destination": "/flaky-tests/get-started/frameworks/android" - }, - { - "source": "/flaky-tests/frameworks/bazel", - "destination": "/flaky-tests/get-started/frameworks/bazel" - }, - { - "source": "/flaky-tests/frameworks/cargo-nextest", - "destination": 
"/flaky-tests/get-started/frameworks/rust" - }, - { - "source": "/flaky-tests/frameworks/cypress", - "destination": "/flaky-tests/get-started/frameworks/cypress" - }, - { - "source": "/flaky-tests/frameworks/go-junit-report", - "destination": "/flaky-tests/get-started/frameworks/gotestsum" - }, - { - "source": "/flaky-tests/frameworks/gotestsum", - "destination": "/flaky-tests/get-started/frameworks/gotestsum" - }, - { - "source": "/flaky-tests/frameworks/jasmine", - "destination": "/flaky-tests/get-started/frameworks/jasmine" - }, - { - "source": "/flaky-tests/frameworks/jest", - "destination": "/flaky-tests/get-started/frameworks/jest" - }, - { - "source": "/flaky-tests/frameworks/karma", - "destination": "/flaky-tests/get-started/frameworks/karma" - }, - { - "source": "/flaky-tests/frameworks/maven", - "destination": "/flaky-tests/get-started/frameworks/maven" - }, - { - "source": "/flaky-tests/frameworks/minitest", - "destination": "/flaky-tests/get-started/frameworks/minitest" - }, - { - "source": "/flaky-tests/frameworks/mocha", - "destination": "/flaky-tests/get-started/frameworks/mocha" - }, - { - "source": "/flaky-tests/frameworks/phpunit", - "destination": "/flaky-tests/get-started/frameworks/phpunit" - }, - { - "source": "/flaky-tests/frameworks/playwright", - "destination": "/flaky-tests/get-started/frameworks/playwright" - }, - { - "source": "/flaky-tests/frameworks/pytest", - "destination": "/flaky-tests/get-started/frameworks/pytest" - }, - { - "source": "/flaky-tests/frameworks/rspec", - "destination": "/flaky-tests/get-started/frameworks" - }, - { - "source": "/flaky-tests/frameworks/swift-testing", - "destination": "/flaky-tests/get-started/frameworks/swift-testing" - }, - { - "source": "/flaky-tests/frameworks/vitest", - "destination": "/flaky-tests/get-started/frameworks/vitest" - }, - { - "source": "/flaky-tests/frameworks/xctest", - "destination": "/flaky-tests/get-started/frameworks/xctest" - }, - { - "source": "/administration", - 
"destination": "/setup-and-administration/managing-your-organization" - }, - { - "source": "/apis/api", - "destination": "/setup-and-administration/apis" - }, - { - "source": "/apis", - "destination": "/setup-and-administration/apis" - }, - { - "source": "/cli/configuration", - "destination": "/code-quality/overview/cli/getting-started/configuration" - }, - { - "source": "/cli/configuration/actions", - "destination": "/code-quality/overview/cli/getting-started/configuration/actions" - }, - { - "source": "/cli/configuration/lint", - "destination": "/code-quality/overview/cli/getting-started/configuration/lint" - }, - { - "source": "/cli/configuration/plugins", - "destination": "/code-quality/overview/cli/getting-started/configuration/plugins" - }, - { - "source": "/cli/commands-reference", - "destination": "/code-quality/overview/cli/getting-started/commands-reference" - }, - { - "source": "/cli/getting-started/actions", - "destination": "/code-quality/overview/cli/getting-started/actions" - }, - { - "source": "/cli/getting-started", - "destination": "/code-quality/overview/cli/getting-started" - }, - { - "source": "/cli", - "destination": "/code-quality/overview/cli/getting-started" - }, - { - "source": "/merge-queue/parallel-queues", - "destination": "/merge-queue/optimizations/parallel-queues" - }, - { - "source": "/code-quality/linters/supported/sqlfluff", - "destination": "/code-quality/overview/linters/supported/sqlfluff" - }, - { - "source": "/code-quality/linters/supported/sourcery", - "destination": "/code-quality/overview/linters/supported/sourcery" - }, - { - "source": "/administration/github-app-permissions", - "destination": "/setup-and-administration/github-app-permissions" - }, - { - "source": "/administration/billing", - "destination": "/setup-and-administration/billing" - }, - { - "source": "/apis/flaky-tests", - "destination": "/setup-and-administration/apis" - }, - { - "source": "/cli/configuration/lint/dependencies", - "destination": 
"/code-quality/overview/cli/getting-started/configuration/lint/dependencies" - }, - { - "source": "/cli/configuration/lint/output-parsing", - "destination": "/code-quality/overview/cli/getting-started/configuration/lint/output-parsing" - }, - { - "source": "/cli/configuration/lint/commands", - "destination": "/code-quality/overview/cli/getting-started/configuration/lint/commands" - }, - { - "source": "/cli/configuration/tools", - "destination": "/code-quality/overview/cli/getting-started/configuration/tools" - }, - { - "source": "/cli/configuration/plugins/exported-configs", - "destination": "/code-quality/overview/cli/getting-started/configuration/plugins/exported-configs" - }, - { - "source": "/cli/getting-started/actions/git-hooks", - "destination": "/code-quality/overview/cli/getting-started/actions/git-hooks" - }, - { - "source": "/cli/install", - "destination": "/code-quality/overview/cli/getting-started/install" - }, - { - "source": "/merge-queue/command-line", - "destination": "/merge-queue/reference/merge-queue-cli-reference" - }, - { - "source": "/merge-queue/optimistic-merging", - "destination": "/merge-queue/optimizations/optimistic-merging" - }, - { - "source": "/merge-queue", - "destination": "/merge-queue/merge-queue" - }, - { - "source": "/code-quality/overview", - "destination": "/code-quality/overview" - }, - { - "source": "/code-quality", - "destination": "/code-quality/overview" - }, - { - "source": "/flaky-tests", - "destination": "/flaky-tests/overview" - }, - { - "source": "/flaky-tests/use-mcp-server/mcp-tool-reference", - "destination": "/flaky-tests/reference/mcp-reference" - }, - { - "source": "/flaky-tests/autofix-ci-failures", - "destination": "/flaky-tests/agents/autofix-ci-failures" - }, - { - "source": "/flaky-tests/autofix-flaky-tests", - "destination": "/flaky-tests/agents/autofix-flaky-tests" - }, - { - "source": "/flaky-tests/infrastructure-failure-protection", - "destination": 
"/flaky-tests/detection/infrastructure-failure-protection" - }, - { - "source": "/flaky-tests/managing-detected-flaky-tests", - "destination": "/flaky-tests/management/managing-detected-flaky-tests" - }, - { - "source": "/flaky-tests/quarantine-service-availability", - "destination": "/flaky-tests/quarantining/quarantine-service-availability" - }, - { - "source": "/flaky-tests/quarantining", - "destination": "/flaky-tests/quarantining" - }, - { - "source": "/flaky-tests/test-collections", - "destination": "/flaky-tests/get-started/test-collections" - }, - { - "source": "/flaky-tests/test-labels", - "destination": "/flaky-tests/management/test-labels" - }, - { - "source": "/flaky-tests/the-importance-of-pr-test-results", - "destination": "/flaky-tests/detection/the-importance-of-pr-test-results" - }, - { - "source": "/flaky-tests/ticketing-integrations", - "destination": "/flaky-tests/management/ticketing" - }, - { - "source": "/flaky-tests/uploader", - "destination": "/flaky-tests/reference/cli-reference" - }, - { - "source": "/flaky-tests/ticketing-integrations/jira-integration", - "destination": "/flaky-tests/management/ticketing/jira-integration" - }, - { - "source": "/flaky-tests/ticketing-integrations/linear-integration", - "destination": "/flaky-tests/management/ticketing/linear-integration" - }, - { - "source": "/flaky-tests/use-mcp-server", - "destination": "/flaky-tests/reference/mcp-reference" - }, - { - "source": "/flaky-tests/use-mcp-server/investigate-ci-failures", - "destination": "/flaky-tests/reference/mcp-reference/investigate-ci-failure" - }, - { - "source": "/merge-queue/set-up-trunk-merge", - "destination": "/merge-queue/getting-started" - }, - { - "source": "/merge-queue/concepts-and-optimizations", - "destination": "/merge-queue/optimizations" - }, - { - "source": "/merge-queue/concepts-and-optimizations/parallel-queues", - "destination": "/merge-queue/optimizations/parallel-queues" - }, - { - "source": 
"/merge-queue/concepts-and-optimizations/optimistic-merging", - "destination": "/merge-queue/optimizations/optimistic-merging" - }, - { - "source": "/merge-queue/concepts-and-optimizations/batch-merging", - "destination": "/merge-queue/optimizations/batching" - }, - { - "source": "/merge-queue/concepts-and-optimizations/batching", - "destination": "/merge-queue/optimizations/batching" - }, - { - "source": "/merge-queue/concepts-and-optimizations/pending-failure-depth", - "destination": "/merge-queue/optimizations/pending-failure-depth" - }, - { - "source": "/merge-queue/concepts-and-optimizations/predictive-testing", - "destination": "/merge-queue/optimizations/predictive-testing" - }, - { - "source": "/merge-queue/concepts/parallel-queues", - "destination": "/merge-queue/optimizations/parallel-queues" - }, - { - "source": "/merge-queue/concepts/optimistic-merging", - "destination": "/merge-queue/optimizations/optimistic-merging" - }, - { - "source": "/merge-queue/concepts/batch-merging", - "destination": "/merge-queue/optimizations/batching" - }, - { - "source": "/merge-queue/concepts/pending-failure-depth", - "destination": "/merge-queue/optimizations/pending-failure-depth" - }, - { - "source": "/merge-queue/concepts/predictive-testing", - "destination": "/merge-queue/optimizations/predictive-testing" - }, - { - "source": "/merge-queue/managing-merge-queue/advanced-settings", - "destination": "/merge-queue/optimizations" - }, - { - "source": "/merge-queue/managing-merge-queue/reference", - "destination": "/merge-queue/using-the-queue/reference" - }, - { - "source": "/merge-queue/set-up-trunk-merge/branch-protection-and-required-status-checks", - "destination": "/merge-queue/getting-started/configure-branch-protection" - }, - { - "source": "/merge-queue/set-up-trunk-merge/integration-for-slack", - "destination": "/merge-queue/integration-for-slack" - }, - { - "source": "/merge-queue/managing-merge-queue/metrics", - "destination": 
"/merge-queue/administration/metrics" - }, - { - "source": "/merge-queue/webhooks", - "destination": "/merge-queue/webhooks" - }, - { - "source": "/merge-queue/concepts-and-optimizations/pr-prioritization", - "destination": "/merge-queue/optimizations/priority-merging" - }, - { - "source": "/merge-queue/managing-merge-queue", - "destination": "/merge-queue/using-the-queue/reference" - }, - { - "source": "/merge-queue/managing-merge-queue/using-the-webapp", - "destination": "/merge-queue/using-the-queue/reference" - }, - { - "source": "/merge-queue/references/apis/merge", - "destination": "/merge-queue/reference/merge" - } - ] -} \ No newline at end of file + } +} diff --git a/favicon.svg b/favicon.svg index d521b54..d50ceed 100644 --- a/favicon.svg +++ b/favicon.svg @@ -1,13 +1,5 @@ - - - - - - - - - - - - + + + + diff --git a/fix-asset-spaces.sh b/fix-asset-spaces.sh deleted file mode 100755 index ac55edd..0000000 --- a/fix-asset-spaces.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -ASSETS_DIR="$(dirname "$0")/assets" -CONTENT_DIR="$(dirname "$0")" - -# Collect all asset files with spaces -mapfile -d '' FILES < <(find "$ASSETS_DIR" -name "* *" -print0) - -if [[ ${#FILES[@]} -eq 0 ]]; then - echo "No files with spaces found." - exit 0 -fi - -echo "Found ${#FILES[@]} file(s) with spaces. Processing..." - -for OLD_PATH in "${FILES[@]}"; do - OLD_NAME="$(basename "$OLD_PATH")" - NEW_NAME="${OLD_NAME// /_}" - - if [[ "$OLD_NAME" == "$NEW_NAME" ]]; then - continue - fi - - NEW_PATH="$(dirname "$OLD_PATH")/$NEW_NAME" - - # Rename the file - mv "$OLD_PATH" "$NEW_PATH" - - # Update references in all content files - # Escape special chars for sed: ( ) . 
[ ] - OLD_REF="/assets/${OLD_NAME}" - NEW_REF="/assets/${NEW_NAME}" - - OLD_SED="$(printf '%s\n' "$OLD_REF" | sed 's/[][()\.]/\\&/g')" - NEW_SED="$(printf '%s\n' "$NEW_REF" | sed 's/[&/\]/\\&/g')" - - while IFS= read -r CONTENT_FILE; do - sed -i "s|${OLD_SED}|${NEW_SED}|g" "$CONTENT_FILE" - echo " Updated: $CONTENT_FILE" - done < <(grep -rl --include="*.mdx" --include="*.md" --include="*.json" \ - "$OLD_REF" "$CONTENT_DIR" 2>/dev/null || true) - - echo "Renamed: $OLD_NAME -> $NEW_NAME" -done - -echo "Done." diff --git a/flaky-tests/agents/autofix-ci-failures.mdx b/flaky-tests/agents/autofix-ci-failures.mdx deleted file mode 100644 index b03965f..0000000 --- a/flaky-tests/agents/autofix-ci-failures.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Autofix CI Failures" ---- -Trunk can return targeted information about CI failures, enabling AI agents and automation tools to analyze and fix issues automatically. - -### Prerequisites - -To use the Autofix CI Failures feature, you'll need to have: - -- Your repository set up to [upload test results to Trunk](../get-started/index) - -### Cursor CI Autofix - -You can set up a [Cursor Automation](https://cursor.com/automations) to automatically fix CI failures by connecting to Trunk's CI failure investigation data via MCP. This is an extension of the Cursor `CI Autofix` template. - - - -Set up the Trunk MCP using [Bearer Authentication](../reference/mcp-reference/configuration/bearer-auth). - - -```json -{ - "name": "CI Autofix v1", - "description": "Detect CI failures on main and automatically open PRs", - "triggers": [ - { - "git": { - "ciCompleted": { - "repos": [ - "https://github.com/" - ], - "condition": 1, - "ignoreBaseFailures": true - } - } - } - ], - "actions": [ - { - "gitPr": {} - }, - { - "mcp": { - "server": { - "name": "trunk" - } - } - } - ], - "prompts": [ - { - "prompt": "Your task is to fix CI failures on PRs.\n\n# Deduplication\n\nTo avoid racing against other agents, before any investigation:\n1. 
Collect the names of ALL failing CI jobs/checks from the CI Status Report above.\n2. Calculate your memory filename: sort the failing jobs alphabetically, join with \"_\", then remove any characters that are not letters, digits, hyphens, underscores, or dots. Prepend \"ci-fail-\" and truncate to 64 characters total. This is the filename.\n3. Read the memory file with this filename.\n - If it exists and the timestamp inside is less than 30 minutes old, stop immediately — no branch, no Slack, no output.\n4. Else, write the memory file with the current unix timestamp.\n - If the write SUCCEEDS: you claimed this failure. Proceed with the investigation below.\n - If the write FAILS (version conflict): another agent claimed it first. Stop immediately — no branch, no Slack, no output.\n\n# Investigation\n\nRoot cause the CI failure. Call investigate-ci-failure on the trunk MCP in order to get information about the failing test by passing in the workflow URL. Use that to identify which tests to fix. Look at the error output returned by this tool. ONLY IF you need additional information, look at the CI run's logs.\n\n- If the CI failure is due to a bug introduced on that commit, create a new PR that fixes the bug. The PR should be stacked on the PR with the failure. Modify/ensure the base branch of the PR you create is the branch of the PR you are fixing.\n- If the CI failure is due to a flaky test, create a new PR that skips that test.\n- If you are not confident in either of these outcomes, then do nothing.\n\n# Output\n\nOutput your results in the following format:\n**CI Autofix Automation**\n\n**Failure logs**: \n**Broken by**: (cc @prAuthor)\n**Reason**: <1-2 sentence explanation of why CI broke>\n**Fixed by**: <1-2 sentence explanation of what fixed it>\n\nMake sure to push the PR but don't include a PR link in your output — the system will generate that for you." 
- } - ], - "memoryEnabled": true, - "scope": "team_editable_user", - "templateId": "ci-autofix" -} -``` - - -We recommend the following conventions: -- Version your Automation names for more clarity (e.g., "CI Autofix v1") -- Refine the prompt to avoid scanning GitHub logs in order to save time and tokens -- Be specific about your repository's conventions and common failure patterns - - -Currently Cursor will create a pull request with a base of `main`. You will need to adjust the pull request base if you want to merge the fix into your PR. - - -### Claude Code Routines - - -**Coming soon.** Set up Claude Routines to autofix CI failures - diff --git a/flaky-tests/agents/autofix-flaky-tests.mdx b/flaky-tests/agents/autofix-flaky-tests.mdx deleted file mode 100644 index 7ade075..0000000 --- a/flaky-tests/agents/autofix-flaky-tests.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Autofix Flaky Tests" ---- -Trunk can automatically investigate flaky tests in your codebase and raise fix pull requests with suggested solutions. - -### Prerequisites - -To use the Autofix Flaky Tests feature, you'll need: - -1. Beta access via waitlist (reach out to us on [Slack](https://slack.trunk.io)) -2. The "Investigate Flaky Tests" setting enabled in your workspace -3. Active installation of the [Trunk GitHub App](../../setup-and-administration/github-app-permissions) - - -The "Investigate Flaky Tests" setting can only be changed by organization admins. - - - - -### Auto-Investigate Flaky Tests - -Once enabled, any time that Trunk [detects a flaky test](../detection/index), Trunk analyzes the failure patterns, failure output, and git history of the test to provide a number of insights. - - - -Flaky tests can also be analyzed manually via the UI and via the [MCP server](../reference/mcp-reference/fix-flaky-test). - -### Autofix with Cursor Automations - -Whenever an investigation is completed, Trunk will emit a [webhook](../webhooks/index) for `test_case.investigation_completed`. 
Enable webhooks via [Svix](../webhooks/index). - -You can then set up a [Cursor Automation](https://cursor.com/automations) to trigger when webhooks are received. - - - - -```json -{ - "name": "Autofix Flaky Tests v1", - "triggers": [ - { - "webhook": {} - } - ], - "actions": [], - "prompts": [ - { - "prompt": "Your task is to fix flaky tests in this repo using provided insights.\n\n# Filter\n\nIf the test does not include the repository html_url \"https://github.com/\", exit early and do nothing.\n\n# Root Cause\n\nThe payload will include metadata about the failing test as well as some insights about the flakiness.\n\n1. The markdown_summary field includes the most important insights and the first steps you should take to root cause the flaky tests.\n2. The facts field includes more findings from historical data about running the test.\n3. Remember that the test is flaky. Sometimes it passes and sometimes it fails. Use the investigation payload to target your analysis.\n4. Use the memory tool to capture any important findings as you analyze the codebase to root cause the flakiness, such as codebase structure or test patterns.\n\n## Antipatterns\n\n1. Identify the root cause of the flakiness of the test. Do not simply increase the test's timeout or change the assertion to be more generic.\n2. Do not attempt to fix flakiness in other tests, limit your analysis to this single test.\n3. Do not add new tests, fix the flaky test in the payload.\n4. If the test is not present on your stable branch, exit early.\n5. When modifying end to end tests, do not wait on internal API calls to resolve. Focus on the page state and what the end user sees.\n6. There may be additional reasons for test flakiness, such as nondeterministic seed data, noisy neighbors, or test order issues. Conduct a deep analysis for necessary evidence, do not terminate your analysis early.\n\n## Output\n\n1. 
Once you have identified the root cause of the test's flakiness, open a pull request to fix the PR.\n2. Title the Pull Request: \"[Cursor Fix Flaky Test]: \".\n3. Include 1 short paragraph about the fix and the supporting evidence in the pull request body. Include links to relevant files/pages that were relevant from the webhook payload and its facts.\n4. In a collapsible summary of the PR description, include the entire webhook payload you received." - } - ], - "memoryEnabled": true, - "scope": "private", - "gitConfig": { - "repo": "https://github.com/", - "repos": [ - "https://github.com/" - ], - "branch": "main" - } -} -``` - - -We recommend the following conventions: -- Version your Automation names for more clarity. -- Configure the Svix endpoint with the Cursor Bearer token. -- Webhooks are configured for your entire organization, so you will need to use [Svix transformations](https://docs.svix.com/transformations) or filter out events that are not for your intended repository. -- Be specific about conventions and antipatterns for your repository. You will need to refine the Automation prompt to suit your needs. -- If your CI setup allows it, prompt Cursor to run the tests to verify them. - -### What's next? - -- Continue to monitor your tests to confirm the flaky test fixes are effective -- Investigations can be triggered and applied via [MCP](../reference/mcp-reference/fix-flaky-test) - - -**Coming soon.** Set up Claude Routines to autofix flaky tests - diff --git a/flaky-tests/agents/index.mdx b/flaky-tests/agents/index.mdx deleted file mode 100644 index a62c25c..0000000 --- a/flaky-tests/agents/index.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Agents" -description: "Use agents and AI workflows to investigate and fix flaky tests and CI failures." 
---- - - - - - - diff --git a/flaky-tests/dashboard.mdx b/flaky-tests/dashboard.mdx index 43e819d..e434a23 100644 --- a/flaky-tests/dashboard.mdx +++ b/flaky-tests/dashboard.mdx @@ -2,45 +2,15 @@ title: "Dashboard" description: "Learn to find flaky tests and understand their impact using the Flaky Tests dashboard" --- -Trunk Flaky Tests detects flaky tests by analyzing test results. The health of your tests is displayed in the Flaky Tests dashboard. - -### Repositories overview - -When you navigate to `//flaky-tests`, you land on a repositories overview showing all monitored repositories at a glance. - -Each repository row displays: - -| Column | Description | -|--------|-------------| -| **Tests** | Total tracked test cases in the repository (60-day window) | -| **Flaky** | Number of currently flaky test cases, with a 10-day trend sparkline | -| **Broken** | Number of currently broken test cases, with a 10-day trend sparkline | -| **Runs / Day** | Bar chart of test run volume over the last 10 days, with per-day tooltips | - -A quarantine status icon appears next to each repository name when quarantining is configured: - -| Icon | Meaning | -|------|---------| -| Shield | Quarantining is enabled for this repository — auto-quarantine is off | -| Shield with checkmark | Auto-quarantine is enabled — flaky tests are quarantined automatically | - -Active repositories (with test data in the last 30 days) appear at the top of the list. Repositories with no recent data are collapsed under an **Inactive Repositories** section that you can expand to view. - -Selecting a repository opens its detailed dashboard. If your organization has no repositories connected yet, the page redirects to onboarding. See [Quarantining](./quarantining/) to learn how to configure quarantine settings. +Trunk Flaky Tests detect flaky tests by analyzing test results. The health of your tests is displayed in the Flaky Tests dashboard. ### Key repository metrics - - - +

Key repo metrics

-Trunk Flaky Tests provides key repo metrics based on the detected health status of your tests. You'll find metrics for the following information at the top of the Flaky Tests dashboard. +Trunk Flaky Test provides key repo metrics based on the detected health status of your tests. You'll find metrics for the following information at the top of the Flaky Test dashboard. -| Metric | Description | -|---|---| -| Flaky tests | Number of flaky test cases in your repo. | -| PRs blocked by failed tests | PRs that have been blocked by failed tests in CI. | +
MetricDescription
Flaky testsNumber of flaky test cases in your repo.
PRs blocked by failed testsPRs that have been blocked by failed tests in CI.
These numbers are important for understanding the overall health of your repo’s tests, how flaky tests impact your developer productivity, and the developer hours saved from quarantining tests. You can also view the trends in these numbers in the trend charts. @@ -48,24 +18,15 @@ The trend charts display the New Test Cases added by day, as well as Test Transi ### Tests cases overview - - - - +
You can view a table of all your test cases and their current status in Trunk Flaky Tests. Filters can also be set on the table to narrow test results down by test status, quarantine setting, ticket status, or by the name, file, or suite name of the test case. -The table is sorted by default by the number of PRs impacted by the case, which is the best way to measure the impact of a flaky test. You can click on each test case to view [the test case’s details](./dashboard#test-case-details). +The table is sorted by default by the number of PRs impacted by the case, which is the best way to measure the impact of a flaky test. You can click on each test case to view [the test case’s details](#test-case-details). -| Column | Description | -|---|---| -| Tests | The variant, file path, and name of the test case. | -| Status | The health status of the test case: **Healthy**, **Flaky**, or **Broken**. Broken indicates consistent high-rate failures; Flaky indicates intermittent failures. | -| Failure Rate | The percentage of CI runs failed due to this test case. | -| PRs Impacted | The number of PRs that have been affected by this test case failing in CI. | -| Last Run | The most recent timestamp for an upload test run. | +
ColumnDescription
TestsThe variant, file path, and name of the test case.
StatusThe health status of the test case: Healthy, Flaky, or Broken. Broken indicates consistent high-rate failures; Flaky indicates intermittent failures.
Failure RateThe percentage of CI runs failed due to this test case.
PRs ImpactedThe number of PRs that have been affected by this test case failing in CI.
Last RunThe most recent timestamp for an upload test run.
Test Deletion & History @@ -76,125 +37,49 @@ Test Deletion & History ### Test case details -You can _click_ on any of the test cases listed on the Flaky Tests dashboard to access the test case’s details. The test details page uses a tabbed layout: +
-* **Summary**: Run result charts and failure types grouped by unique failure reason. -* **Test History**: A searchable, paginated table of every individual test run with filtering and a detail panel. -* **Monitors**: Detection monitors configured for this test (visible when the detection engine is enabled). -* **Events**: A timeline of detection events, quarantine actions, ticketing events, and status transitions (Healthy, Flaky, Broken) for this test (visible when the detection engine is enabled). Use the category filter to scope to **Flake Detection** events to see which monitor triggered each transition. +You can *click* on any of the test cases listed on the Flaky Test dashboard to access the test case’s details. On a test's details page, you can find: -In addition to the tabbed content, the test details page shows the test’s current status (Healthy, Flaky, or Broken), ticket status, and codeowner information. +* The test's current status (Healthy, Flaky, or Broken) +* Which monitors are currently active for the test, and which monitor triggered each status change +* Visualizations and a timeline detailing the test's health history +* A table of unique failure types for this test + +This is in addition to information like ticket status and the current codeowner. ### **Code owners** If you have a codeowners file configured in your repos, you will see who owns each flaky test in the test details view. We support code owners for [GitHub](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) and [GitLab](https://docs.gitlab.com/ee/user/project/codeowners/) repos. - - - - - -This information will also be provided when creating a ticket with the [Jira integration](./management/ticketing/jira-integration) or [webhooks](./webhooks/). - -### Summary tab - - - - - +

You can find the code owners of each test on the top right of the test details screen.

-The Summary tab shows an overview of the test’s recent run results and groups past failures by unique failure type. +This information will also be provided when creating a ticket with the [Jira integration](/flaky-tests/ticketing-integrations/jira-integration) or [webhooks](/flaky-tests/webhooks). -#### Failure types +### **Failure types** - - - - +
The Failure Types table shows the history of past test runs grouped by unique failure types. The Failure Type is a summary of the stack trace of the test run. You can click on the failure type to see a list of test runs labeled by branch, PR, Author, CI Job link, duration, and time. -#### Failure details +### Failure details You can click on any of these test runs to see the detailed stack trace: - - - - +
You can flip through the stack traces of similar failures across different test runs by clicking the left and right arrow buttons. You can also see other similar failures on this and other tests. -##### Go to the CI job logs - -If you want to see full logging of the original CI job for an individual test failure, you can click **Logs** in the expanded failure details panel to go to the job’s page in your CI provider. - - - - - - -### Test History tab - -The Test History tab gives you full visibility into every individual run of a test. Use it to investigate patterns across branches, find specific failing runs, and drill into error details. - -#### Daily runs chart - -A stacked bar chart at the top of the tab shows daily test run counts. The legend identifies four categories: - -* **Green**: Pass -* **Red**: Fail -* **Blue**: Quarantined -* **Gray**: Skipped - -Click and drag on the chart to select a date range, which scopes the table below to runs from the selected days. The selected range appears next to the legend with an X button to clear just the range. The **Reset** button on the filter bar clears all filters at once, including the date range. - -The **Result** and **Quarantined** filters from the filter bar also apply to the chart bars. When you filter to only passing runs, for example, the chart shows only green (Pass) bars. The chart and table always reflect the same set of runs. - -#### Filters - -A filter bar below the chart provides four independent controls: - -| Filter | Description | -|---|---| -| Result | Segmented control with **All**, **Pass**, and **Fail** to scope the table to a specific outcome. | -| Quarantined | Segmented control with **Include** (default), **Exclude**, and **Only** to control whether quarantined runs are mixed in, hidden, or shown exclusively. | -| SHA | Filter by commit hash. Matches runs whose SHA starts with the entered text. | -| Branch | Filter by branch name. Accepts exact names or glob patterns. 
Use `*` to match any sequence of characters and `?` to match a single character. | - -Branch filter examples: - -| Pattern | Matches | -|---|---| -| `main` | The branch named `main` exactly | -| `release/*` | All release branches, e.g. `release/1.0`, `release/2.3` | -| `feature-??` | Feature branches with a two-character suffix, e.g. `feature-v2` | -| `trunk-merge/*` | All merge queue branches | - -All filters combine using AND logic, so you can use them together. For example, set **Result** to **Fail** and **Quarantined** to **Only** to surface only quarantined failures. The **Reset** button clears every filter at once, including the chart date range. - -Filter state is saved in the URL, so you can share or bookmark a filtered view. The Result filter accepts `result=pass` or `result=fail`. The Quarantined filter accepts `quarantined=include`, `quarantined=exclude`, or `quarantined=only`. - -#### Runs table - -The runs table displays a paginated list of individual test runs (25 per page) with the following columns: +#### Go to the CI job logs -| Column | Description | -|---|---| -| Timestamp | When the test ran, displayed in your local time zone. | -| Duration | How long the test took to execute. | -| PR | The pull request number associated with the run, e.g. `#1234`. Empty for runs that aren't tied to a PR. | -| Branch | The branch the test ran against, e.g. `main`, `feature/x`, or `trunk-merge/pr-1234/...` for merge queue branches. | -| Commit | The first 7 characters of the commit SHA. | +If you want to see full logging of the original CI job for an individual test failure, you can click **Logs** in the expanded failure details panel to go to the job's page in your CI provider. -Each row has a colored left border indicating the run's outcome. Quarantined runs always show blue, regardless of whether the run passed or failed. For non-quarantined runs, the border is green for pass, red for fail, orange for error, and a neutral gray for any other state. +
-#### Run detail panel +### **Test history** -Click any row in the runs table to open a detail panel on the right side of the page. The panel shows: +
-* **Run header**: Timestamp, a result badge (Pass, Fail, Error, or Quarantined), and run duration. -* **Source control**: A CI job link (with the provider's icon, the job name, and the CI duration), the linked pull request, branch, and commit. Merge queue runs also include a **View in Merge Queue** link. -* **Error details**: For failed, errored, or quarantined runs, an optional AI summary of the failure followed by the raw error text or stack trace. +Tests may transition between Healthy, Flaky, and Broken states multiple times over their lifetime. You can see previous status changes in Test History, as well as an explanation for why each transition occurred — including which monitor triggered it. diff --git a/flaky-tests/detection.mdx b/flaky-tests/detection.mdx new file mode 100644 index 0000000..27839e1 --- /dev/null +++ b/flaky-tests/detection.mdx @@ -0,0 +1,83 @@ +--- +title: "Flaky test detection" +description: "Learn how Trunk detects and labels flaky and broken tests" +--- +Flake Detection automatically identifies problematic tests in your test suite by monitoring test behavior over time. Instead of a single set of built-in detection rules, Trunk uses **monitors**, independent detectors that each watch for a specific pattern. When any monitor flags a test, it's marked as flaky or broken. When all monitors agree the test has recovered, it returns to healthy. + +## How Monitors Work + +Each monitor independently observes your test runs and tracks two states per test: **active** (problematic behavior detected) or **inactive** (no problematic behavior). 
A test's overall status is determined by combining all of its monitors, with the most severe status winning: + +| Priority | Status | Condition | +| -------- | ----------- | --------------------------------------------------------------------- | +| Highest | **Broken** | Any enabled broken-type threshold monitor is active for this test | +| Middle | **Flaky** | Any enabled flaky-type monitor (threshold or pass-on-retry) is active | +| Lowest | **Healthy** | No active monitors | + +If a test triggers both a broken monitor and a flaky monitor simultaneously, it shows as **Broken**. When the broken monitor resolves (e.g., you fix the regression and the failure rate drops), the test transitions to **Flaky** if a flaky monitor is still active, or to **Healthy** if no monitors remain active. + +A test stays in its detected state until every relevant monitor that flagged it has independently resolved. + +### Disabling or Deleting a Monitor + +When you disable or delete a monitor, it is immediately set to **resolved** for every test case in the repo. This triggers a status re-evaluation for all affected tests. If the disabled monitor was the only active monitor for a test, that test transitions to healthy. If other monitors are still active, the test remains in the most severe active state. + +For example, if you have a broken threshold monitor and a flaky pass-on-retry monitor, and you disable the broken monitor, any test that was only flagged by the broken monitor will become healthy. A test flagged by both will transition from broken to flaky (because pass-on-retry is still active). 
+ +## Monitor Types + +| Monitor | What it detects | Detection type | Plan availability | Default state | +| -------------------------------------------------------------------------------------- | ----------------------------------------------------------------- | --------------- | ----------------- | ------------- | +| [**Pass-on-Retry**](/flaky-tests/detection/pass-on-retry-monitor) | A test fails then passes on the same commit (retry after failure) | Flaky | Team and above | Enabled | +| [**Threshold**](/flaky-tests/detection/threshold-monitor) | Failure rate exceeds a configured percentage over a time window | Flaky or Broken | Paid plans | Disabled | + +You can run multiple monitors simultaneously. For example, you might use pass-on-retry to catch classic retry-based flakiness while also running threshold monitors scoped to different branches. A common pattern is to pair a broken-type threshold monitor (catching consistently failing tests) with a flaky-type threshold monitor (catching intermittently failing tests). See [Threshold Monitor: Recommended Configurations](/flaky-tests/detection/threshold-monitor#recommended-configurations) for details. + +If you need to manually flag a test that automated monitors haven't caught, use [Flag as Flaky](/flaky-tests/detection/flag-as-flaky) from the test detail page. + +## Branch-Aware Detection + +Tests often behave differently depending on where they run. Failures on `main` are usually unexpected and signal flakiness. Failures on PR branches may be expected during active development. Merge queue failures are suspicious because the code has already passed PR checks. + +Rather than applying a single set of branch rules automatically, Trunk gives you control over how detection treats different branches through **branch scoping** on threshold monitors. You can create separate monitors with different thresholds and windows for your stable branch, PR branches, and merge queue branches. 
See [Threshold Monitor: Recommended configurations](/flaky-tests/detection/threshold-monitor#recommended-configurations) for specific guidance. + +Pass-on-retry detection is branch-agnostic. It flags any test that fails and passes on the same commit, regardless of which branch the test ran on. + +## Muting Monitors + +You can temporarily mute a monitor for a specific test case. A muted monitor continues to run and record detections, but it won't contribute to the test's flaky status until the mute expires. + +This is useful when you know a test is flaky but want to suppress the signal temporarily, for example while a fix is in progress or during a known infrastructure issue. Unlike [Flag as Flaky](/flaky-tests/detection/flag-as-flaky), which is a persistent user override, muting preserves the detection history and automatically re-enables itself after the mute period. + +### How Muting Works + +You can mute a monitor from the test case view in the Trunk app. When muting, you choose a duration: + +| Duration | +| -------- | +| 1 hour | +| 4 hours | +| 24 hours | +| 7 days | +| 30 days | + +While muted, the monitor is excluded from the test's status calculation. If the muted monitor was the only active monitor, the test transitions from flaky to healthy for the duration of the mute. When the mute expires, the monitor is automatically included in the next status evaluation. If it's still active, the test will be flagged as flaky again. + +You can also unmute a monitor early from the test case view. + + +You can only mute a monitor that has already detected flaky behavior for a test. If a monitor has never been active for a test, the mute option is disabled. + + +### When to Mute vs. 
Other Options + +| Situation | Recommended action | +| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | +| Fix is in progress and you want to suppress noise temporarily | **Mute** the monitor for a few days | +| Test is flaky but no automated monitor has caught it | Use [**Flag as Flaky**](/flaky-tests/detection/flag-as-flaky) to mark it as flaky | +| You want to stop a monitor from evaluating a test permanently | Adjust the monitor's branch scope or thresholds instead | +| You want to suppress all flaky signals for a test | Mute each active monitor individually, or address the root cause | + +## Variants + +If you run the same tests across different environments or architectures, you can use [variants](/flaky-tests/uploader) to separate these runs into distinct test cases. This lets monitors detect environment-specific flakes. For example, a test might be flaky on iOS but stable on Android. Using variants, monitors isolate flakes on the iOS variant instead of marking the test as flaky across all environments. See the [Trunk Analytics CLI docs](/flaky-tests/uploader) for details on how to upload with variants. diff --git a/flaky-tests/detection/failure-count-monitor.mdx b/flaky-tests/detection/failure-count-monitor.mdx deleted file mode 100644 index 4dccbd7..0000000 --- a/flaky-tests/detection/failure-count-monitor.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: "Failure Count Monitor" -description: "Detect flaky or broken tests as soon as they accumulate a configured number of failures" ---- -The failure count monitor flags a test the moment it accumulates a configured number of failures on monitored branches within a rolling time window. 
Unlike the failure rate monitor, which requires a failure *rate* calculated over many runs, the failure count monitor reacts to individual failures without needing a minimum sample size or a percentage calculation. - -This makes it well-suited for stable branches like `main` where any test failure is unexpected and worth investigating immediately. - -## When to Use This Monitor - -Use the failure count monitor when you want immediate visibility into test failures on branches that should be green. Common scenarios: - -- **Stable branch alerting:** Flag any test that fails on `main`, even once. On a branch where all tests should pass, a single failure is a meaningful signal. -- **Post-merge regression detection:** Catch tests that start failing after a merge, before the failure rate accumulates enough data for a failure rate monitor to trigger. -- **High-confidence branches:** Monitor merge queue or release branches where failures are suspicious by definition. - -If you need to detect patterns of intermittent failure over time (e.g., a test that fails 20% of the time), use a [failure rate monitor](./failure-rate-monitor) instead. If you want to catch tests that fail and then pass on retry within a single commit, [pass-on-retry](./pass-on-retry-monitor) handles that automatically. - -## Detection Type - -Each failure count monitor has a **detection type** -- either **flaky** or **broken** -- which controls what status a test receives when the monitor flags it: - -- **Flaky monitors** are appropriate when failures on the monitored branch are likely non-deterministic. A test that fails once on `main` but passes on retry is probably flaky. -- **Broken monitors** are appropriate when failures indicate a real regression. If a test fails on `main` and you expect it to keep failing until someone fixes it, broken is the right classification. - -The detection type is set at creation and cannot be changed afterward. 
If you need to switch a monitor's type, create a new monitor with the desired type and disable the old one. - -## How It Works - -The monitor counts the number of test failures on configured branches within a rolling time window. When a test reaches the configured failure count, it is flagged. - -### Example - -You configure a failure count monitor with: - -| Setting | Value | -|---|---| -| Detection type | Broken | -| Failure count | 1 | -| Window | 30 minutes | -| Resolution timeout | 2 hours | -| Branches | `main` | - -A developer merges a change that breaks `test_checkout`. Here is what happens: - -1. `test_checkout` fails on the next CI run on `main`. -2. The monitor sees 1 failure within the 30-minute window, which meets the configured failure count of 1. -3. `test_checkout` is immediately flagged as **broken**. -4. The developer identifies the issue and merges a correction. -5. Two hours pass with no new failures for `test_checkout`. -6. The monitor automatically resolves the test back to **healthy**. - -If another test, `test_signup`, also failed during that window, it would be flagged independently. Each test is evaluated on its own. - -## Configuration - -### Failure Count - -The number of failures required to trigger detection. The default is **1**, meaning any single failure on a monitored branch flags the test. - -Setting this higher (e.g., 3) requires multiple failures before the monitor reacts. This is useful if you want to filter out one-off infrastructure blips while still catching tests that fail repeatedly in a short window. - -### Window Duration - -The rolling time window over which failures are counted. Only test failures within this window contribute to the failure count. - -A shorter window (e.g., 30 minutes) limits detection to very recent failures. A longer window (e.g., 6 hours) catches failures that are spread out over time but still accumulating. 
- -The window should be long enough to capture the failures you care about but short enough that old failures roll off naturally. For a monitor with a failure count of 1, the window mainly controls how quickly a detection event is created after a failure. In practice, the pipeline evaluates frequently, so detection is near-immediate regardless of window size. - -### Resolution Timeout - -How long a flagged test must go without any new failures before it is automatically resolved. This is the only way a failure count monitor resolves. There is no "recovery rate" or sample-based resolution like the failure rate monitor. - -For example, with a resolution timeout of 2 hours, a test that was flagged at 3:00 PM will resolve at 5:00 PM if no new failures occur. If a new failure arrives at 4:30 PM, the clock resets, and the test will not resolve until 6:30 PM. - -The resolution timeout must be at least as long as the detection window. If the window is 30 minutes, the resolution timeout should be 30 minutes or longer. - -Choose a resolution timeout that gives your team enough time to verify a fix has landed. A short timeout (e.g., 30 minutes) resolves quickly but may prematurely clear tests that fail intermittently. A longer timeout (e.g., 24 hours) is more conservative and ensures the test stays flagged until it has been clean for a full day. - -### Branch Scope - -Which branches the monitor evaluates. You can specify branch names or glob patterns. Only test failures on matching branches count toward the failure count. - -Branch patterns work the same way as [failure rate monitor branch patterns](./failure-rate-monitor#branch-pattern-syntax), including glob syntax and merge queue patterns. Refer to that section for pattern syntax, examples, and tips. 
- -## Resolution Behavior - -A failure count monitor resolves in one way: **the test stops failing for long enough.** - -When the configured resolution timeout elapses without a new failure on any monitored branch, the test is resolved as healthy. There is no rate-based recovery and no stale timeout. If a test stops running entirely (e.g., it was deleted or renamed), it remains in its flagged state until the resolution timeout passes from the last observed failure. - -This time-based approach means you don't need to wait for enough passing runs to bring a failure rate down. Once the test is quiet, it resolves. - -## Preview Panel - -When you create or edit a failure count monitor, a **Preview** panel appears on the right side of the dialog on larger screens. The preview updates as you adjust the monitor's settings, giving you a live look at what the monitor would detect against your current branch data. - -Once the monitor configuration produces detections, the panel shows a **Failing tests** list. Each row displays the test name as a link to its detail page, along with its failure count. Counts that meet or exceed your configured failure count are highlighted in red; counts below appear in muted text. - -You can search the list by test name or parent test name. The search is case-insensitive and filters as you type. If no tests match your search term, the list shows a "No tests match" message. When more than 100 tests are detected, only the first 100 are shown with a notice to narrow your search. - -## Muting - -You can temporarily mute a failure count monitor for a specific test case. See [Muting monitors](./index#muting-monitors) for details. - -## Preview Panel - -When creating or editing a failure count monitor, a preview panel shows which tests the current configuration would flag based on recent data. 
- -### Status Filter - -A **status filter dropdown** in the preview panel lets you filter the test list to any combination of statuses: **Healthy**, **Flaky**, and **Broken**. By default, all statuses are shown. - -Filtering to **Healthy** is the most useful view: it shows tests that are currently healthy but would be flagged by this monitor if created with the current settings. This lets you see the new coverage the monitor adds without noise from tests already detected by other monitors. - -Selecting multiple statuses (for example, Healthy and Flaky) shows tests matching any of the selected statuses. - -When a status filter is active, the info tooltip in the panel header shows "X of Y tests" to indicate how many tests are visible relative to the total that match the monitor configuration. - -If no tests match the active filter, the empty state includes a hint to clear the filter. - -### Large Repo Truncation - -For repositories with a large number of matching tests, preview results may be truncated. When this happens, an amber warning appears in the panel. The truncation applies to the list of tests shown, not to the underlying detection logic — the monitor evaluates all matching tests when active. 
- -## Choosing Between Monitors - -| Scenario | Recommended monitor | -|---|---| -| Any failure on `main` should be flagged immediately | **Failure count** with count = 1 | -| Tests failing at an elevated rate over many runs | **Threshold** with appropriate activation percentage | -| A test fails then passes on retry in the same commit | **Pass-on-retry** (enabled by default) | -| Consistently failing tests (80%+ failure rate) | **Threshold** with broken detection type | -| Quick alerting on merge queue failures | **Failure count** scoped to merge queue branches | diff --git a/flaky-tests/detection/flag-as-flaky.mdx b/flaky-tests/detection/flag-as-flaky.mdx index 85401c5..26253b1 100644 --- a/flaky-tests/detection/flag-as-flaky.mdx +++ b/flaky-tests/detection/flag-as-flaky.mdx @@ -6,9 +6,9 @@ Manually mark a test as flaky when you know it's unreliable but automated monito ## When to Use It -- A test is intermittently failing but hasn't been flagged by threshold or pass-on-retry monitors. -- You want to immediately quarantine a test while investigating. -- You've identified a flaky test through code review or local observation. +* A test is intermittently failing but hasn't been flagged by threshold or pass-on-retry monitors. +* You want to immediately quarantine a test while investigating. +* You've identified a flaky test through code review or local observation. ## How It Works @@ -21,9 +21,9 @@ Manually mark a test as flaky when you know it's unreliable but automated monito Once flagged: -- The test is immediately marked as **flaky**, regardless of what automated monitors report. -- An amber banner appears below the header showing who flagged it, when, and the reason (if provided). -- The flag is additive — if automated monitors later detect the test as flaky too, both signals coexist. +* The test is immediately marked as **flaky**, regardless of what automated monitors report. 
+* An amber banner appears below the header showing who flagged it, when, and the reason (if provided). +* The flag is additive — if automated monitors later detect the test as flaky too, both signals coexist. ### Removing the Flag @@ -33,8 +33,8 @@ Once flagged: After removing: -- The test's status reverts to whatever the automated monitors determine. -- If monitors are still detecting the test as flaky, it remains flaky. The flag removal only clears the manual override. +* The test's status reverts to whatever the automated monitors determine. +* If monitors are still detecting the test as flaky, it remains flaky. The flag removal only clears the manual override. ## Relationship to Monitors @@ -52,4 +52,3 @@ The "Flag as Flaky" action is independent of automated monitors (threshold-based ## Flag History All flag and unflag actions are recorded as events. You can view the history by opening the Flag History panel from the test detail page. Each entry shows who performed the action, when, and the reason (if one was provided). - diff --git a/flaky-tests/detection/index.mdx b/flaky-tests/detection/index.mdx deleted file mode 100644 index ffc8f67..0000000 --- a/flaky-tests/detection/index.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Flake Detection" -description: "Learn how Trunk detects and labels flaky and broken tests" ---- -Flake Detection automatically identifies problematic tests in your test suite by monitoring test behavior over time. Instead of a single set of built-in detection rules, Trunk uses **monitors**, independent detectors that each watch for a specific pattern. When any monitor flags a test, it's marked as flaky or broken. When all monitors agree the test has recovered, it returns to healthy. - -## How Monitors Work - -Each monitor independently observes your test runs and tracks two states per test: **active** (problematic behavior detected) or **inactive** (no problematic behavior). 
A test's overall status is determined by combining all of its monitors, with the most severe status winning: - -| Priority | Status | Condition | -|----------|--------|-----------| -| Highest | **Broken** | Any enabled broken-type monitor (failure rate or failure count) is active for this test | -| Middle | **Flaky** | Any enabled flaky-type monitor (failure rate, failure count, or pass-on-retry) is active | -| Lowest | **Healthy** | No active monitors | - -If a test triggers both a broken monitor and a flaky monitor simultaneously, it shows as **Broken**. When the broken monitor resolves (e.g., you fix the regression and the failure rate drops), the test transitions to **Flaky** if a flaky monitor is still active, or to **Healthy** if no monitors remain active. - -A test stays in its detected state until every relevant monitor that flagged it has independently resolved. - -### Disabling or Deleting a Monitor - -When you disable or delete a monitor, it is immediately set to **resolved** for every test case in the repo. This triggers a status re-evaluation for all affected tests. If the disabled monitor was the only active monitor for a test, that test transitions to healthy. If other monitors are still active, the test remains in the most severe active state. - -For example, if you have a broken failure rate monitor and a flaky pass-on-retry monitor, and you disable the broken monitor, any test that was only flagged by the broken monitor will become healthy. A test flagged by both will transition from broken to flaky (because pass-on-retry is still active). 
- -## Monitor Types - -| Monitor | What it detects | Detection type | Plan availability | Default state | -|---|---|---|---|---| -| [**Pass-on-Retry**](./pass-on-retry-monitor) | A test fails then passes on the same commit (retry after failure) | Flaky | Team and above | Enabled | -| [**Failure Rate**](./failure-rate-monitor) | Failure rate exceeds a configured percentage over a time window | Flaky or Broken | Paid plans | Disabled | -| [**Failure Count**](./failure-count-monitor) | A test accumulates a configured number of failures in a rolling window | Flaky or Broken | Paid plans | Disabled | - -You can run multiple monitors simultaneously. For example, you might use pass-on-retry to catch classic retry-based flakiness while also running failure rate monitors scoped to different branches. A common pattern is to pair a broken-type failure rate monitor (catching consistently failing tests) with a flaky-type failure rate monitor (catching intermittently failing tests). See [Failure Rate Monitor: Recommended Configurations](./failure-rate-monitor#recommended-configurations) for details. - -The [failure count monitor](./failure-count-monitor) complements failure rate monitors by reacting to individual failures rather than failure rates. Use it on branches where any failure is a meaningful signal, like `main` or merge queue branches. - -If you need to manually flag a test that automated monitors haven't caught, use [Flag as Flaky](./flag-as-flaky) from the test detail page. - -## Branch-Aware Detection - -Tests often behave differently depending on where they run. Failures on `main` are usually unexpected and signal flakiness. Failures on PR branches may be expected during active development. Merge queue failures are suspicious because the code has already passed PR checks. - -Rather than applying a single set of branch rules automatically, Trunk gives you control over how detection treats different branches through **branch scoping** on failure rate monitors. 
You can create separate monitors with different thresholds and windows for your stable branch, PR branches, and merge queue branches. See [Failure Rate Monitor: Recommended configurations](./failure-rate-monitor#recommended-configurations) for specific guidance. - -Pass-on-retry detection is branch-agnostic. It flags any test that fails and passes on the same commit, regardless of which branch the test ran on. - -## Muting Monitors - -You can temporarily mute a monitor for a specific test case. A muted monitor continues to run and record detections, but it won't contribute to the test's flaky status until the mute expires. - -This is useful when you know a test is flaky but want to suppress the signal temporarily, for example while a fix is in progress or during a known infrastructure issue. Unlike [Flag as Flaky](./flag-as-flaky), which is a persistent user override, muting preserves the detection history and automatically re-enables itself after the mute period. - -### How Muting Works - -You can mute a monitor from the test case view in the Trunk app. When muting, you choose a duration: - -| Duration | -|---| -| 1 hour | -| 4 hours | -| 24 hours | -| 7 days | -| 30 days | - -While muted, the monitor is excluded from the test's status calculation. If the muted monitor was the only active monitor, the test transitions from flaky to healthy for the duration of the mute. When the mute expires, the monitor is automatically included in the next status evaluation. If it's still active, the test will be flagged as flaky again. - -You can also unmute a monitor early from the test case view. - -{/* SCREENSHOT: Mute button and duration picker on the test case monitor list. -Show the test case detail page with a monitor's mute button visible, -and ideally the duration picker dropdown open. */} - - -You can only mute a monitor that has already detected flaky behavior for a test. If a monitor has never been active for a test, the mute option is disabled. 
- - -### When to Mute vs. Other Options - -| Situation | Recommended action | -|---|---| -| Fix is in progress and you want to suppress noise temporarily | **Mute** the monitor for a few days | -| Test is flaky but no automated monitor has caught it | Use [**Flag as Flaky**](./flag-as-flaky) to mark it as flaky | -| You want to stop a monitor from evaluating a test permanently | Adjust the monitor's branch scope or thresholds instead | -| You want to suppress all flaky signals for a test | Mute each active monitor individually, or address the root cause | - -## Variants - -If you run the same tests across different environments or architectures, you can use [variants](../reference/cli-reference) to separate these runs into distinct test cases. This lets monitors detect environment-specific flakes. For example, a test might be flaky on iOS but stable on Android. Using variants, monitors isolate flakes on the iOS variant instead of marking the test as flaky across all environments. See the [Trunk Analytics CLI docs](../reference/cli-reference) for details on how to upload with variants. - -## Detection Time - -Detection of flaky tests is run automatically when test uploads are processed. From the time that a test with configured flake detection is uploaded, it will take at most 20 minutes for the flakiness to be detected. \ No newline at end of file diff --git a/flaky-tests/detection/pass-on-retry-monitor.mdx b/flaky-tests/detection/pass-on-retry-monitor.mdx index 738470b..0549bbd 100644 --- a/flaky-tests/detection/pass-on-retry-monitor.mdx +++ b/flaky-tests/detection/pass-on-retry-monitor.mdx @@ -4,7 +4,7 @@ description: "Detect tests that fail then pass on retry within the same commit" --- The pass-on-retry monitor detects the most common flakiness pattern: a test fails, is retried, and passes on the same commit. This indicates the failure wasn't caused by a code change and that the test is unreliable. -By default, this monitor evaluates test runs on all branches. 
You can scope it to specific branches to focus detection where pass-on-retry behavior is actually meaningful. +This monitor is branch-agnostic. It evaluates all test runs regardless of which branch they ran on. ## How It Works @@ -25,35 +25,15 @@ Seven days later (assuming default settings), if `test_login` hasn't exhibited a ## Configuration -{/* SCREENSHOT: Pass-on-Retry monitor configuration panel. -Show the monitor settings UI with the enabled toggle and recovery days -slider/input. Capture a state where the monitor is enabled with the -default 7-day recovery period visible. */} - -| Setting | Description | Default | -|---|---|---| -| **Enabled** | Whether the monitor is active | On | -| **Recovery days** | Days without pass-on-retry behavior before a test is resolved as healthy. Range: 1 to 15 days. | 7 | -| **Branch scope** | Which branches the monitor evaluates. Accepts branch names and glob patterns. | All branches (`*`) | +| Setting | Description | Default | +| ----------------- | ---------------------------------------------------------------------------------------------- | ------- | +| **Enabled** | Whether the monitor is active | On | +| **Recovery days** | Days without pass-on-retry behavior before a test is resolved as healthy. Range: 1 to 15 days. | 7 | ### What Recovery Days Controls A shorter recovery period (e.g., 1 to 3 days) returns tests to healthy quickly, which is useful if you fix flaky tests promptly and want fast feedback. A longer recovery period (e.g., 10 to 15 days) is more conservative. It keeps tests flagged longer to account for flaky behavior that only surfaces occasionally. -### Branch Scope - -Use the **Branch scope** setting to restrict the monitor to a specific set of branches. - -This is useful when PR branches generate too much noise. CI often retries tests on pull request branches automatically; if those retries aren't meaningful signals for your team, you can limit detection to stable branches like `main`. 
- -Branch scope uses the same glob syntax as [failure rate monitor branch patterns](./failure-rate-monitor#branch-pattern-syntax) and accepts up to 10 patterns. Type a pattern and press **Enter** or comma to add it as a chip. For example: - -- `main`: only stable branch runs -- `main` and `release/*`: stable plus release branches -- `*` (default): all branches - -Changes to branch scope take effect for newly detected events. Previously detected flaky tests are not re-evaluated. - ## When Detection Happens Pass-on-retry detection runs continuously as new test results arrive. A failure and its corresponding retry don't need to arrive at exactly the same time. @@ -62,7 +42,7 @@ Resolution is evaluated daily. If a test hasn't shown pass-on-retry behavior wit ## Muting -You can temporarily mute the pass-on-retry monitor for a specific test case. See [Muting monitors](./index#muting-monitors) for details. +You can temporarily mute the pass-on-retry monitor for a specific test case. See [Muting monitors](/flaky-tests/detection/..#muting-monitors) for details. ## Edge Cases diff --git a/flaky-tests/detection/failure-rate-monitor.mdx b/flaky-tests/detection/threshold-monitor.mdx similarity index 55% rename from flaky-tests/detection/failure-rate-monitor.mdx rename to flaky-tests/detection/threshold-monitor.mdx index 863608c..893181d 100644 --- a/flaky-tests/detection/failure-rate-monitor.mdx +++ b/flaky-tests/detection/threshold-monitor.mdx @@ -1,17 +1,17 @@ --- -title: "Failure Rate Monitor" +title: "Threshold Monitor" description: "Detect flaky or broken tests based on failure rate over a configurable time window" --- -The failure rate monitor detects tests based on failure rate over a rolling time window. Unlike pass-on-retry, which looks for a specific pattern on a single commit, the failure rate monitor identifies tests that fail too often over a period of time, even if no individual failure looks like a retry. 
+The threshold monitor detects tests based on failure rate over a rolling time window. Unlike pass-on-retry, which looks for a specific pattern on a single commit, the threshold monitor identifies tests that fail too often over a period of time, even if no individual failure looks like a retry. -You can create multiple failure rate monitors with different configurations. This is how you tailor detection to different branches, test volumes, sensitivity levels, and detection types. +You can create multiple threshold monitors with different configurations. This is how you tailor detection to different branches, test volumes, sensitivity levels, and detection types. ## Detection Type -Each failure rate monitor has a **detection type** — either **flaky** or **broken** — which controls what status a test receives when the monitor flags it: +Each threshold monitor has a **detection type** — either **flaky** or **broken** — which controls what status a test receives when the monitor flags it: -- **Flaky monitors** catch tests that fail intermittently (e.g., 20–50% failure rate). These are typically caused by timing issues, shared state, or non-deterministic behavior. -- **Broken monitors** catch tests that fail consistently at a high rate (e.g., 80%+ failure rate). These usually indicate a real regression — something in the code or environment is genuinely broken and needs a fix. +* **Flaky monitors** catch tests that fail intermittently (e.g., 20–50% failure rate). These are typically caused by timing issues, shared state, or non-deterministic behavior. +* **Broken monitors** catch tests that fail consistently at a high rate (e.g., 80%+ failure rate). These usually indicate a real regression — something in the code or environment is genuinely broken and needs a fix. The detection type is set at creation and cannot be changed afterward. If you need to switch a monitor's type, create a new monitor with the desired type and disable the old one. 
@@ -23,34 +23,27 @@ The monitor periodically calculates the failure rate for each test within a time ### Example -You configure a failure rate monitor with: +You configure a threshold monitor with: -| Setting | Value | -|---|---| -| Detection type | Flaky | -| Activation threshold | 30% | -| Window | 6 hours | -| Minimum sample size | 50 runs | -| Branches | `main` | +| Setting | Value | +| -------------------- | ------- | +| Detection type | Flaky | +| Activation threshold | 30% | +| Window | 6 hours | +| Minimum sample size | 50 runs | +| Branches | `main` | Over the last 6 hours, here's what the monitor observes: -| Test | Runs | Failures | Failure rate | Meets min sample? | Result | -|---|---|---|---|---|---| -| `test_checkout` | 120 | 42 | 35% | Yes (120 ≥ 50) | **Flagged as flaky** — rate exceeds 30% threshold | -| `test_signup` | 8 | 3 | 37.5% | No (8 < 50) | **Not flagged** — insufficient data | +| Test | Runs | Failures | Failure rate | Meets min sample? | Result | +| --------------- | ---- | -------- | ------------ | ----------------- | ------------------------------------------------- | +| `test_checkout` | 120 | 42 | 35% | Yes (120 ≥ 50) | **Flagged as flaky** — rate exceeds 30% threshold | +| `test_signup` | 8 | 3 | 37.5% | No (8 < 50) | **Not flagged** — insufficient data | `test_checkout` is flagged because its 35% failure rate exceeds the 30% threshold and it has enough runs to be statistically meaningful. `test_signup` has a higher failure rate but is skipped entirely — the monitor needs at least 50 runs before making a call. ## Configuration -{/* */} - ### Detection Type Choose **Flaky** or **Broken**. This determines the status a test receives when the monitor flags it. See [Detection Type](#detection-type) above for guidance on which to use. @@ -89,9 +82,9 @@ The right minimum depends on how often a test actually runs on the branches you' How long (in hours) a flagged test can go without any runs before it's automatically resolved as stale. 
This clears out tests that have been deleted, renamed, or are no longer part of your test suite. -When not set, flagged tests remain in their detected state indefinitely until they run enough times to recover through the normal threshold check. Setting a stale timeout (e.g., 24 hours) keeps abandoned tests from cluttering your test list. +When not set, flagged tests remain in their detected state indefinitely until they run enough times to recover through the normal threshold check. Setting a stale timeout (e.g., 24 hours) ensures abandoned tests don't clutter your test list. -A test resolved as stale is no longer being tracked by this monitor. If the test starts running again and exceeds the activation threshold, it will be re-flagged. +A test resolved as stale is simply no longer being tracked by this monitor. If the test starts running again and exceeds the activation threshold, it will be re-flagged. Skipped tests count as not being run. If you have a stale timeout configured and a test starts being skipped rather than executed, the monitor will treat it as having no runs and resolve it as stale once the timeout elapses. @@ -105,10 +98,10 @@ Which branches the monitor evaluates. You can specify up to 10 branch patterns. Branch patterns use glob-style matching with two special characters: -| Character | Meaning | Regex equivalent | -|---|---|---| -| `*` | Zero or more of any character, including `/` | `.*` | -| `?` | Exactly one of any character | `.` | +| Character | Meaning | Regex equivalent | +| --------- | -------------------------------------------- | ---------------- | +| `*` | Zero or more of any character, including `/` | `.*` | +| `?` | Exactly one of any character | `.` | All other characters are matched literally. Special regex characters (like `.`, `+`, `(`, `)`, `[`, `]`) are treated as literal characters in patterns, not as regex operators. You don't need to escape them. 
@@ -118,13 +111,13 @@ Unlike some glob implementations, `*` matches across `/` separators. The pattern #### Pattern Examples -| Pattern | Matches | Does not match | -|---|---|---| -| `main` | `main` | `main-v2`, `maint` | -| `feature/*` | `feature/login`, `feature/api/auth` | `feature` (no trailing path), `features/x` | -| `release-?.?.?` | `release-1.2.3` | `release-10.2.3` (10 is two characters), `release-1.2` | -| `*-hotfix` | `prod-hotfix`, `release/v1-hotfix` | `hotfix`, `hotfix-1` | -| `*` | All branches | | +| Pattern | Matches | Does not match | +| --------------- | ----------------------------------- | ------------------------------------------------------ | +| `main` | `main` | `main-v2`, `maint` | +| `feature/*` | `feature/login`, `feature/api/auth` | `feature` (no trailing path), `features/x` | +| `release-?.?.?` | `release-1.2.3` | `release-10.2.3` (10 is two characters), `release-1.2` | +| `*-hotfix` | `prod-hotfix`, `release/v1-hotfix` | `hotfix`, `hotfix-1` | +| `*` | All branches | | A pattern with no special characters matches that exact branch name only. For example, `main` matches the branch named `main` and nothing else. @@ -132,53 +125,29 @@ A pattern with no special characters matches that exact branch name only. For ex For your main or stable branch, use the exact branch name: -| Your stable branch | Pattern | -|---|---| -| `main` | `main` | -| `master` | `master` | -| `develop` | `develop` | +| Your stable branch | Pattern | +| ------------------ | --------- | +| `main` | `main` | +| `master` | `master` | +| `develop` | `develop` | #### Merge Queue Branch Patterns If you use a merge queue, your queue creates temporary branches to test changes before merging. 
Each merge queue product uses a different branch naming convention: -| Merge queue | Branch pattern | Example branches matched | -|---|---|---| -| Trunk Merge Queue | `trunk-merge/*` | `trunk-merge/main/1`, `trunk-merge/main/2` | -| GitHub Merge Queue | `gh-readonly-queue/*` | `gh-readonly-queue/main/pr-123-abc` | -| Graphite Merge Queue | `graphite-merge/*` | `graphite-merge/main/1` | +| Merge queue | Branch pattern | Example branches matched | +| -------------------- | --------------------- | ------------------------------------------ | +| Trunk Merge Queue | `trunk-merge/*` | `trunk-merge/main/1`, `trunk-merge/main/2` | +| GitHub Merge Queue | `gh-readonly-queue/*` | `gh-readonly-queue/main/pr-123-abc` | +| Graphite Merge Queue | `graphite-merge/*` | `graphite-merge/main/1` | GitLab Merge Trains run on the target branch directly rather than creating separate branches. To monitor merge train runs, scope your monitor to the target branch (e.g., `main`). #### Tips for Branch Scoping -- You can add up to **10 patterns** per monitor. A test run is included if its branch matches any of the patterns. -- Since patterns can't express "everything except a branch," a practical approach is to create **separate monitors**: one scoped to `main` with strict settings, and another scoped to your PR branch naming patterns (e.g., `feature/*`, `fix/*`) with more lenient settings. -- `**` is treated as two consecutive `*` wildcards, which is functionally identical to a single `*`. There is no special multi-segment matching behavior. - -{/* */} - -## Preview Panel - -When creating or editing a failure rate monitor, a preview panel shows which tests the current configuration would flag based on recent data. The panel is split into two sections: **Current** and **Proposed**. - -- **Current** shows tests flagged by the existing configuration (if editing an existing monitor). -- **Proposed** shows tests that would be flagged with the settings currently entered in the form. 
- -The Current section is collapsed by default, so the Proposed view is immediately visible when you open the form. - -### Status Filter - -A **status filter dropdown** in the preview panel lets you filter the test list to any combination of statuses: **Healthy**, **Flaky**, and **Broken**. By default, all statuses are shown. - -Filtering to **Healthy** shows tests that are currently healthy but would be flagged by this monitor — the new coverage it adds beyond tests already detected. Filtering to other statuses, or combining them, adjusts the visible list without affecting the underlying detection counts. - -When a filter is active, the info tooltip shows "X of Y tests" to indicate how many tests are visible relative to the total matching the configuration. If no tests match the active filter, the empty state includes a hint to clear the filter. - -The status filter applies to the **Proposed** section. The not-in-window count in the Current section reflects the full unfiltered result set and is not affected by the filter. +* You can add up to **10 patterns** per monitor. A test run is included if its branch matches any of the patterns. +* Since patterns can't express "everything except a branch," a practical approach is to create **separate monitors**: one scoped to `main` with strict settings, and another scoped to your PR branch naming patterns (e.g., `feature/*`, `fix/*`) with more lenient settings. +* `**` is treated as two consecutive `*` wildcards, which is functionally identical to a single `*`. There is no special multi-segment matching behavior. ## Resolution Behavior @@ -192,16 +161,16 @@ Tests that are still running but haven't accumulated enough runs to meet the min ## Muting -You can temporarily mute a failure rate monitor for a specific test case. See [Muting monitors](./index#muting-monitors) for details. +You can temporarily mute a threshold monitor for a specific test case. 
See [Muting monitors](/flaky-tests/detection#muting-monitors) for details. ## Recommended Configurations -A common setup is to pair two failure rate monitors — one to catch broken tests quickly and one to catch flaky tests over a longer window: +A common setup is to pair two threshold monitors — one to catch broken tests quickly and one to catch flaky tests over a longer window: -| Monitor | Detection type | Activation threshold | Window | Purpose | -|---------|---------------|---------------------|--------|---------| -| Broken on main | Broken | 80–100% | 1–6 hours | Catch tests that are reliably failing — real regressions that need immediate attention | -| Flaky on main | Flaky | 20–50% | 12–72 hours | Catch intermittently failing tests — candidates for investigation or quarantine | +| Monitor | Detection type | Activation threshold | Window | Purpose | +| -------------- | -------------- | -------------------- | ----------- | -------------------------------------------------------------------------------------- | +| Broken on main | Broken | 80–100% | 1–6 hours | Catch tests that are reliably failing — real regressions that need immediate attention | +| Flaky on main | Flaky | 20–50% | 12–72 hours | Catch intermittently failing tests — candidates for investigation or quarantine | You can create as many monitors as you need. For example, you might want separate monitors for your main branch and pull request branches, or different thresholds for different levels of severity. The following sections provide starting points for common scenarios. @@ -211,28 +180,28 @@ You can create as many monitors as you need. For example, you might want separat Failures on your stable branch are a strong signal. Tests should be passing before code is merged, so failures here are unexpected and likely indicate flakiness.
-| Setting | Suggested value | Why | -|---|---|---| -| Activation threshold | 10 to 20% | Low threshold catches subtle flakiness early | -| Resolution threshold | 5 to 10% | Requires clear improvement before resolving | -| Window | 6 to 24 hours | Long enough to accumulate data, short enough to catch new issues | -| Min sample size | 20 to 50 | Depends on how often your tests run on main | -| Branches | `main` (or `master`, `develop`, etc.) | Use the exact name of your stable branch | +| Setting | Suggested value | Why | +| -------------------- | ------------------------------------- | ---------------------------------------------------------------- | +| Activation threshold | 10 to 20% | Low threshold catches subtle flakiness early | +| Resolution threshold | 5 to 10% | Requires clear improvement before resolving | +| Window | 6 to 24 hours | Long enough to accumulate data, short enough to catch new issues | +| Min sample size | 20 to 50 | Depends on how often your tests run on main | +| Branches | `main` (or `master`, `develop`, etc.) | Use the exact name of your stable branch | ### Pull Requests: Catch Broken Tests On PR branches, tests are expected to fail — that's part of active development. Analyzing failure rate for flakiness on PRs is generally not productive because a new failing test is likely caused by the code change under review, not non-deterministic behavior. Pass-on-retry already handles real flakiness on PRs: if a test fails and then passes on retry within the same commit, it will be detected regardless of branch. -If you do want a failure rate monitor on PRs, scope it to catch **broken** tests rather than flaky ones — tests that are consistently failing at a high rate across many PRs, which may indicate a persistent regression or a broken test environment. 
+If you do want a threshold monitor on PRs, scope it to catch **broken** tests rather than flaky ones — tests that are consistently failing at a high rate across many PRs, which may indicate a persistent regression or a broken test environment. -| Setting | Suggested value | Why | -|---|---|---| -| Detection type | Broken | Focus on consistently failing tests, not intermittent ones | -| Activation threshold | 70 to 90% | High threshold distinguishes real breakage from expected development failures | -| Resolution threshold | 40 to 50% | Wide buffer prevents flapping | -| Window | 12 to 24 hours | Longer window smooths out short-lived development failures | -| Min sample size | 30 to 100 | Higher minimum avoids flagging tests that only ran a few times on PRs | -| Branches | `feature/*`, `fix/*`, `dependabot/*` | Match your team's PR branch naming conventions | +| Setting | Suggested value | Why | +| -------------------- | ------------------------------------ | ----------------------------------------------------------------------------- | +| Detection type | Broken | Focus on consistently failing tests, not intermittent ones | +| Activation threshold | 70 to 90% | High threshold distinguishes real breakage from expected development failures | +| Resolution threshold | 40 to 50% | Wide buffer prevents flapping | +| Window | 12 to 24 hours | Longer window smooths out short-lived development failures | +| Min sample size | 30 to 100 | Higher minimum avoids flagging tests that only ran a few times on PRs | +| Branches | `feature/*`, `fix/*`, `dependabot/*` | Match your team's PR branch naming conventions | Since branch patterns can't express "everything except main," create one monitor scoped to `main` with strict settings and a second monitor scoped to your PR branch naming patterns with more lenient settings. @@ -242,30 +211,22 @@ Merge queue branches test code that has already passed PR checks. 
Failures here When sizing your window and minimum sample size, consider how many PRs your repo merges per day. For example, if your team merges 10 PRs per day, a 12-hour window will accumulate roughly 5 merge queue runs — setting a minimum sample size of 10 would mean the rule never has enough data to evaluate. Match your minimum sample size to a realistic run count within your chosen window. -| Setting | Suggested value | Why | -|---|---|---| -| Activation threshold | 10 to 15% | Low threshold, failures here are unexpected | -| Resolution threshold | 5% | Strict recovery | -| Window | 6 to 12 hours | Shorter window for faster detection | -| Min sample size | 5 to 15 | Size to how many merge queue runs accumulate in your window | -| Branches | `trunk-merge/*` or `gh-readonly-queue/*` | Use the pattern for your merge queue provider (see table above) | +| Setting | Suggested value | Why | +| -------------------- | ---------------------------------------- | --------------------------------------------------------------- | +| Activation threshold | 10 to 15% | Low threshold, failures here are unexpected | +| Resolution threshold | 5% | Strict recovery | +| Window | 6 to 12 hours | Shorter window for faster detection | +| Min sample size | 5 to 15 | Size to how many merge queue runs accumulate in your window | +| Branches | `trunk-merge/*` or `gh-readonly-queue/*` | Use the pattern for your merge queue provider (see table above) | Common branch patterns for merge queues: -| Merge queue | Branch pattern | -|---|---| -| Trunk Merge Queue | `trunk-merge/*` | +| Merge queue | Branch pattern | +| ------------------ | --------------------- | +| Trunk Merge Queue | `trunk-merge/*` | | GitHub Merge Queue | `gh-readonly-queue/*` | ### Other Patterns -- **Release branches:** A monitor scoped to `release/*` with strict thresholds catches flakiness before it ships. 
-- **Nightly or scheduled builds:** If you run comprehensive test suites on a schedule, a monitor with a longer window and higher minimum sample size can catch slow-burn flakiness that doesn't show up in faster CI runs. - -{/* */} - +* **Release branches:** A monitor scoped to `release/*` with strict thresholds catches flakiness before it ships. +* **Nightly or scheduled builds:** If you run comprehensive test suites on a schedule, a monitor with a longer window and higher minimum sample size can catch slow-burn flakiness that doesn't show up in faster CI runs. diff --git a/flaky-tests/flaky-tests.mdx b/flaky-tests/flaky-tests.mdx new file mode 100644 index 0000000..1b94f5b --- /dev/null +++ b/flaky-tests/flaky-tests.mdx @@ -0,0 +1,47 @@ +--- +title: "Flaky Tests API" +description: "The Trunk Flaky Tests API provides access to check the status of Trunk services and fetch unhealthy or quarantined tests in your project. The API is an HTTP REST API, returns JSON" +--- +The Trunk Flaky Tests API provides access to check the status of Trunk services and fetch [unhealthy](/flaky-tests/detection) or [quarantined](/flaky-tests/quarantining) tests in your project. The API is an HTTP REST API, returns JSON from all requests, and uses standard HTTP response codes. + +All requests must be [authenticated](/setup-and-administration/apis#authentication) by providing the `x-api-token` header. 
+ +## POST /flaky-tests/get-test-details + +> Get the details of a test case + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/get-test-details":{"post":{"summary":"Get the details of a test case","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"test_id":{"type":"string","format":"uuid","description":"The id of a test case. 
Should be a UUID."}},"required":["repo","org_url_slug","test_id"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"test":{"type":"object","properties":{"id":{"type":"string","format":"uuid","description":"A stable unique identifier for the test"},"repository":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the repository"}},"required":["html_url"]},"html_url":{"type":"string","format":"uri","description":"The URL of the test details"},"name":{"type":"string","description":"The name of the test"},"variant":{"type":"string","description":"The name of the test variant"},"status":{"type":"object","properties":{"value":{"type":"string","enum":["healthy","flaky","broken"],"description":"The current status value in lowercase"},"reason":{"type":"string","description":"The reason for the current status"},"timestamp":{"type":"string","format":"date-time","description":"The timestamp of the current status change"}},"required":["value","reason","timestamp"]},"most_common_failures":{"type":"array","items":{"type":"object","properties":{"summary":{"type":"string","description":"The summary of the failure"},"occurrence_count":{"type":"integer","minimum":0,"description":"The number of occurrences of this failure"},"last_occurrence":{"type":"string","format":"date-time","description":"The timestamp of the last occurrence"}},"required":["summary","occurrence_count"]},"description":"Several of the most common failures of the test. This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team."},"failure_rate_last_7d":{"type":"number","description":"The failure rate over the last 7 days"},"failure_rate_last_24h":{"type":"number","description":"The failure rate over the last 24 hours"},"file_path":{"type":"string","description":"The file path of the test"},"parent":{"type":"string","description":"The parent of the test. 
This includes the test suite (depending on the test runner)"},"classname":{"type":"string","description":"The class name of the test"},"codeowners":{"type":"array","items":{"type":"string"},"description":"Code owners for the test"},"pull_requests_impacted_last_7d":{"type":"integer","minimum":0,"description":"The number of pull requests impacted in the last 7 days"},"quarantined":{"type":"boolean","description":"Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed."},"ticket":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the associated ticket"}},"required":["html_url"]}},"required":["id","repository","html_url","name","variant","status","most_common_failures","failure_rate_last_7d","failure_rate_last_24h","codeowners","pull_requests_impacted_last_7d","quarantined"],"description":"The details of a test case."}},"required":["test"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/link-ticket-to-test-case + +> Link a ticket to a test case + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/link-ticket-to-test-case":{"post":{"summary":"Link a ticket to a test case","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"test_case_id":{"type":"string","format":"uuid","description":"The id of the test case. 
Should be a UUID."},"external_ticket_id":{"type":"string","description":"The external identifier of the ticket. For Jira this is the ticket number prefixed by the Project Key. For Linear this is the ticket number prefixed by the Team Identifier"},"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."}},"required":["test_case_id","external_ticket_id","repo"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/list-failing-tests + +> Get a list of distinct tests that failed in the given time range + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/list-failing-tests":{"post":{"summary":"Get a list of distinct tests that failed in the given time 
range","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"start_time":{"type":"string","format":"date-time","description":"The start time of the failing tests (inclusive). Must be within 7 days of the end time."},"end_time":{"type":"string","format":"date-time","description":"The end time of the failing tests (exclusive). Must be within 7 days of the start time."},"page_query":{"type":"object","properties":{"page_size":{"type":"integer","minimum":1,"maximum":100,"description":"The number of tests to return per page."},"page_token":{"type":"string","description":"The page token to use for pagination. This is returned from the previous call to this endpoint. 
For the first page, this should be empty."}},"required":["page_size"],"description":"Pagination options for the list of tests."}},"required":["repo","org_url_slug","start_time","end_time","page_query"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"tests":{"type":"array","items":{"type":"object","properties":{"id":{"type":"string","format":"uuid","description":"A stable unique identifier for the test"},"repository":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the repository"}},"required":["html_url"]},"html_url":{"type":"string","format":"uri","description":"The URL of the test details"},"name":{"type":"string","description":"The name of the test"},"variant":{"type":"string","description":"The name of the test variant"},"status":{"type":"object","properties":{"value":{"type":"string","enum":["healthy","flaky","broken"],"description":"The current status value in lowercase"},"reason":{"type":"string","description":"The reason for the current status"},"timestamp":{"type":"string","format":"date-time","description":"The timestamp of the current status change"}},"required":["value","reason","timestamp"]},"most_common_failures":{"type":"array","items":{"type":"object","properties":{"summary":{"type":"string","description":"The summary of the failure"},"occurrence_count":{"type":"integer","minimum":0,"description":"The number of occurrences of this failure"},"last_occurrence":{"type":"string","format":"date-time","description":"The timestamp of the last occurrence"}},"required":["summary","occurrence_count"]},"description":"Several of the most common failures of the test. 
This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team."},"failure_rate_last_7d":{"type":"number","description":"The failure rate over the last 7 days"},"failure_rate_last_24h":{"type":"number","description":"The failure rate over the last 24 hours"},"file_path":{"type":"string","description":"The file path of the test"},"parent":{"type":"string","description":"The parent of the test. This includes the test suite (depending on the test runner)"},"classname":{"type":"string","description":"The class name of the test"},"codeowners":{"type":"array","items":{"type":"string"},"description":"Code owners for the test"},"pull_requests_impacted_last_7d":{"type":"integer","minimum":0,"description":"The number of pull requests impacted in the last 7 days"},"quarantined":{"type":"boolean","description":"Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed."},"ticket":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the associated ticket"}},"required":["html_url"]}},"required":["id","repository","html_url","name","variant","status","most_common_failures","failure_rate_last_7d","failure_rate_last_24h","codeowners","pull_requests_impacted_last_7d","quarantined"]},"description":"A page of failing test cases."},"page":{"type":"object","properties":{"total_rows":{"type":"number","minimum":0,"description":"The total number of test cases in the paginated list."},"total_pages":{"type":"number","minimum":0,"description":"The total number of pages in the paginated list of test 
cases."},"next_page_token":{"type":"string","description":"The next page token to use for pagination. See `page_token` in the request for more information."},"prev_page_token":{"type":"string","description":"The previous page token to use for pagination. See `page_token` in the request for more information."},"last_page_token":{"type":"string","description":"The last page token to use for pagination. See `page_token` in the request for more information."},"page_index":{"type":"number","minimum":0,"description":"The index of the current page in the paginated list of test cases."}},"required":["total_rows","total_pages","next_page_token","prev_page_token","last_page_token","page_index"],"description":"Pagination information for the list of test cases."}},"required":["tests","page"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/list-unhealthy-tests + +> Get a list of unhealthy tests + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/list-unhealthy-tests":{"post":{"summary":"Get a list of unhealthy tests","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. 
`owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"page_query":{"type":"object","properties":{"page_size":{"type":"integer","minimum":1,"maximum":100,"description":"The number of tests to return per page."},"page_token":{"type":"string","description":"The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty."}},"required":["page_size"],"description":"Pagination options for the list of tests."},"status":{"type":"string","enum":["FLAKY","BROKEN"],"description":"The status filter for unhealthy tests."}},"required":["repo","org_url_slug","page_query","status"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"tests":{"type":"array","items":{"type":"object","properties":{"id":{"type":"string","format":"uuid","description":"A stable unique identifier for the test"},"repository":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the repository"}},"required":["html_url"]},"html_url":{"type":"string","format":"uri","description":"The URL of the test details"},"name":{"type":"string","description":"The name of the test"},"variant":{"type":"string","description":"The name of the test variant"},"status":{"type":"object","properties":{"value":{"type":"string","enum":["healthy","flaky","broken"],"description":"The current status value in lowercase"},"reason":{"type":"string","description":"The reason for the current status"},"timestamp":{"type":"string","format":"date-time","description":"The timestamp of the 
current status change"}},"required":["value","reason","timestamp"]},"file_path":{"type":"string","description":"The file path of the test"},"parent":{"type":"string","description":"The parent of the test. This includes the test suite (depending on the test runner)"},"classname":{"type":"string","description":"The class name of the test"},"codeowners":{"type":"array","items":{"type":"string"},"description":"Code owners for the test"},"pull_requests_impacted_last_7d":{"type":"integer","minimum":0,"description":"The number of pull requests impacted in the last 7 days"},"quarantined":{"type":"boolean","description":"Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed."},"ticket":{"type":"object","properties":{"html_url":{"type":"string","format":"uri","description":"The URL of the associated ticket"}},"required":["html_url"]}},"required":["id","repository","html_url","name","variant","status","codeowners","pull_requests_impacted_last_7d","quarantined"]},"description":"A page of unhealthy test cases."},"page":{"type":"object","properties":{"total_rows":{"type":"number","minimum":0,"description":"The total number of test cases in the paginated list."},"total_pages":{"type":"number","minimum":0,"description":"The total number of pages in the paginated list of test cases."},"next_page_token":{"type":"string","description":"The next page token to use for pagination. See `page_token` in the request for more information."},"prev_page_token":{"type":"string","description":"The previous page token to use for pagination. 
See `page_token` in the request for more information."},"last_page_token":{"type":"string","description":"The last page token to use for pagination. See `page_token` in the request for more information."},"page_index":{"type":"number","minimum":0,"description":"The index of the current page in the paginated list of test cases."}},"required":["total_rows","total_pages","next_page_token","prev_page_token","last_page_token","page_index"],"description":"Pagination information for the list of test cases."}},"required":["tests","page"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /flaky-tests/list-quarantined-tests + +> Get a list of quarantined tests + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/flaky-tests/list-quarantined-tests":{"post":{"summary":"Get a list of quarantined tests","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string","description":"The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance."},"owner":{"type":"string","description":"The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself."},"name":{"type":"string","description":"The name of the repository."}},"required":["host","owner","name"],"description":"The repository to list tests for."},"org_url_slug":{"type":"string","description":"The slug of your organization. 
Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\""},"page_query":{"type":"object","properties":{"page_size":{"type":"integer","minimum":1,"maximum":100,"description":"The number of tests to return per page."},"page_token":{"type":"string","description":"The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty."}},"required":["page_size"],"description":"Pagination options for the list of tests."}},"required":["repo","org_url_slug","page_query"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"quarantined_tests":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string","description":"The name of the test case."},"parent":{"type":["string","null"],"description":"The parent of the test case."},"file":{"type":["string","null"],"description":"The file of the test case."},"classname":{"type":["string","null"],"description":"The class name of the test case."},"status":{"type":"string","enum":["HEALTHY","FLAKY","BROKEN"],"description":"The status of the test case."},"codeowners":{"type":"array","items":{"type":"string"},"description":"The latest codeowners of the test case."},"quarantine_setting":{"type":"string","enum":["ALWAYS_QUARANTINE","AUTO_QUARANTINE"],"description":"The quarantine setting of the test case."},"quarantined_at":{"type":"string","format":"date-time","description":"The time at which the test case was quarantined, if applicable."},"status_last_updated_at":{"type":"string","format":"date-time","description":"The last time the status of the test case was updated."},"test_case_id":{"type":"string","description":"The ID of the test case. 
This value is unstable and should not be relied upon."},"variant":{"type":"string","description":"The variant of the test case."}},"required":["name","parent","file","classname","status","codeowners","quarantine_setting","quarantined_at","status_last_updated_at","test_case_id","variant"],"description":"A quarantined test case."},"description":"A page of quarantined test cases."},"page":{"type":"object","properties":{"total_rows":{"type":"number","minimum":0,"description":"The total number of test cases in the paginated list."},"total_pages":{"type":"number","minimum":0,"description":"The total number of pages in the paginated list of test cases."},"next_page_token":{"type":"string","description":"The next page token to use for pagination. See `page_token` in the request for more information."},"prev_page_token":{"type":"string","description":"The previous page token to use for pagination. See `page_token` in the request for more information."},"last_page_token":{"type":"string","description":"The last page token to use for pagination. 
See `page_token` in the request for more information."},"page_index":{"type":"number","minimum":0,"description":"The index of the current page in the paginated list of test cases."}},"required":["total_rows","total_pages","next_page_token","prev_page_token","last_page_token","page_index"],"description":"Pagination information for the list of test cases."}},"required":["quarantined_tests","page"]}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` diff --git a/flaky-tests/get-started/index.mdx b/flaky-tests/get-started.mdx similarity index 54% rename from flaky-tests/get-started/index.mdx rename to flaky-tests/get-started.mdx index 8ad10e8..8c38aeb 100644 --- a/flaky-tests/get-started/index.mdx +++ b/flaky-tests/get-started.mdx @@ -1,6 +1,6 @@ --- title: "Getting Started" -description: "Set up Trunk Flaky Tests by configuring test result output, uploading from CI, and enabling flake detection monitors." +description: "Trunk Flaky Tests detects flaky tests by analyzing test results from your CI runs. Setup requires configuring test result output and CI upload integration." --- Trunk Flaky Tests detects flaky tests by analyzing test results from your CI runs. Setup requires configuring test result output and CI upload integration. @@ -8,7 +8,7 @@ Trunk Flaky Tests detects flaky tests by analyzing test results from your CI run * Account at [app.trunk.io](https://app.trunk.io) * Ability to modify repository CI configuration and add secrets -* Tests running in CI on both PRs and stable branches (e.g., main, master, or develop) +* Tests running in CI on both PRs and stable branches (e.g., main) #### Step 1: Ensure JUnit XML output @@ -16,24 +16,20 @@ Trunk ingests test results in JUnit XML format. 
If your CI already generates JUn If not, configure your test frameworks to output JUnit XML: -* See [**Test Frameworks**](./frameworks/) for framework-specific configuration +* See [**Test Frameworks**](/flaky-tests/get-started/frameworks) for framework-specific configuration * Supports multiple frameworks simultaneously #### Step 2: Configure CI uploads Add test result uploads to all CI jobs that run tests. -1. See [**CI Providers**](./ci-providers/) for integration instructions +1. See [**CI Providers**](/flaky-tests/get-started/ci-providers) for integration instructions 2. Configure uploads in jobs that run on: * Pull request branches * Stable branches (`main`, `master`, `develop`, etc.) * Merge queue branches (if applicable) -Uploads from both PRs and stable branches are required for Trunk Flaky Tests to accurately detect flaky tests. - - -Trunk automatically recognizes `main`, `master`, and `develop` as stable branches. If your primary branch uses a different name, configure uploads from that branch the same way and Trunk will classify it correctly. - +Uploads from both PRs and stable branches are required for accurate flaky test detection. #### Step 3: Verify integration @@ -41,10 +37,7 @@ Trunk automatically recognizes `main`, `master`, and `develop` as stable branche 2. Check CI logs for successful upload confirmation 3. Results typically appear within a few minutes. Verify uploads appear at [app.trunk.io](https://app.trunk.io) → your repo → **Flaky Tests > Uploads** - - - - +

Uploads tab

#### Step 4: Configure flake detection @@ -52,15 +45,15 @@ After uploads are flowing, navigate to your repo → **Flaky Tests > Monitors** **Pass-on-retry** is enabled by default and is the recommended baseline for everyone. It catches the most common flakiness pattern — a test that fails and then passes on retry within the same commit — without any configuration needed. -**Failure rate monitors** let you detect flakiness based on failure rate over a rolling time window. How you configure them depends on your CI setup: +**Threshold monitors** let you detect flakiness based on failure rate over a rolling time window. How you configure them depends on your CI setup: -- **If tests must pass before merging to main**, set up a failure rate monitor scoped to `main` to catch an elevated failure rate. For example, if you run tests 5 times per day on `main`, a 24-hour rolling window with a minimum of 4 runs and a failure threshold of 25% is a reasonable starting point. This gives the monitor enough data before flagging anything. -- **If you use a merge queue**, consider a dedicated monitor scoped to your merge queue branches (e.g., `trunk-merge/*` or `gh-readonly-queue/*`). Failures here are especially suspicious since the code has already passed PR checks, so a low threshold is appropriate. +* **If tests must pass before merging to main**, set up a threshold monitor scoped to `main` to catch an elevated failure rate. For example, if you run tests 5 times per day on `main`, a 24-hour rolling window with a minimum of 4 runs and a failure threshold of 25% is a reasonable starting point. This ensures the monitor has enough data before flagging anything. +* **If you use a merge queue**, consider a dedicated monitor scoped to your merge queue branches (e.g., `trunk-merge/*` or `gh-readonly-queue/*`). Failures here are especially suspicious since the code has already passed PR checks, so a low threshold is appropriate. 
-[How failure rate monitors work →](../detection/failure-rate-monitor) +[How threshold monitors work →](/flaky-tests/detection/threshold-monitor) #### Quarantining Quarantining suppresses failures from known flaky tests, preventing them from forcing CI re-runs or blocking your merge queue. Flaky tests continue to run and report results — they just don't cause pipeline failures while your team works on fixes. This is especially valuable for unblocking merge queues and keeping development velocity high. -[Configure Quarantining →](../quarantining/) +[Configure Quarantining →](/flaky-tests/quarantining) diff --git a/flaky-tests/get-started/ci-providers.mdx b/flaky-tests/get-started/ci-providers.mdx new file mode 100644 index 0000000..7ef2997 --- /dev/null +++ b/flaky-tests/get-started/ci-providers.mdx @@ -0,0 +1,15 @@ +--- +title: "CI Providers" +description: "You can easily integrate Flaky Tests from any CI Provider" +--- +Trunk Flaky Tests integrates with your CI by adding a `Upload Test Results` step in each of your testing CI jobs via the [Trunk CLI](/flaky-tests/uploader). See the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing test reports for your test runner, which Trunk can ingest. + + +**Not using GitHub for source control?** + +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. + + +### Quickstart + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
Google Cloud Buildgoogle-cloud-build
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/ci-providers/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx b/flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx index 5d74c05..2ff4f73 100644 --- a/flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx +++ b/flaky-tests/get-started/ci-providers/atlassian-bamboo.mdx @@ -2,15 +2,15 @@ title: "Atlassian Bamboo" description: "Configure Atlassian Bamboo to upload test results to Trunk Flaky Tests" --- -Trunk Flaky Tests integrates with your CI by adding a step in your Bamboo Plans to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Bamboo Plans to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. 
### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,24 +41,27 @@ Store the Trunk slug and API token obtained in the previous step as [Bamboo plan ### Upload to Trunk -Add an `Upload Test Results` step after running tests in each of your Bamboo jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](../../detection#stable-branches), for example, `main`, `master`, or `develop`. +Add an `Upload Test Results` step after running tests in each of your Bamboo jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection/threshold-monitor#stable-branch-patterns), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection#stable-branches), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection/threshold-monitor#stable-branch-patterns), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. 
To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. -[Learn more about detection](../../detection) - +[Learn more about detection](/flaky-tests/detection) + #### Example Bamboo Plan Spec The following is an example of a [Bamboo Plan Spec](https://confluence.atlassian.com/bamboo/bamboo-specs-894743906.html) that uploads test results after your tests run. The upload step is placed under `final-tasks` so it runs even when tests fail. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. -To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](../frameworks/ "mention") docs. +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. + + - -```yaml XML + + +```yaml version: 2 plan: project-key: @@ -80,9 +83,9 @@ Run Tests and Upload to Trunk: - script: name: Upload Test Results to Trunk.io body: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload \ --junit-paths "" \ --org-url-slug ${bamboo.TRUNK_ORG_SLUG} \ --token ${bamboo.TRUNK_TOKEN} @@ -91,7 +94,12 @@ variables: TRUNK_ORG_SLUG: TRUNK_TOKEN: ``` -```yaml Bazel + + + + + +```yaml version: 2 plan: project-key: @@ -113,9 +121,9 @@ Run Tests and Upload to Trunk: - script: name: Upload Test Results to Trunk.io body: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload \ + curl -fsSLO --retry 3 
https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload \ --bazel-bep-path \ --org-url-slug ${bamboo.TRUNK_ORG_SLUG} \ --token ${bamboo.TRUNK_TOKEN} @@ -124,7 +132,12 @@ variables: TRUNK_ORG_SLUG: TRUNK_TOKEN: ``` -```yaml XCode + + + + + +```yaml version: 2 plan: project-key: @@ -146,9 +159,9 @@ Run Tests and Upload to Trunk: - script: name: Upload Test Results to Trunk.io body: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload \ --xcresult-path \ --org-url-slug ${bamboo.TRUNK_ORG_SLUG} \ --token ${bamboo.TRUNK_TOKEN} @@ -157,7 +170,10 @@ variables: TRUNK_ORG_SLUG: TRUNK_TOKEN: ``` - + + + + #### Uploading from Pull Request Builds @@ -170,25 +186,20 @@ branches: accept-fork: false ``` -Bamboo automatically sets the `bamboo_repository_pr_key` variable on PR builds, which the Trunk Analytics CLI uses to associate uploads with the correct pull request. +Bamboo automatically sets the `bamboo_repository_pr_key` variable on PR builds, which the Trunk CLI uses to associate uploads with the correct pull request. **PR number not detected?** If your Bamboo setup does not set `bamboo_repository_pr_key`, you can override it by passing the `--pr-number` flag or setting the `TRUNK_PR_NUMBER` environment variable when running the upload command. - - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - - -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. 
#### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. - + diff --git a/flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx b/flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx index 367c39f..cad5e09 100644 --- a/flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx +++ b/flaky-tests/get-started/ci-providers/azure-devops-pipelines.mdx @@ -1,15 +1,16 @@ --- title: "Azure DevOps Pipelines" +description: "Trunk Flaky Tests integrates with your CI by adding a step in your Azure DevOps Pipelines to upload tests with the Trunk Uploader CLI." --- -Trunk Flaky Tests integrates with your CI by adding a step in your Azure DevOps Pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Azure DevOps Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. 
+Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -20,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. ### Trunk Organization Slug and Token @@ -32,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -40,15 +41,15 @@ Store the Trunk slug and API token obtained in the previous step in your Azure D ### Upload to Trunk -Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. 
This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. -[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Add Uploader to Testing Pipelines @@ -56,8 +57,11 @@ The following is an example of a workflow step to upload test results after your To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. - -```yaml XML + + + + +```yaml trigger: - main @@ -68,15 +72,20 @@ steps: # ... Omitted steps - script: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --junit-paths "" \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --junit-paths "" \ --org-url-slug $(TRUNK_ORG_SLUG) \ --token $(TRUNK_TOKEN) condition: always() # this should always run displayName: Upload test results to Trunk.io ``` -```yaml Bazel + + + + + +```yaml trigger: - main @@ -87,15 +96,20 @@ steps: # ... 
Omitted steps - script: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --bazel-bep-path \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --bazel-bep-path \ --org-url-slug $(TRUNK_ORG_SLUG) \ --token $(TRUNK_TOKEN) condition: always() # this should always run displayName: Upload test results to Trunk.io ``` -```yaml XCode + + + + + +```yaml trigger: - main @@ -106,15 +120,20 @@ steps: # ... Omitted steps - script: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --xcresult-path \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --xcresult-path \ --org-url-slug $(TRUNK_ORG_SLUG) \ --token $(TRUNK_TOKEN) condition: always() # this should always run displayName: Upload test results to Trunk.io ``` -```yaml RSpec plugin + + + + + +```yaml trigger: - main @@ -130,14 +149,12 @@ steps: bundle exec rspec displayName: Run RSpec tests and upload results to Trunk.io ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. 
#### Stale files @@ -145,9 +162,8 @@ Ensure you report every test run in CI and **clean up stale files** produced by [Learn more about cleaning up artifacts in Azure DevOps Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/repos/pipeline-options-for-git?view=azure-devops\&tabs=yaml#clean-the-local-repo-on-the-agent) - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. - - + diff --git a/flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx b/flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx index e8b3a5c..0d225bf 100644 --- a/flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx +++ b/flaky-tests/get-started/ci-providers/bitbucket-pipelines.mdx @@ -1,15 +1,16 @@ --- title: "BitBucket Pipelines" +description: "Trunk Flaky Tests integrates with your CI by adding a step in your BitBucket Pipelines to upload tests with the Trunk Uploader CLI." --- -Trunk Flaky Tests integrates with your CI by adding a step in your BitBucket Pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your BitBucket Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. 
+Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -20,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. ### Trunk Organization Slug and Token @@ -32,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -40,15 +41,15 @@ Store the Trunk slug and API token obtained in the previous step in your BitBuck ### Upload to Trunk -Add an `after-script` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an `after-script` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. 
This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. -[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Add Uploader to Testing Pipelines @@ -56,8 +57,12 @@ The following is an example of a workflow step to upload test results after your To find out how to produce the JUnit XML files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. - -```yaml XML + + + + + +```yaml image: pipelines: @@ -69,15 +74,21 @@ pipelines: script: - after-script: - # trunk upload runs even if the test script fails + # This ensures trunk upload runs even if the test script fails - | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --junit-paths "**/junit.xml" \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --junit-paths "**/junit.xml" \ --org-url-slug $TRUNK_ORG_SLUG \ --token $TRUNK_TOKEN ``` -```yaml Bazel + + + + + + +```yaml image: pipelines: @@ -89,15 +100,20 @@ pipelines: script: - after-script: - # trunk upload runs even if the test script fails + # This ensures trunk upload runs even if the test script fails - | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload 
--bazel-bep-path \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --bazel-bep-path \ --org-url-slug $TRUNK_ORG_SLUG \ --token $TRUNK_TOKEN ``` -```yaml XCode + + + + + +```yaml image: pipelines: @@ -109,15 +125,20 @@ pipelines: script: - after-script: - # trunk upload runs even if the test script fails + # This ensures trunk upload runs even if the test script fails - | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz - chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --xcresult-path \ + curl -fsSLO --retry 3 https://trunk.io/releases/trunk + chmod +x ./trunk + ./trunk flakytests upload --xcresult-path \ --org-url-slug $TRUNK_ORG_SLUG \ --token $TRUNK_TOKEN ``` -```yaml RSpec plugin + + + + + +```yaml image: pipelines: @@ -132,14 +153,12 @@ pipelines: TRUNK_API_TOKEN=$TRUNK_TOKEN \ bundle exec rspec ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Stale files @@ -147,8 +166,8 @@ Ensure you report every test run in CI and **clean up stale files** produced by You can do this by omitting the `artifacts` definitions in the test steps of your configuration. [Learn more about artifacts in BitBucket Pipelines](https://support.atlassian.com/bitbucket-cloud/docs/use-artifacts-in-steps/). - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
- + diff --git a/flaky-tests/get-started/ci-providers/buildkite.mdx b/flaky-tests/get-started/ci-providers/buildkite.mdx index c38b722..6072276 100644 --- a/flaky-tests/get-started/ci-providers/buildkite.mdx +++ b/flaky-tests/get-started/ci-providers/buildkite.mdx @@ -2,15 +2,15 @@ title: "Buildkite" description: "Configure Buildkite jobs to upload test results to Trunk Flaky Tests" --- -Trunk Flaky Tests integrates with your CI by adding a step in your Buildkite Pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Buildkite Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. 
### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,24 +41,27 @@ Store the Trunk slug and API token obtained in the previous step in your as a ne ### Upload to Trunk -Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. 
-[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example Buildkite Pipeline The following is an example of a Buildkite step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. -To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](../frameworks/ "mention") docs. +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. + + + + - -```yaml XML +```yaml steps: - label: Run Tests command: ... @@ -66,13 +69,18 @@ steps: - label: Upload Test Results to Trunk.io commands: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN key: upload depends_on: - tests ``` -```yaml Bazel + + + + + +```yaml steps: - label: Run Tests command: ... @@ -80,13 +88,18 @@ steps: - label: Upload Test Results to Trunk.io commands: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN key: upload depends_on: - tests ``` -```yaml XCode + + + + + +```yaml steps: - label: Run Tests command: ... 
@@ -94,33 +107,36 @@ steps: - label: Upload Test Results to Trunk.io commands: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN key: upload depends_on: - tests ``` -```yaml RSpec plugin + + + + + +```yaml steps: - label: Run Tests and Upload Results to Trunk.io command: TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec key: tests ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
- + diff --git a/flaky-tests/get-started/ci-providers/circleci.mdx b/flaky-tests/get-started/ci-providers/circleci.mdx index 9b53730..3c461cf 100644 --- a/flaky-tests/get-started/ci-providers/circleci.mdx +++ b/flaky-tests/get-started/ci-providers/circleci.mdx @@ -2,15 +2,15 @@ title: "CircleCI" description: "Configure CircleCI jobs to upload test results to Trunk Flaky Tests" --- -Trunk Flaky Tests integrates with your CI by adding a step in your CircleCI Pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your CircleCI Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. 
### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,19 +41,15 @@ Store your Trunk slug and API token in your CircleCI project settings under **En ### Upload to Trunk -Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -The Trunk Analytics CLI automatically detects PR context from CircleCI environment variables, including the pull request number. No additional configuration is needed to associate test uploads with the correct PR in Trunk. - - - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. 
This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. -[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example CircleCI workflow @@ -61,8 +57,11 @@ The following is an example of a workflow step to upload test results after your To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. - -```yaml XML + + + + +```yaml jobs: test-node: # Install node dependencies and run tests @@ -75,10 +74,15 @@ jobs: - run: name: Upload Test Results to Trunk.io command: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --junit-paths "**/junit.xml" --org-url-slug --token ${TRUNK_TOKEN} + curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + ./trunk flakytests upload --junit-paths "**/junit.xml" --org-url-slug --token ${TRUNK_TOKEN} ``` -```yaml Bazel + + + + + +```yaml jobs: test-node: # Install node dependencies and run tests @@ -91,10 +95,15 @@ jobs: - run: name: Upload Test Results to Trunk.io command: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --bazel-bep-path --org-url-slug --token ${TRUNK_TOKEN} + curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + ./trunk flakytests upload --bazel-bep-path --org-url-slug --token ${TRUNK_TOKEN} ``` -```yaml XCode + + + + + +```yaml jobs: test-node: # Install node dependencies and run tests @@ -107,10 
+116,15 @@ jobs: - run: name: Upload Test Results to Trunk.io command: | - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - ./trunk-analytics-cli upload --xcresult-path --org-url-slug --token ${TRUNK_TOKEN} + curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + ./trunk flakytests upload --xcresult-path --org-url-slug --token ${TRUNK_TOKEN} ``` -```yaml RSpec plugin + + + + + +```yaml jobs: test-node: # Install node dependencies and run tests @@ -120,20 +134,19 @@ jobs: name: Run Tests and Upload Results to Trunk.io command: TRUNK_ORG_URL_SLUG=${TRUNK_ORG_SLUG} TRUNK_API_TOKEN=${TRUNK_TOKEN} bundle exec rspec ``` - - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + + + -See the [Uploader CLI Reference](../../reference/cli-reference) for all available command line arguments and usage. +See the [Uploader CLI Reference](/flaky-tests/uploader) for all available command line arguments and usage. #### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
- + diff --git a/flaky-tests/get-started/ci-providers/droneci.mdx b/flaky-tests/get-started/ci-providers/droneci.mdx index 3cdfbee..a4f26d2 100644 --- a/flaky-tests/get-started/ci-providers/droneci.mdx +++ b/flaky-tests/get-started/ci-providers/droneci.mdx @@ -2,15 +2,15 @@ title: "Drone CI" description: "Configure Flaky Tests using Drone CI" --- -Trunk Flaky Tests integrates with your CI by adding a step in your Drone CI Pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Drone CI Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. 
### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,15 +41,15 @@ Store your Trunk slug and API token in your Drone CI project settings as new var ### Upload to Trunk -Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. 
-[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Add Uploader to Testing Pipelines @@ -57,8 +57,11 @@ The following is an example of a workflow step to upload test results after your To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. - -```yaml XML + + + + +```yaml kind: pipeline type: docker name: test @@ -71,13 +74,18 @@ steps: environment: TRUNK_ORG_SLUG: from_secret: TRUNK_ORG_SLUG - TRUNK_TOKEN: - from_secret: TRUNK_TOKEN + TRUNK_API_TOKEN: + from_secret: TRUNK_API_TOKEN commands: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --junit-paths --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths --org-url-slug --token $TRUNK_TOKEN ``` -```yaml Bazel + + + + + +```yaml kind: pipeline type: docker name: test @@ -90,13 +98,18 @@ steps: environment: TRUNK_ORG_SLUG: from_secret: TRUNK_ORG_SLUG - TRUNK_TOKEN: - from_secret: TRUNK_TOKEN + TRUNK_API_TOKEN: + from_secret: TRUNK_API_TOKEN commands: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN ``` -```yaml XCode + + + + + +```yaml kind: pipeline type: docker name: test @@ -109,13 +122,18 @@ steps: environment: TRUNK_ORG_SLUG: from_secret: TRUNK_ORG_SLUG - TRUNK_TOKEN: - from_secret: TRUNK_TOKEN 
+ TRUNK_API_TOKEN: + from_secret: TRUNK_API_TOKEN commands: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN ``` -```yaml RSpec plugin + + + + + +```yaml kind: pipeline type: docker name: test @@ -130,21 +148,19 @@ steps: commands: - TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
- + diff --git a/flaky-tests/get-started/ci-providers/github-actions.mdx b/flaky-tests/get-started/ci-providers/github-actions.mdx index 31abbe8..3da8e59 100644 --- a/flaky-tests/get-started/ci-providers/github-actions.mdx +++ b/flaky-tests/get-started/ci-providers/github-actions.mdx @@ -2,9 +2,11 @@ title: "GitHub Actions" description: "Configure Flaky Tests detection using a GitHub Action" --- -Trunk Flaky Tests integrates with your CI by adding a step in your GitHub Action workflow to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Before you start these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing Trunk-compatible reports for your test runner. -Before you start these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing Trunk-compatible reports for your test framework. +Trunk Flaky Tests integrates with your CI by adding a step in your GitHub Action workflow to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). + +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -23,36 +25,39 @@ Before setting up uploads to Trunk, you must sign in to [app.trunk.io](https://a #### Trunk Slug -You can find your organization slug under **Settings > Organization > Manage > Organization Name > Slug**. You'll save this as a variable in CI in a later step. +You can find your organization slug under **Settings > Organization > General > Organization > Name**. You'll save this as a variable in CI in a later step. #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. 
+You can find your token under **Settings > Organization > General > API > API Key**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add Your Trunk Token and Organization Slug as Secrets -Store the Trunk slug and API token obtained in the previous step in your repo as [GitHub secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions) named `TRUNK_ORG_SLUG` and `TRUNK_TOKEN` respectively. +Store the Trunk slug and API token obtained in the previous step in your repo as [GitHub secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions) named `TRUNK_ORG_URL_SLUG` and `TRUNK_API_TOKEN` respectively. ### Upload to Trunk -Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should minimally include all jobs that run on pull requests, as well as jobs that run on your main or [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should minimally include all jobs that run on pull requests, as well as jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. 
-[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example GitHub Actions Workflow The following is an example of a GitHub Actions workflow step to upload test results after your tests using Trunk's [**Analytics Uploader Action**](https://github.com/trunk-io/analytics-uploader). -To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [**Test Frameworks**](../frameworks/) docs. +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [**Test Frameworks**](/flaky-tests/get-started/frameworks) docs. + + - -```yaml JUnit XML + + +```yaml jobs: test: name: Upload Tests @@ -71,7 +76,12 @@ jobs: org-slug: token: ${{ secrets.TRUNK_TOKEN }} ``` -```yaml XCResult Path + + + + + +```yaml jobs: test: name: Upload Tests @@ -90,7 +100,12 @@ jobs: org-slug: token: ${{ secrets.TRUNK_TOKEN }} ``` -```yaml Bazel BEP JSON + + + + + +```yaml jobs: test: name: Upload Tests @@ -109,7 +124,12 @@ jobs: org-slug: token: ${{ secrets.TRUNK_TOKEN }} ``` -```yaml RSpec plugin + + + + + +```yaml jobs: test: name: Run and Upload Tests @@ -120,7 +140,10 @@ jobs: run: TRUNK_ORG_URL_SLUG=${{ secrets.TRUNK_ORG_SLUG }} TRUNK_API_TOKEN=${{ secrets.TRUNK_TOKEN }} bundle exec rspec ``` - + + + + See the [GitHub Actions Reference page](https://github.com/trunk-io/analytics-uploader) for all available CLI arguments and usage. @@ -129,7 +152,9 @@ See the [GitHub Actions Reference page](https://github.com/trunk-io/analytics-up You can quarantine flaky tests by wrapping the test command or as a follow-up step. + + Using the Trunk Analytics Uploader Action in your GitHub Actions Workflow files, may need modifications to your workflow files to support quarantining. @@ -138,86 +163,97 @@ If you upload your test results as a second step after you run your tests, **you Here's an example file. 
-```yaml highlight={12,13} - name: Run Tests And Upload Results - on: - workflow_dispatch: - jobs: - upload-test-results: - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - name: Run Tests - id: unit_tests - shell: bash - run: # command to run tests goes here - continue-on-error: true # allow CI job to continue to upload step on errors - - - name: Upload test results - if: always() - uses: trunk-io/analytics-uploader@v1 - with: - junit-paths: - org-slug: my-trunk-org-slug - token: ${{ secrets.TRUNK_TOKEN }} + +```yaml +name: Run Tests And Upload Results +on: + workflow_dispatch: +jobs: + upload-test-results: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Run Tests + id: unit_tests + shell: bash + run: # command to run tests goes here + continue-on-error: true # ensure CI job continues to upload step on errors + + - name: Upload test results + if: always() + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} ``` + If you want to run the test command and upload in a single step, the test command must be **run via the Analytics Uploader** through the `run: ` parameter. This will override the response code of the test command. Make sure to set `continue-on-error: false` so un-quarantined tests are blocking. 
-```yaml highlight={16} - name: Run Tests And Upload Results - on: - workflow_dispatch: - jobs: - upload-test-results: - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Run tests and upload results - uses: trunk-io/analytics-uploader@v1 - with: - junit-paths: - run: # command to run tests goes here - org-slug: my-trunk-org-slug - token: ${{ secrets.TRUNK_TOKEN }} + +```yaml +name: Run Tests And Upload Results +on: + workflow_dispatch: +jobs: + upload-test-results: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run tests and upload results + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: + run: # command to run tests goes here + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} ``` + + - + + + **Using Flaky Tests as a separate step** -If you upload your test results as a second step after you run your tests, you need to make sure your test step **continues on errors** so the upload step that's run after can quarantine failed tests. +If you upload your test results as a second step after you run your tests, you need to ensure your test step **continues on errors** so the upload step that's run after can quarantine failed tests. -When quarantining is enabled, the `trunk-analytics-cli upload` command will **return an error** if there are unquarantined failures and return a status code 0 if all tests are quarantined. +When quarantining is enabled, the `flakytests upload` command will **return an error** if there are unquarantined failures and return a status code 0 if all tests are quarantined. 
-```sh + +```bash || true # doesn't fail job on failure | - ./trunk-analytics-cli upload \ + ./trunk flakytests upload \ --org-url-slug $TRUNK_ORG_SLUG \ - --token $TRUNK_TOKEN \ + --token $TRUNK_API_TOKEN \ --junit-paths $JUNIT_PATH ``` + **Using Flaky Tests as a single step** -You can also wrap the test command with the Trunk Analytics CLI. When wrapping the command with the Trunk Analytics CLI, if there are unquarantined tests, the command will return an error. If there are no unquarantined tests, the command will return a status code `0`. +You can also wrap the test command with the Trunk CLI. When wrapping the command with the Trunk CLI, if there are unquarantined tests, the command will return an error. If there are no unquarantined tests, the command will return a status code `0`. ```bash -./trunk-analytics-cli test \ +./trunk flakytests test \ --org-url-slug \ - --token $TRUNK_TOKEN \ + --token $TRUNK_API_TOKEN \ --junit-paths $JUNIT_PATH \ --allow-empty-test-results \ ``` + + #### Stale files @@ -238,7 +274,8 @@ If you want **direct links to individual job logs** instead of the workflow run, 1. **Add the job ID extraction step** to your workflow using a community action: -```yaml highlight={9-16} + +```yaml jobs: run_tests: runs-on: ubuntu-latest @@ -257,9 +294,11 @@ jobs: job-name: Run Tests # Must match the job 'name' above ``` + 2. 
**Pass the job URL** when uploading test results: -```yaml highlight={13-14} + +```yaml - name: Run Tests id: unit_tests run: @@ -271,11 +310,12 @@ jobs: with: junit-paths: org-slug: my-trunk-org-slug - token: ${{ secrets.TRUNK_TOKEN }} + token: ${{ secrets.TRUNK_API_TOKEN }} env: JOB_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/job/${{ steps.get-job-id.outputs.jobId }} ``` + #### Complete Example Here's a full workflow example with direct job linking: @@ -312,7 +352,7 @@ jobs: with: junit-paths: junit.xml org-slug: my-trunk-org-slug - token: ${{ secrets.TRUNK_TOKEN }} + token: ${{ secrets.TRUNK_API_TOKEN }} env: JOB_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/job/${{ steps.get-job-id.outputs.jobId }} ``` diff --git a/flaky-tests/get-started/ci-providers/gitlab.mdx b/flaky-tests/get-started/ci-providers/gitlab.mdx index 241d038..ef99dff 100644 --- a/flaky-tests/get-started/ci-providers/gitlab.mdx +++ b/flaky-tests/get-started/ci-providers/gitlab.mdx @@ -2,15 +2,15 @@ title: "GitLab" description: "Configure Flaky Tests using GitLab CI" --- -Trunk Flaky Tests integrates with your CI by adding a step in your GitLab CI/CD pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your GitLab CI/CD pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. 
-Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. ### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,26 +41,29 @@ Store the Trunk slug and API token obtained in the previous step in your GitLab ### Upload to Trunk -Add an `upload_test_results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an `upload_test_results` step after running tests in each of your CI jobs that run tests. 
This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. -[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example GitLab Pipeline The following is an example of a GitLab pipeline step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. -To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](../frameworks/ "mention") docs. +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. 
- -```yaml XML + + + + +```yaml image: node:latest stages: # List of stages for jobs, and their order of execution @@ -74,10 +77,15 @@ unit_test_job: # This job runs the tests upload_test_results: # This job uploads tests results run in the last stage to Trunk.io stage: flaky-tests script: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN ``` -```yaml Bazel + + + + + +```yaml image: node:latest stages: # List of stages for jobs, and their order of execution @@ -91,10 +99,15 @@ unit_test_job: # This job runs the tests upload_test_results: # This job uploads tests results run in the last stage to Trunk.io stage: flaky-tests script: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN ``` -```yaml XCode + + + + + +```yaml image: node:latest stages: # List of stages for jobs, and their order of execution @@ -108,10 +121,15 @@ unit_test_job: # This job runs the tests upload_test_results: # This job uploads tests results run in the last stage to Trunk.io stage: flaky-tests script: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --xcresult-path --org-url-slug --token 
$TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN ``` -```yaml RSpec plugin + + + + + +```yaml image: node:latest stages: # List of stages for jobs, and their order of execution @@ -122,21 +140,19 @@ unit_test_job: # This job runs the tests and uploads the results to Trunk.io script: - TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
- + diff --git a/flaky-tests/get-started/ci-providers/google-cloud-build.mdx b/flaky-tests/get-started/ci-providers/google-cloud-build.mdx index e31475d..ff1629f 100644 --- a/flaky-tests/get-started/ci-providers/google-cloud-build.mdx +++ b/flaky-tests/get-started/ci-providers/google-cloud-build.mdx @@ -2,15 +2,15 @@ title: "Google Cloud Build" description: "Configure Google Cloud Build to upload test results to Trunk Flaky Tests" --- -Trunk Flaky Tests integrates with your CI by adding a step in your Google Cloud Build configuration to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Google Cloud Build configuration to upload tests with the [Trunk Analytics CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -23,7 +23,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your `cloudbuild.yaml` to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. 
### Trunk Organization Slug and Token @@ -35,7 +35,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Store the Trunk Token in GCP Secret Manager @@ -63,13 +63,13 @@ Create two Cloud Build triggers for each repository you want to upload test resu 2. Create a trigger for **pull request events** — this uploads test results from PR branches. 3. Create a trigger for **push events** to your stable branch (for example, `main`) — this uploads test results from your stable branch. - -It is important to upload test results from CI runs on [**stable branches**](../../detection#stable-branches), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. 
-[Learn more about detection](../../detection) - +[Learn more about detection](/flaky-tests/detection) + ### Upload to Trunk @@ -83,23 +83,26 @@ Google Cloud Build does not automatically provide environment variables to build The following environment variables must be passed to the upload step: -| Variable | Description | -|----------|-------------| -| `TRIGGER_NAME` | Name of the Cloud Build trigger (used for CI platform detection) | -| `PROJECT_ID` | GCP project ID (used to construct the CI job link) | -| `BUILD_ID` | Unique ID of the Cloud Build run (used to construct the CI job link) | -| `BRANCH_NAME` | Git branch being built (used for push/stable branch uploads) | -| `_HEAD_BRANCH` | Head branch for PR-triggered builds | -| `_PR_NUMBER` | Pull request number for PR-triggered builds | +| Variable | Description | +| --------------- | ----------------------------------------------------------------- | +| `TRIGGER_NAME` | Name of the Cloud Build trigger (used for CI platform detection) | +| `PROJECT_ID` | GCP project ID (used to construct the CI job link) | +| `BUILD_ID` | Unique ID of the Cloud Build run (used to construct the CI job link) | +| `BRANCH_NAME` | Git branch being built (used for push/stable branch uploads) | +| `_HEAD_BRANCH` | Head branch for PR-triggered builds | +| `_PR_NUMBER` | Pull request number for PR-triggered builds | #### Example `cloudbuild.yaml` The following is an example of a `cloudbuild.yaml` configuration that runs tests and uploads results to Trunk. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. -To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](../frameworks/ "mention") docs. +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. 
+ + - -```yaml XML + + +```yaml steps: - name: gcr.io/cloud-builders/npm id: run-tests @@ -141,7 +144,12 @@ availableSecrets: - versionName: projects/${PROJECT_ID}/secrets//versions/latest env: TRUNK_API_TOKEN ``` -```yaml Bazel + + + + + +```yaml steps: - name: gcr.io/cloud-builders/bazel id: run-tests @@ -179,32 +187,35 @@ availableSecrets: - versionName: projects/${PROJECT_ID}/secrets//versions/latest env: TRUNK_API_TOKEN ``` - - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + + + + + +The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](/flaky-tests/uploader) page for all available platform downloads. + - + **Important:** Set `allowExitCodes: [0, 1]` on your test step so the upload step runs even when tests fail. Without this, Cloud Build stops the pipeline on test failures and your results won't be uploaded. - + Replace the following placeholders in the example: -| Placeholder | Description | -|-------------|-------------| -| `` | Glob pattern matching your JUnit XML test report files (for example, `**/junit.xml`) | -| `` | Your Trunk organization slug | -| `` | The name of the secret you created in GCP Secret Manager | +| Placeholder | Description | +| ------------------- | -------------------------------------------------------------------------------------------- | +| `` | Glob pattern matching your JUnit XML test report files (for example, `**/junit.xml`) | +| `` | Your Trunk organization slug | +| ``| The name of the secret you created in GCP Secret Manager | -See the [Trunk Analytics CLI](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. 
#### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. - + diff --git a/flaky-tests/get-started/ci-providers/index.mdx b/flaky-tests/get-started/ci-providers/index.mdx deleted file mode 100644 index 2e281b5..0000000 --- a/flaky-tests/get-started/ci-providers/index.mdx +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "CI Providers" -mode: wide -description: "You can easily integrate Flaky Tests from any CI Provider" ---- -Trunk Flaky Tests integrates with your CI by adding a `Upload Test Results` step in each of your testing CI jobs via the [Trunk Analytics CLI](../../reference/cli-reference). See the [Test Frameworks](../frameworks/) docs for instructions on producing test reports for your test runner, which Trunk can ingest. - - -**Not using GitHub for source control?** - -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. 
- - -### Quickstart - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/flaky-tests/get-started/ci-providers/jenkins.mdx b/flaky-tests/get-started/ci-providers/jenkins.mdx index 1f94dfc..7b7f311 100644 --- a/flaky-tests/get-started/ci-providers/jenkins.mdx +++ b/flaky-tests/get-started/ci-providers/jenkins.mdx @@ -2,15 +2,15 @@ title: "Jenkins" description: "Configure Flaky Tests using Jenkins" --- -Trunk Flaky Tests integrates with your CI by adding a step in your Jenkins Pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Jenkins Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. 
### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,24 +41,27 @@ Store the Trunk slug and API token obtained in the previous step in your Jenkins ### Upload to Trunk -Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. 
-[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example Jenkins Pipeline The following is an example of a Jenkins pipeline step to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. -To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](../frameworks/) docs +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs + + + + - -```groovy XML +```groovy pipeline { environment { TRUNK_TOKEN = credentials('TRUNK_TOKEN') @@ -68,13 +71,18 @@ pipeline { ... } stage('Upload Test Results'){ - sh 'curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli' - sh './trunk-analytics-cli upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN' + sh 'curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk' + sh './trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN' } } } ``` -```yaml Bazel + + + + + +```yaml pipeline { environment { TRUNK_TOKEN = credentials('TRUNK_TOKEN') @@ -84,13 +92,18 @@ pipeline { ... 
} stage('Upload Test Results'){ - sh 'curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli' - sh './trunk-analytics-cli upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN' + sh 'curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk' + sh './trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN' } } } ``` -```yaml XCode + + + + + +```yaml pipeline { environment { TRUNK_TOKEN = credentials('TRUNK_TOKEN') @@ -100,13 +113,18 @@ pipeline { ... } stage('Upload Test Results'){ - sh 'curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli' - sh './trunk-analytics-cli upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN' + sh 'curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk' + sh './trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN' } } } ``` -```groovy RSpec plugin + + + + + +```groovy pipeline { environment { TRUNK_ORG_SLUG = credentials('TRUNK_ORG_SLUG') @@ -119,21 +137,19 @@ pipeline { } } ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. 
If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. - + diff --git a/flaky-tests/get-started/ci-providers/otherci.mdx b/flaky-tests/get-started/ci-providers/otherci.mdx index 556c8c9..8613989 100644 --- a/flaky-tests/get-started/ci-providers/otherci.mdx +++ b/flaky-tests/get-started/ci-providers/otherci.mdx @@ -2,15 +2,15 @@ title: "Other CI Providers" description: "Configure Flaky Tests using any CI Provider" --- -Trunk Flaky Tests integrates with your CI provider by adding an upload step in each of your testing CI jobs via the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI provider by adding an upload step in each of your testing CI jobs via the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing JUnit XML output for your test runner, supported by virtually all test frameworks, which is what Trunk ingests. 
+Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing JUnit XML output for your test runner, supported by virtually all test frameworks, which is what Trunk ingests. ### Checklist @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,26 +41,29 @@ Store the Trunk slug and API token obtained in the previous step in your CI prov ### Upload to Trunk -Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](../../detection/), for example,`main`, `master`, or `develop`. +Add an `Upload Test Results` step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example,`main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. 
This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. -[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example Upload Script The following is an example of a script to upload test results after your tests run. Note: you must either run `trunk` from the repo root when uploading test results or pass a `--repo-root` argument. -To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](../frameworks/ "mention") docs. +To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [frameworks](/flaky-tests/get-started/frameworks "mention") docs. + +You can install the Trunk CLI locally like this: -You can install the Trunk Analytics CLI locally like this: + - -```bash Linux (x64) + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -68,7 +71,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -76,7 +84,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -84,7 +97,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ 
"https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -92,22 +110,25 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` - -Then, you can validate the results using the `trunk-analytics-cli validate` command like this: + + + + +Then, you can validate the results using the `trunk flakytests validate` command like this: ```bash ./trunk-analytics-cli validate --junit-paths ``` -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Environment Variables -Set these environment variables before running `trunk-analytics-cli upload` on unsupported CI systems: +Set these environment variables before running `trunk flakytests upload` on unsupported CI systems: -**Config Requirement:** `CUSTOM` must be set to `true` for environment variables to take effect and override the auto-detection of CI. +**Config Requirement:** `CUSTOM` must be set to `true` for environment variables to take effect and override the auto-detection of CI. All other variables are optional but recommended. @@ -135,15 +156,15 @@ The `JOB_URL` variable controls where the "Logs" link in Trunk Flaky Tests point * ❌ Link to a dashboard or workflow overview (less helpful for debugging) -**For GitHub Actions users:** While GitHub Actions is auto-detected, you can override the default workflow URL with a direct job URL. See [GitHub Actions - Getting Direct Links to Job Logs](./github-actions#getting-direct-links-to-job-logs) for instructions. +**For GitHub Actions users:** While GitHub Actions is auto-detected, you can override the default workflow URL with a direct job URL. See [GitHub Actions - Getting Direct Links to Job Logs](/flaky-tests/get-started/ci-providers/github-actions) for instructions.
#### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. - + diff --git a/flaky-tests/get-started/ci-providers/semaphoreci.mdx b/flaky-tests/get-started/ci-providers/semaphoreci.mdx index 6f34f90..6d166b8 100644 --- a/flaky-tests/get-started/ci-providers/semaphoreci.mdx +++ b/flaky-tests/get-started/ci-providers/semaphoreci.mdx @@ -2,15 +2,15 @@ title: "Semaphore CI" description: "Configure Flaky Tests using Semaphore CI" --- -Trunk Flaky Tests integrates with your CI by adding a step in your Semaphore CI Pipeline to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Semaphore CI Pipeline to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. 
+Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. ### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,15 +41,15 @@ Store the Trunk slug and API token obtained in the previous step in your Semapho ### Upload to Trunk -Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add an upload step after running tests in each of your CI jobs that run tests. This should be minimally all jobs that run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. 
This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. -[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example Semaphore CI Workflow @@ -57,8 +57,11 @@ The following is an example of a Semaphore CI workflow step to upload test resul To find out how to produce the report files the uploader needs, see the instructions for your test framework in the [Test Frameworks](/flaky-tests/get-started/frameworks) docs. - -```yaml XML + + + + +```yaml version: v1.0 name: Semaphore JavaScript Example Pipeline blocks: @@ -84,10 +87,15 @@ blocks: always: commands: # Upload results to trunk.io - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --junit-paths "" --org-url-slug --token ${TRUNK_TOKEN} + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token ${TRUNK_TOKEN} ``` -```yaml Bazel + + + + + +```yaml version: v1.0 name: Semaphore JavaScript Example Pipeline blocks: @@ -113,10 +121,15 @@ blocks: always: commands: # Upload results to trunk.io - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --bazel-bep-path --org-url-slug --token ${TRUNK_TOKEN} + - curl -fsSLO --retry 3 
https://trunk.io/releases/trunk && chmod +x trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token ${TRUNK_TOKEN} ``` -```yaml XCode + + + + + +```yaml version: v1.0 name: Semaphore JavaScript Example Pipeline blocks: @@ -142,10 +155,15 @@ blocks: always: commands: # Upload results to trunk.io - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --xcresult-path --org-url-slug --token ${TRUNK_TOKEN} + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token ${TRUNK_TOKEN} +``` + + + + + ``` -``` RSpec plugin version: v1.0 name: Semaphore JavaScript Example Pipeline blocks: @@ -170,21 +188,19 @@ blocks: commands: - TRUNK_ORG_URL_SLUG=${TRUNK_ORG_SLUG} TRUNK_API_TOKEN=${TRUNK_TOKEN} bundle exec rspec ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. - + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. 
- + diff --git a/flaky-tests/get-started/ci-providers/travisci.mdx b/flaky-tests/get-started/ci-providers/travisci.mdx index e189661..a459103 100644 --- a/flaky-tests/get-started/ci-providers/travisci.mdx +++ b/flaky-tests/get-started/ci-providers/travisci.mdx @@ -2,15 +2,15 @@ title: "Travis CI" description: "Configure Flaky Tests using Travis CI" --- -Trunk Flaky Tests integrates with your CI by adding a step in your Travis CI Pipelines to upload tests with the [Trunk Analytics CLI](../../reference/cli-reference). +Trunk Flaky Tests integrates with your CI by adding a step in your Travis CI Pipelines to upload tests with the [Trunk Uploader CLI](/flaky-tests/uploader). **Not using GitHub for source control?** -Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. +Flaky Test support for source control providers like GitLab and BitBucket is still experimental. If you're using a source control provider other than GitHub, [**contact us**](mailto:support@trunk.io) **to get started**. -Before you start on these steps, see the [Test Frameworks](../frameworks/) docs for instructions on producing a Trunk-compatible output for your test framework. +Before you start on these steps, see the [Test Frameworks](/flaky-tests/get-started/frameworks) docs for instructions on producing a Trunk-compatible output for your test framework. ### Checklist @@ -21,7 +21,7 @@ By the end of this guide, you should achieve the following. * [ ] Configure your CI to upload to Trunk * [ ] Validate your uploads in Trunk -After completing these checklist items, you'll be integrated with Trunk. +After completing these checklist items, you'll be integrated with Trunk. 
### Trunk Organization Slug and Token @@ -33,7 +33,7 @@ You can find your organization slug under **Settings > Organization > Manage > O #### Trunk Token -You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your _organization token_, not your project/repo token. +You can find your token under **Settings > Organization > Manage > Organization API Token > View Organization API Token > View**. Since this is a secret, do not leak it publicly. Ensure you get your *organization token*, not your project/repo token. ### Add the Trunk Token as a Secret @@ -41,15 +41,15 @@ Store the Trunk slug and API token obtained in the previous step as new secrets ### Upload to Trunk -Add a script step after running tests in each of your CI jobs that run tests. This should be run on pull requests, as well as from jobs that run on your main or [stable branches](../../detection/), for example, `main`, `master`, or `develop`. +Add a script step after running tests in each of your CI jobs that run tests. This should be run on pull requests, as well as from jobs that run on your main or [stable branches](/flaky-tests/detection), for example, `main`, `master`, or `develop`. - -It is important to upload test results from CI runs on [**stable branches**](../../detection/), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. + +It is important to upload test results from CI runs on [**stable branches**](/flaky-tests/detection), such as `main`, `master`, or `develop`. This will give you a stronger signal about the health of your code and tests. Trunk can also detect test flakes on PR and merge branches. To best detect flaky tests, it is recommended to upload test results from stable, PR, and merge branch CI runs. 
-[Learn more about detection](../../detection/) - +[Learn more about detection](/flaky-tests/detection) + #### Example Travis CI Workflow @@ -61,35 +61,53 @@ To find out how to produce the report files the uploader needs, see the instruct Note that TravisCI requires a recent version of Linux to use the current NodeJS runtimes. You may need to set the `dist` to `jammy` or later. See this [forum note](https://travis-ci.community/t/node-lib-x86-64-linux-gnu-libm-so-6-version-glibc-2-27-not-found-required-by-node/13655/2) for more details.
- -```yaml XML + + + + +```yaml language: node_js dist: jammy node_js: - 20 script: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --junit-paths "" --org-url-slug --token $TRUNK_TOKEN ``` -```yaml Bazel + + + + + +```yaml language: node_js dist: jammy node_js: - 20 script: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --bazel-bep-path --org-url-slug --token $TRUNK_TOKEN ``` -```yaml XCode + + + + + +```yaml language: node_js dist: jammy node_js: - 20 script: - - curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli - - ./trunk-analytics-cli upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN + - curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x ./trunk + - ./trunk flakytests upload --xcresult-path --org-url-slug --token $TRUNK_TOKEN ``` -```yaml RSpec plugin + + + + + +```yaml language: node_js dist: jammy node_js: @@ -97,21 +115,19 @@ node_js: script: - TRUNK_ORG_URL_SLUG=$TRUNK_ORG_SLUG TRUNK_API_TOKEN=$TRUNK_TOKEN bundle exec rspec ``` - + - -The examples above use the Linux x64 binary. If your CI runs on a different platform, see the [Trunk Analytics CLI](../../reference/cli-reference#manual-download) page for all available platform downloads. 
- + -See the [uploader.md](../../reference/cli-reference.md "mention") for all available command line arguments and usage. +See the [uploader](/flaky-tests/uploader "mention") for all available command line arguments and usage. #### Stale files Ensure you report every test run in CI and **clean up stale files** produced by your test framework. If you're reusing test runners and using a glob like `**/junit.xml` to upload tests, stale files not cleaned up will be included in the current test run, throwing off detection of flakiness. You should clean up all your results files after every upload step. - + **Have questions?** Join us and 1500+ fellow engineers [on Slack](https://slack.trunk.io/) to get help with Trunk. - + diff --git a/flaky-tests/get-started/frameworks.mdx b/flaky-tests/get-started/frameworks.mdx new file mode 100644 index 0000000..d4e25d2 --- /dev/null +++ b/flaky-tests/get-started/frameworks.mdx @@ -0,0 +1,9 @@ +--- +title: "Test frameworks" +description: "Guides for generating Trunk-compatible test results from various test frameworks" +--- +Trunk Flaky Tests uses test results uploaded from your CI jobs to detect flaky tests. + +Follow one of the guides below to configure your test framework to output compatible test reports and integrate with Trunk. + +
Cover image
Androidandroidandroid.png
Bazelbazelbazel.png
Behavebehaveunittest.png
cargo-nextestrustcargo-next.png
Cypresscypresscypress.png
Dart Testdart-testdart.png
Gogotestsumgotestsum.png
GoogleTestgoogletestgoogletest.png
Gradlegradlegradle.png
Jasminejasminejasmine.png
Jestjestjest.png
Karmakarmakarma.png
Kotestkotestkotest.png
Mavenmavenmaven.png
minitestminitestminitest.png
Mochamochamocha.png
Nightwatchnightwatchnightwatch.png
NUnitnunitnunit.png
PESTpest.png
PHPUnitphpunitphpunit.png
Playwrightplaywrightplaywright.png
pytestpytestpytest.png
Robot Frameworkrobot-frameworkrobot.png
RSpecrspecrspec.png
Swift Testingswift-testingswift-testing.png
Testplantestplantestplan-box.png
Vitestvitestvitest.png
XCTestxctestxctest.png
diff --git a/flaky-tests/get-started/frameworks/android.mdx b/flaky-tests/get-started/frameworks/android.mdx index e079e49..e4c0c78 100644 --- a/flaky-tests/get-started/frameworks/android.mdx +++ b/flaky-tests/get-started/frameworks/android.mdx @@ -2,18 +2,18 @@ title: "Android" description: "A guide for generating Trunk-compatible test reports for Android projects" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Android projects by integrating with Trunk. This document explains how to configure Android to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Android projects by integrating with Trunk. This document explains how to configure Android to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./android#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating reports @@ -25,8 +25,11 @@ By default, Android projects will produce a directory with JUnit XML reports und You can customize the report output location in your `build.gradle.kts` or `build.gradle`, for example, writing the reports to `./app/junit-reports`. 
- -```groovy Groovy + + + + +```groovy android { testOptions { unitTests { @@ -41,7 +44,12 @@ android { } } ``` -```kotlin Kotlin + + + + + +```kotlin android { testOptions { unitTests { @@ -54,24 +62,30 @@ android { } } ``` - + + + + When you configure your CI provider to upload reports in later steps, you will be uploading the reports using a glob such as `./junit-reports/*.xml`. #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. -If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk Flaky Tests. +If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk flaky tests. ### Try It Locally #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + - -```bash Linux (x64) + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -80,7 +94,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./apps/junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -89,7 +108,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./apps/junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -99,7 +123,11 @@ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./apps/junit-reports/*.xml" ``` -```bash macOS (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -108,7 +136,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./apps/junit-reports/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -128,64 +159,4 @@ You make an upload to Trunk using the following command: Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/bazel.mdx b/flaky-tests/get-started/frameworks/bazel.mdx index a4395e9..569a6d3 100644 --- a/flaky-tests/get-started/frameworks/bazel.mdx +++ b/flaky-tests/get-started/frameworks/bazel.mdx @@ -2,18 +2,18 @@ title: "Bazel" description: "A guide for generating Trunk-compatible test reports with Bazel" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Bazel projects by integrating with Trunk. This document explains how to configure Bazel to output compatible reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Bazel projects by integrating with Trunk. This document explains how to configure Bazel to output compatible reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./bazel#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports @@ -34,7 +34,7 @@ bazel test \ --build_event_json_file=build_events.json ``` -Trunk can parse the `build_events.json` file to locate your test reports. You will still need to **configure your test runners to output compatible reports**, and you can refer to the guides for [individual test frameworks](./). 
+Trunk can parse the `build_events.json` file to locate your test reports. You will still need to **configure your test runners to output compatible reports**, and you can refer to the guides for [individual test frameworks](/flaky-tests/get-started/frameworks). #### Build Without the Bytes @@ -54,8 +54,11 @@ Disable retries if you're retrying tests using the `--flaky_test_attempts` comma #### **The Validate Command** - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -64,7 +67,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --bazel-bep-path=build_events.json ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -73,7 +81,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --bazel-bep-path=build_events.json ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -82,7 +95,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --bazel-bep-path=build_events.json ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -91,7 +109,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --bazel-bep-path=build_events.json ``` - + + + + #### Test Upload @@ -109,64 +130,4 @@ You make an upload to Trunk using the following command: Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/behave.mdx b/flaky-tests/get-started/frameworks/behave.mdx index 08309ba..2e6c84e 100644 --- a/flaky-tests/get-started/frameworks/behave.mdx +++ b/flaky-tests/get-started/frameworks/behave.mdx @@ -2,22 +2,22 @@ title: "Behave" description: "A guide for generating Trunk-compatible test reports for Behave" --- -You can automatically [detect and manage flaky tests](../../detection/) in your projects running Behave by integrating with Trunk. This document explains how to configure Behave to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running Behave by integrating with Trunk. This document explains how to configure Behave to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./behave#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. Behave can output JUnit XML reports which are compatible with Trunk. You can do so with the `--junit` option: +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. 
Behave can output JUnit XML reports which are compatible with Trunk. You can do so with the `--junit` option: ```sh behave --junit @@ -35,7 +35,7 @@ Behave outputs multiple XML reports under the JUnit directory. You can locate th #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. You must remove the [rerun formatter](https://behave.readthedocs.io/en/latest/formatters/#formatters) from your `behave.ini` file if it is being used to automatically rerun failed tests. @@ -43,10 +43,13 @@ You must remove the [rerun formatter](https://behave.readthedocs.io/en/latest/fo #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -55,7 +58,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -64,7 +72,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -73,7 +86,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -82,7 +100,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -100,79 +121,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/cypress.mdx b/flaky-tests/get-started/frameworks/cypress.mdx index 785ee9e..a6e9d4f 100644 --- a/flaky-tests/get-started/frameworks/cypress.mdx +++ b/flaky-tests/get-started/frameworks/cypress.mdx @@ -2,51 +2,26 @@ title: "Cypress" description: "A guide for generating Trunk-compatible test reports for Cypress tests" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Cypress projects by integrating with Trunk. This document explains how to configure Cypress to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Cypress projects by integrating with Trunk. This document explains how to configure Cypress to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./cypress#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Cypress has a built-in Mocha JUnit reporter which outputs XML test reports. However, the built-in reporter does not include file paths in test case elements, which means Trunk cannot match tests to code owners or enable file-based filtering in the dashboard. 
+Cypress has a built-in XML reporter which you can use to output a Trunk-compatible report. -#### Recommended: Use cypress-junit-plugin for file paths +Update your Cypress config, such as you `cypress.config.js` or `cypress.config.ts` file to output XML reports: -For full functionality including code owner detection and file-based search, use the [`cypress-junit-plugin`](https://github.com/saucelabs/cypress-junit-plugin) reporter. It outputs test cases with the correct nested structure and file path attributes that Trunk expects. - -Install the plugin: - -```bash -npm install --save-dev @saucelabs/cypress-junit-plugin -``` - -Update your Cypress config: - -```javascript title="cypress.config.js" -const { defineConfig } = require('cypress') - -module.exports = defineConfig({ - reporter: '@saucelabs/cypress-junit-plugin', - reporterOptions: { - mochaFile: './junit.xml', - }, -}) -``` - -#### Alternative: Built-in Mocha reporter - -If you don't need file path matching or code owner detection, you can use the built-in reporter. Uploads will still work, but you will see warnings about missing file paths and won't be able to search by file in the dashboard. - -```javascript title="cypress.config.js" +```javascript const { defineConfig } = require('cypress') module.exports = defineConfig({ @@ -58,10 +33,6 @@ module.exports = defineConfig({ }) ``` - -The built-in Mocha JUnit reporter places the `file` attribute on `` elements but not on individual `` elements. Trunk requires file paths on test cases for code owner matching. If you see warnings like "report has test cases with missing file or filepath", switch to the `cypress-junit-plugin` above. - - #### Report File Path The JUnit report location is specified by the `mochaFile` property in your Cypress config. In the above example, the file will be at `./junit.xml`. @@ -72,7 +43,7 @@ You need to disable automatic retries if you previously enabled them. 
Retries co You can disable retries by setting `retries: 0` in your Cypress config file. -```javascript title="cypress.config.js" +```javascript module.exports = defineConfig({ retries: 0, }) @@ -82,8 +53,11 @@ module.exports = defineConfig({ #### **The Validate Command** - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -92,7 +66,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -101,7 +80,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -110,7 +94,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -119,7 +108,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + #### Test Upload @@ -135,74 +127,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
## Next Step Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/dart-test.mdx b/flaky-tests/get-started/frameworks/dart-test.mdx index 7a68542..3097e73 100644 --- a/flaky-tests/get-started/frameworks/dart-test.mdx +++ b/flaky-tests/get-started/frameworks/dart-test.mdx @@ -2,18 +2,18 @@ title: "Dart Test" description: "A guide for generating Trunk-compatible test reports for Dart tests" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Dart projects by integrating with Trunk. This document explains how to configure Dart to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Dart projects by integrating with Trunk. This document explains how to configure Dart to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./dart-test#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). 
### Generating Reports @@ -25,10 +25,12 @@ dart pub global activate junitreport Then, you can convert test reports to a JUnit format by piping it to `tojunit`and piping the output to a file like this: + ```sh dart test --reporter json | tojunit > junit.xml ``` + #### Report File Path The JUnit report is written to the location specified by the `tojunit >` pipe. In the example above, the test results will be written to `./junit.xml`. @@ -37,14 +39,17 @@ The JUnit report is written to the location specified by the `tojunit >` pipe. I You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. -Dart provides retries through the [retry class annotations](https://pub.dev/documentation/test/latest/test/Retry-class.html). Disable retry, use Trunk to [detect](../../detection/)[ flaky tests](../../detection/), and use Quarantining to isolate flaky tests dynamically at run time. +Dart provides retries through the [retry class annotations](https://pub.dev/documentation/test/latest/test/Retry-class.html). Disable retry, use Trunk to [detect](/flaky-tests/detection)[ flaky tests](/flaky-tests/detection), and use Quarantining to isolate flaky tests dynamically at run time. 
### Try It Locally #### **The Validate Command** - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -53,7 +58,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -62,7 +72,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -71,7 +86,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -80,7 +100,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + #### Test Upload @@ -98,65 +121,4 @@ You make an upload to Trunk using the following command: Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/googletest.mdx b/flaky-tests/get-started/frameworks/googletest.mdx index b16a58b..8e111da 100644 --- a/flaky-tests/get-started/frameworks/googletest.mdx +++ b/flaky-tests/get-started/frameworks/googletest.mdx @@ -2,18 +2,18 @@ title: "GoogleTest" description: "A guide for generating Trunk-compatible test reports for GoogleTest" --- -You can automatically [detect and manage flaky tests](../../detection/) in your GoogleTest projects by integrating with Trunk. This document explains how to configure GoogleTest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your GoogleTest projects by integrating with Trunk. This document explains how to configure GoogleTest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./googletest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). 
### Generating Reports @@ -49,8 +49,11 @@ Omit the[ ](https://docs.pytest.org/en/stable/how-to/cache.html)[`--gtest_repeat #### **The Validate Command** - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -59,7 +62,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -68,7 +76,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -77,7 +90,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -86,7 +104,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + #### Test Upload @@ -104,65 +125,4 @@ You make an upload to Trunk using the following command: Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/gotestsum.mdx b/flaky-tests/get-started/frameworks/gotestsum.mdx index 5d9d1ce..797eb8f 100644 --- a/flaky-tests/get-started/frameworks/gotestsum.mdx +++ b/flaky-tests/get-started/frameworks/gotestsum.mdx @@ -2,29 +2,31 @@ title: "Go" description: "A guide for generating Trunk-compatible test reports for Go tests" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Go projects by integrating with Trunk. This document explains how to configure Go to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Go projects by integrating with Trunk. This document explains how to configure Go to output JUnit XML reports that can be uploaded to Trunk for analysis. ### **Why an Extra Step for `go test`?** -The standard Go test runner, `go test`, is excellent for executing tests and providing immediate feedback to developers. However, it does not natively produce test reports in the JUnit XML format that Trunk Flaky Tests requires for ingestion and analysis. Therefore, an additional tool is needed to convert the output of `go test` into this compatible format. This intermediate step allows Trunk to accurately process your test results and identify flaky tests. +The standard Go test runner, `go test`, is excellent for executing tests and providing immediate feedback to developers. However, it does not natively produce test reports in the JUnit XML format that Trunk Flaky Tests requires for ingestion and analysis. Therefore, an additional tool is needed to convert the output of `go test` into this compatible format. This intermediate step ensures that Trunk can accurately process your test results and identify flaky tests. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./gotestsum#next-step) to configure your CI provider. 
+By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report (JUnit XML). * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating JUnit XML Reports from Go Tests Before integrating with Trunk, you need to generate a Trunk-compatible report. For Go, `go test` does not output JUnit XML by default, so you must use a tool to format it. + + Update your existing `go test` usage to generate json and use [**go-junit-report**](https://github.com/jstemmer/go-junit-report) to convert your standard Go testing output into JUnit XML. ``` @@ -36,8 +38,11 @@ Then pipe `go test` into the `go-junit-report`: ``` go test -json 2>&1 | go-junit-report -parser gojson -out junit_report.xml ``` + + + Install gotestsum into your project:\ \ `go install gotest.tools/gotestsum@latest`\ @@ -47,7 +52,9 @@ Call `gotestsum` to both execute your tests and generate the junit.xml file ``` gotestsum [path-to-tests-to-run] --junitfile ./junit.xml ``` + + Since `go test` doesn't directly output JUnit XML, you'll use a tool to convert its output. 
Here are two common options: @@ -100,8 +107,11 @@ If you're using a package like [**retry**](https://pkg.go.dev/github.com/hashico ### Try It Locally - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -110,7 +120,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -119,7 +134,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -128,7 +148,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -137,7 +162,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + ### Test Upload @@ -153,74 +181,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/gradle.mdx b/flaky-tests/get-started/frameworks/gradle.mdx index 401dbb3..d24df02 100644 --- a/flaky-tests/get-started/frameworks/gradle.mdx +++ b/flaky-tests/get-started/frameworks/gradle.mdx @@ -2,18 +2,18 @@ title: "Gradle" description: "A guide for generating Trunk-compatible test reports for Gradle" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Gradle projects by integrating with Trunk. This document explains how to configure Gradle to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Gradle projects by integrating with Trunk. This document explains how to configure Gradle to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./gradle#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). 
### Generating Reports @@ -26,34 +26,44 @@ By default, Android projects will produce a directory with JUnit XML reports und If you wish to override the default test result path, you can do so in the `build.gradle.kts` or `build.gradle` files: + -```groovy title="build.gradle" + +```groovy java.testResultsDir = layout.buildDirectory.dir("junit-reports") ``` + + -```kotlin title="build.gradle.kts" + +```kotlin java.testResultsDir = layout.buildDirectory.dir("junit-reports") ``` + + This example will write report files to `"./app/build/junit-reports/test/*.xml"` #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. -If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk Flaky Tests. +If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk flaky tests. ### Try It Locally #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -62,7 +72,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -71,7 +86,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -80,7 +100,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -89,7 +114,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/build/junit-reports/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -107,74 +135,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/index.mdx b/flaky-tests/get-started/frameworks/index.mdx deleted file mode 100644 index c3d638a..0000000 --- a/flaky-tests/get-started/frameworks/index.mdx +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: "Test frameworks" -mode: wide -description: "Guides for generating Trunk-compatible test results from various test frameworks" ---- -Trunk Flaky Tests uses test results uploaded from your CI jobs to detect flaky tests. - -Follow one of the guides below to configure your test framework to output compatible test reports and integrate with Trunk. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/flaky-tests/get-started/frameworks/jasmine.mdx b/flaky-tests/get-started/frameworks/jasmine.mdx index afe48d8..9214a9e 100644 --- a/flaky-tests/get-started/frameworks/jasmine.mdx +++ b/flaky-tests/get-started/frameworks/jasmine.mdx @@ -2,18 +2,18 @@ title: "Jasmine" description: "A guide for generating Trunk-compatible test reports for Jasmine tests" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Jasmine projects by integrating with Trunk. This document explains how to configure Jasmine to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Jasmine projects by integrating with Trunk. This document explains how to configure Jasmine to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./jasmine#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. 
* [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports @@ -29,7 +29,7 @@ npm install --save-dev jasmine-reporters When used for in-browser tests, the reporters are registered on a `jasmineReporters` object in the global scope (i.e. `window.jasmineReporters`). You can register it like this in your Jasmine config under `/spec/support/jasmine.mjs`: -```javascript title="/spec/support/jasmine.mjs" +```javascript import jasmineReporters from 'jasmine-reporters'; var junitReporter = new jasmineReporters.JUnitXmlReporter({ @@ -61,16 +61,19 @@ Jasmine will generate an XML report at the location specified by the `savePath` You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. -If you're using a package like [protractor-flake](https://www.npmjs.com/package/protractor-flake), disable it to get more accurate results from Trunk. Instead, you can mitigate flaky tests using the [Quarantining](../../quarantining/) feature in Trunk. +If you're using a package like [protractor-flake](https://www.npmjs.com/package/protractor-flake), disable it to get more accurate results from Trunk. Instead, you can mitigate flaky tests using the [Quarantining](/flaky-tests/quarantining) feature in Trunk. ### Try It Locally #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). 
If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: + + - -```bash Linux (x64) + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -79,7 +82,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -88,7 +96,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -97,7 +110,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -106,7 +124,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
@@ -117,81 +138,17 @@ Before modifying your CI jobs to automatically upload test results to Trunk, try You make an upload to Trunk using the following command: ```sh -./trunk-analytics-cli upload --junit-paths "./junit-reports/*.xml" \ +./trunk-analytics-cli flakytests upload --junit-paths "./junit-reports/*.xml" \ --org-url-slug \ --token ``` You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/jest.mdx b/flaky-tests/get-started/frameworks/jest.mdx index 54bd22d..255b211 100644 --- a/flaky-tests/get-started/frameworks/jest.mdx +++ b/flaky-tests/get-started/frameworks/jest.mdx @@ -2,22 +2,22 @@ title: "Jest" description: "A guide for generating Trunk-compatible test reports for Jest tests" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Jest projects by integrating with Trunk. This document explains how to configure Jest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Jest projects by integrating with Trunk. This document explains how to configure Jest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./jest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating XML reports from your test runs. 
To generate a Trunk-compatible XML report, install the `jest-junit` package: @@ -27,7 +27,7 @@ npm install --save-dev jest-junit Update your Jest config to add `jest-junit` as a reporter: -```json title="jest.config.json" +```json { "reporters": [ [ @@ -47,36 +47,9 @@ Update your Jest config to add `jest-junit` as a reporter: The `outputDirectory` and `outputName` options specify the path of the XML report. You'll need this path later when configuring automatic uploads to Trunk. -#### Using `filePathPrefix` - -In a monorepo with `pnpm` workspaces (or similar), Jest runs from within the package directory, so the file paths it records in the XML report are relative to that package — not to the repository root. For example, a test at `packages/my-package/src/__tests__/foo.test.js` would be recorded as `src/__tests__/foo.test.js`. - -This causes codeowners matching to fail because Trunk compares test file paths against the codeowners file at the repo root, which uses full repo-relative paths. - -To fix this, set the `filePathPrefix` option to the path of the package within the repo: - -```json title="jest.config.json" -{ - "reporters": [ - [ - "jest-junit", - { - "outputDirectory": "./", - "outputName": "junit.xml", - "addFileAttribute": "true", - "reportTestSuiteErrors": "true", - "filePathPrefix": "packages/my-package" - } - ] - ] -} -``` - -With `filePathPrefix` set, `jest-junit` will prepend the given path to every file path in the XML output, producing repo-root-relative paths that Trunk can correctly match against your codeowners file. - #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. If you have retries configured using the [jest.retryTimes method](https://jestjs.io/docs/jest-object#jestretrytimesnumretries-options), disable them for more accurate results. @@ -84,8 +57,11 @@ If you have retries configured using the [jest.retryTimes method](https://jestjs #### **The Validate Command** - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -94,7 +70,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -103,7 +84,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -112,7 +98,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -121,7 +112,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + #### Test Upload @@ -137,142 +131,12 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these 
[instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
+ +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/karma.mdx b/flaky-tests/get-started/frameworks/karma.mdx index 6c21dc1..119e6c9 100644 --- a/flaky-tests/get-started/frameworks/karma.mdx +++ b/flaky-tests/get-started/frameworks/karma.mdx @@ -2,22 +2,22 @@ title: "Karma" description: "A guide for generating Trunk-compatible test reports for Karma tests" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Karma projects by integrating with Trunk. This document explains how to configure Karma to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Karma projects by integrating with Trunk. This document explains how to configure Karma to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./karma#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating XML reports from your test runs. 
To generate a Trunk-compatible XML report, install the `karma-junit-reporter` package: @@ -27,7 +27,7 @@ npm install --save-dev karma-junit-reporter Add the `junit` reporter to your karma config file: -```javascript title="karma.conf.js" +```javascript module.exports = function(config) { config.set( { @@ -54,10 +54,13 @@ Karma doesn't support retries out of the box, but if you implemented retries, re #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -66,7 +69,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -75,7 +83,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -84,7 +97,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ 
"https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -93,7 +111,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit-reports/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -111,79 +132,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/kotest.mdx b/flaky-tests/get-started/frameworks/kotest.mdx index 7d559df..9b5a35c 100644 --- a/flaky-tests/get-started/frameworks/kotest.mdx +++ b/flaky-tests/get-started/frameworks/kotest.mdx @@ -2,33 +2,38 @@ title: "Kotest" description: "A guide for generating Trunk-compatible test reports for Kotest" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Kotest projects by integrating with Trunk. This document explains how to configure Kotest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Kotest projects by integrating with Trunk. This document explains how to configure Kotest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./kotest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports Steps for generating JUnit XML reports for Kotest depend on the build system you use for your project: + + Tests run with Gradle will generate Trunk-compatible JUnit XML reports by default. 
You can further [configure reporting behavior](https://docs.gradle.org/8.10.2/userguide/java_testing.html#test_reporting) in your `build.gradle.kts` or `build.gradle`. + + + Kotest projects using Maven require the following to be added to a project's `pom.xml` so JUnit XML reports can be generated: * the `maven-surefire-plugin` must be added to the `plugins` section of `pom.xml` -```xml title="pom.xml" +```xml @@ -46,7 +51,7 @@ Kotest projects using Maven require the following to be added to a project's `po * the `kotest-extensions-junitxml` must be added to the `dependencies` section of `pom.xml` -```xml title="pom.xml" +```xml io.kotest @@ -58,7 +63,9 @@ Kotest projects using Maven require the following to be added to a project's `po ``` + + #### Report File Path @@ -66,19 +73,24 @@ Kotest projects using Maven require the following to be added to a project's `po You can configure the path for generated JUnit XML files: + + By default, Kotlin projects will produce a directory with JUnit XML reports under `./app/build/test-results/test`. You can locate these files with the glob `"./app/build/test-results/test/*.xml"`. If you wish to override the default test result path, you can do so in the `build.gradle.kts` or `build.gradle` files: -```kotlin title="build.gradle.kts (Kotlin) or build.gradle (Groovy)" +```kotlin java.testResultsDir = layout.buildDirectory.dir("junit-reports") ``` + + + You can change the report file path by configuring the `reportsDirectory` in your `maven-surefire-plugin` in your `pom.xml` file: -```xml title="pom.xml" +```xml org.apache.maven.plugins maven-surefire-plugin @@ -90,34 +102,46 @@ You can change the report file path by configuring the `reportsDirectory` in you ``` The example above will output JUnit XML reports that can be located with the `/target/junit/*.xml` glob. + + #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. + -If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk Flaky Tests. + +If you've enabled retries using a plugin like the [test-retry-gradle-plugin](https://github.com/gradle/test-retry-gradle-plugin), disable it when running tests for Trunk flaky tests. + + + Maven uses the `maven-surefire-plugin` to run tests, which allows you to control the test retry behavior. You can disable retries by specifying 0 retries: ``` mvn -Dsurefire.rerunFailingTestsCount=0 test ``` + + ### Try It Locally #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + - -```bash Linux (x64) + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -126,7 +150,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -135,7 +164,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -144,7 +178,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -153,7 +192,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` - + + + + Make sure to specify the path to your JUnit XML test reports. @@ -173,79 +215,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/maven.mdx b/flaky-tests/get-started/frameworks/maven.mdx index 8831c74..3f7126e 100644 --- a/flaky-tests/get-started/frameworks/maven.mdx +++ b/flaky-tests/get-started/frameworks/maven.mdx @@ -2,18 +2,18 @@ title: "Maven" description: "A guide for generating Trunk-compatible test reports for Maven" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Maven projects by integrating with Trunk. This document explains how to configure Maven to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Maven projects by integrating with Trunk. This document explains how to configure Maven to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./maven#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). 
### Generating Reports @@ -23,7 +23,7 @@ Maven uses the `maven-surefire-plugin` by default to output JUnit XML reports, w You can change the report file path by configuring the `maven-surefire-plugin` plugin in your `pom.xml` file: -```xml title="pom.xml" +```xml @@ -46,7 +46,7 @@ The example above will output JUnit XML reports that can be located with the `/t If you have a Kotlin project and are using the Kotest test framework, you also need to include `kotest-extensions-junitxml` in your project's `pom.xml`. This allows Kotest to generate JUnit XML reports. -```xml title="pom.xml" +```xml io.kotest kotest-extensions-junitxml-jvm @@ -57,7 +57,7 @@ If you have a Kotlin project and are using the Kotest test framework, you also n #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. Maven uses the `maven-surefire-plugin` to run tests, which allows you to control the test retry behavior. You can disable retries by specifying 0 retries: @@ -69,10 +69,13 @@ mvn -Dsurefire.rerunFailingTestsCount=0 test #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -81,7 +84,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -90,7 +98,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -99,7 +112,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -108,7 +126,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./app/junit-reports/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -126,79 +147,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/minitest.mdx b/flaky-tests/get-started/frameworks/minitest.mdx index d7409ec..dfba7b6 100644 --- a/flaky-tests/get-started/frameworks/minitest.mdx +++ b/flaky-tests/get-started/frameworks/minitest.mdx @@ -2,22 +2,22 @@ title: "minitest" description: "A guide for generating Trunk-compatible test reports for minitest" --- -You can automatically [detect and manage flaky tests](../../detection/) in your minitest projects by integrating with Trunk. This document explains how to configure minitest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your minitest projects by integrating with Trunk. This document explains how to configure minitest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./minitest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. 
You can do this by generating Trunk-compatible XML reports from your test runs. To generate XML reports, install the `minitest-reporters` gem: @@ -27,7 +27,7 @@ gem install minitest-reporters Configure the `JUnitReporter` reporter in your `test_helper.rb` file: -```ruby title="test_helper.rb" +```ruby require "minitest/reporters" Minitest::Reporters.use! Minitest::Reporters::JUnitReporter.new ``` @@ -52,10 +52,13 @@ Minitest doesn't support retries out of the box, but if you implemented retries #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -64,7 +67,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -73,7 +81,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -82,7 +95,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 
\ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -91,7 +109,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -109,74 +130,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/mocha.mdx b/flaky-tests/get-started/frameworks/mocha.mdx index f10a4e5..0e31d2e 100644 --- a/flaky-tests/get-started/frameworks/mocha.mdx +++ b/flaky-tests/get-started/frameworks/mocha.mdx @@ -2,18 +2,18 @@ title: "Mocha" description: "A guide for generating Trunk-compatible test reports for Mocha" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Mocha projects by integrating with Trunk. This document explains how to configure Mocha to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Mocha projects by integrating with Trunk. This document explains how to configure Mocha to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./mocha#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports @@ -56,10 +56,13 @@ You can disable retry by omitting the `--retries` CLI option and [removing retri #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). 
If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -68,7 +71,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -77,7 +85,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -86,7 +99,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -95,7 +113,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
@@ -113,74 +134,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/nightwatch.mdx b/flaky-tests/get-started/frameworks/nightwatch.mdx index 1c75928..7539066 100644 --- a/flaky-tests/get-started/frameworks/nightwatch.mdx +++ b/flaky-tests/get-started/frameworks/nightwatch.mdx @@ -2,18 +2,18 @@ title: "Nightwatch" description: "A guide for generating Trunk-compatible test reports for Nightwatch" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Nightwatch projects by integrating with Trunk. This document explains how to configure Nightwatch to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Nightwatch projects by integrating with Trunk. This document explains how to configure Nightwatch to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./nightwatch#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports @@ -54,10 +54,13 @@ Nightwatch doesn't implement any form of automatic retry for failed or flaky tes #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). 
If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -66,7 +69,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -75,7 +83,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -84,7 +97,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -93,7 +111,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
@@ -111,74 +132,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/nunit.mdx b/flaky-tests/get-started/frameworks/nunit.mdx index d3108f8..0d5a6f5 100644 --- a/flaky-tests/get-started/frameworks/nunit.mdx +++ b/flaky-tests/get-started/frameworks/nunit.mdx @@ -2,22 +2,22 @@ title: "NUnit" description: "A guide for generating Trunk-compatible test reports for NUnit" --- -You can automatically [detect and manage flaky tests](../../detection/) in your NUnit projects by integrating with Trunk. This document explains how to configure NUnit to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your NUnit projects by integrating with Trunk. This document explains how to configure NUnit to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./nunit#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. 
You can do this by generating Trunk-compatible XML reports from your test runs. You can do this in dotnet with the NUnit's built-in JUnit reporter: @@ -29,7 +29,7 @@ dotnet test -o build -- NUnit.TestOutputXml="junit" .NET will output each build to the path specified by `-o ` and test results under a sub-folder of `/test-reports`, specified by the `-- NUnit.TestOutputXml=""` option. -In the example command from the [Generating Reports step](./nunit#generating-reports), the XMLs will be located under `./build/test-reports/junit/*.xml`. This is also the glob you'll use to locate the results when uploading test results. +In the example command from the [Generating Reports step](#generating-reports), the XMLs will be located under `./build/test-reports/junit/*.xml`. This is also the glob you'll use to locate the results when uploading test results. #### Disable Retries @@ -41,10 +41,13 @@ Omit `[Retry(n)]` from tests to disable retries. #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -53,7 +56,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -62,7 +70,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -71,7 +84,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -80,7 +98,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./build/test-reports/junit/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -98,74 +119,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). 
After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/other-test-frameworks.mdx b/flaky-tests/get-started/frameworks/other-test-frameworks.mdx index a606a0b..0e29d67 100644 --- a/flaky-tests/get-started/frameworks/other-test-frameworks.mdx +++ b/flaky-tests/get-started/frameworks/other-test-frameworks.mdx @@ -8,7 +8,7 @@ Trunk Flaky Tests is designed to be test framework agnostic. If you don't see a Trunk detects flaky tests by analyzing each test case's results over time. Trunk currently supports the JUnit XML and XCResult report formats. You will need to configure your test runner to report in one of these formats using a plugin or your own test result reporter. -Make sure your test reports accurately report the file name, test name, and stack trace of each test result. Make sure the test names are not randomized. These details help Trunk better detect and display your test cases' health status. +Make sure your tests reports accurately report the file name, test name, and stack trace of each test result. Make sure the test names are not randomized. These details help Trunk better detect and display your test cases' health status. ## 2. Output Location @@ -16,12 +16,15 @@ You'll need to validate and upload the generated JUnit files to Trunk later duri ## 3. Validate Your Reports -Since you'll be generating JUnit reports using a new plugin or custom reporter, you should use the Trunk Analytics CLI to validate your results and fix any warnings or errors. +Since you'll be generating JUnit reports using a new plugin or custom reporter, you should use the Trunk CLI to validate your results and fix any warnings or errors. 
-You can install the Trunk Analytics CLI locally like this: +You can install the Trunk CLI locally like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -29,7 +32,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -37,7 +45,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -45,7 +58,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -53,9 +71,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` - -Then, you can validate the results using the `trunk-analytics-cli validate` command like this: + + + + +Then, you can validate the results using the `trunk flakytests validate` command like this: ```bash ./trunk-analytics-cli validate --junit-paths @@ -63,67 +84,6 @@ Then, you can validate the results using the `trunk-analytics-cli validate` comm ## Next Step -You'll need to upload the JUnit reports generated by your CI jobs to Trunk so Trunk can [detect flaky tests](../../detection/) and [report them to the dashboard](../../dashboard). See [CI Providers](../ci-providers/) for a guide on how to upload test results to Trunk. 
- - - - - - - - - - - - - - - +You'll need to upload the JUnit reports generated by your CI jobs to Trunk so Trunk can [detect flaky tests](/flaky-tests/detection) and [report them to the dashboard](/flaky-tests/dashboard). See [CI Providers](/flaky-tests/get-started/ci-providers) for a guide on how to upload test results to Trunk. + +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/pest.mdx b/flaky-tests/get-started/frameworks/pest.mdx index 7e1cff4..18fb363 100644 --- a/flaky-tests/get-started/frameworks/pest.mdx +++ b/flaky-tests/get-started/frameworks/pest.mdx @@ -2,22 +2,22 @@ title: "Pest" description: "A guide for generating Trunk-compatible test reports for Pest" --- -You can automatically [detect and manage flaky tests](../../detection/) in your PHP projects by integrating with Trunk. This document explains how to configure Pest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your PHP projects by integrating with Trunk. This document explains how to configure Pest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./pest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. 
To generate XML reports, append `--log-junit junit.xml` to your `pest` test command: @@ -39,10 +39,13 @@ Pest doesn't support retries out of the box, but if you implemented retries, rem #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -51,7 +54,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -60,7 +68,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -69,7 +82,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -78,7 +96,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. 
To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -96,74 +117,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/phpunit.mdx b/flaky-tests/get-started/frameworks/phpunit.mdx index e64dd57..7718d51 100644 --- a/flaky-tests/get-started/frameworks/phpunit.mdx +++ b/flaky-tests/get-started/frameworks/phpunit.mdx @@ -2,22 +2,22 @@ title: "PHPUnit" description: "A guide for generating Trunk-compatible test reports for PHPUnit" --- -You can automatically [detect and manage flaky tests](../../detection/) in your PHP projects by integrating with Trunk. This document explains how to configure PHPUnit to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your PHP projects by integrating with Trunk. This document explains how to configure PHPUnit to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./phpunit#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. 
You can do this by generating Trunk-compatible XML reports from your test runs. To generate XML reports, append `--log-junit junit.xml` to your `phpunit` test command: @@ -39,10 +39,13 @@ PHPUnit doesn't support retries out of the box, but if you implemented retries, #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -51,7 +54,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -60,7 +68,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -69,7 +82,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -78,7 +96,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This 
will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -96,74 +117,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/playwright.mdx b/flaky-tests/get-started/frameworks/playwright.mdx index d030e55..31bc5b4 100644 --- a/flaky-tests/get-started/frameworks/playwright.mdx +++ b/flaky-tests/get-started/frameworks/playwright.mdx @@ -2,24 +2,24 @@ title: "Playwright" description: "A guide for generating Trunk-compatible test reports for Playwright" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Playwright projects by integrating with Trunk. This document explains how to configure Playwright to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Playwright projects by integrating with Trunk. This document explains how to configure Playwright to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./playwright#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports Playwright has multiple built-in reporters, including JUnit XML which Trunk can ingest. 
To get XML reports, add the following to your Playwright config: -```typescript title="playwright.config.ts" +```typescript import { defineConfig } from '@playwright/test'; export default defineConfig({ @@ -55,7 +55,7 @@ export default defineConfig({ #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. You can disable retries in Playwright by omitting the `--retries` command line option and [removing retries in your `playwright.config.ts` file](https://playwright.dev/docs/test-retries#retries). @@ -63,10 +63,13 @@ You can disable retries in Playwright by omitting the `--retries` command line o #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: + + - -```bash Linux (x64) + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -75,7 +78,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -84,7 +92,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -93,7 +106,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -102,7 +120,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -120,74 +141,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. 
Warnings will be displayed if the report has issues. - - - - +
## Next Step Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/pytest.mdx b/flaky-tests/get-started/frameworks/pytest.mdx index 288c903..095ef65 100644 --- a/flaky-tests/get-started/frameworks/pytest.mdx +++ b/flaky-tests/get-started/frameworks/pytest.mdx @@ -2,22 +2,22 @@ title: "Pytest" description: "A guide for generating Trunk-compatible test reports for Pytest" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Pytest projects by integrating with Trunk. This document explains how to configure Pytest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Pytest projects by integrating with Trunk. This document explains how to configure Pytest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./pytest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating JUnit XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. 
You can do this by generating JUnit XML reports from your test runs. In your CI job, update your `pytest` command to include the `--junit-xml` and `junit_family=xunit1` arguments to generate XML reports: @@ -25,7 +25,7 @@ In your CI job, update your `pytest` command to include the `--junit-xml` and `j pytest --junit-xml=junit.xml -o junit_family=xunit1 ``` -The `junit_family=xunit1` is necessary so that the generated XML report includes file paths for each test case. File paths for test cases are used for features that use code owners like the [Jira integration](../../management/ticketing/jira-integration) and [webhooks](../../webhooks/). +The `junit_family=xunit1` is necessary so that the generated XML report includes file paths for each test case. File paths for test cases are used for features that use code owners like the [Jira integration](/flaky-tests/ticketing-integrations/jira-integration) and [webhooks](/flaky-tests/webhooks). #### Report File Path @@ -33,16 +33,19 @@ The `--junit-xml` argument specifies the path of the JUnit report. You'll need t #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. Omit the [`--lf` or `--ff` options](https://docs.pytest.org/en/stable/how-to/cache.html) if you've previously configured your CI with these options to disable retries. ### Try It Locally -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). 
If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -51,7 +54,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -60,7 +68,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -69,7 +82,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -78,7 +96,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
@@ -96,74 +117,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/robot-framework.mdx b/flaky-tests/get-started/frameworks/robot-framework.mdx index 08af9a7..25c1202 100644 --- a/flaky-tests/get-started/frameworks/robot-framework.mdx +++ b/flaky-tests/get-started/frameworks/robot-framework.mdx @@ -2,22 +2,22 @@ title: "Robot Framework" description: "A guide for generating Trunk-compatible test reports for Robot Framework" --- -You can automatically [detect and manage flaky tests](../../detection/) in your projects running tests with Robot by integrating with Trunk. This document explains how to configure Robot to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running tests with Robot by integrating with Trunk. This document explains how to configure Robot to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./robot-framework#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. 
+Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. To output compatible reports, add the `--xunit` argument to your `robot` command: @@ -31,7 +31,7 @@ The JUnit report will be written to the location specified by the `--xunit` argu #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable them and prefer using the [Quarantine](../../quarantining/) feature to mitigate the negative impact of Flaky Tests. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable them and prefer using the [Quarantine](/flaky-tests/quarantining) feature to mitigate the negative impact of Flaky Tests. Omit the [`--rerunfailed`](https://docs.robotframework.org/docs/flaky_tests#re-execute-failed-tests-and-merge-results) flag and remove any [RetryFailed Listeners](https://docs.robotframework.org/docs/flaky_tests#retryfailed-listener) previously configured to run as part of your CI pipeline to disable retries. @@ -39,10 +39,13 @@ Omit the [`--rerunfailed`](https://docs.robotframework.org/docs/flaky_tests#re-e #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -51,7 +54,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -60,7 +68,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -69,7 +82,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -78,7 +96,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -96,142 +117,12 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. 
Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
+ +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/rspec.mdx b/flaky-tests/get-started/frameworks/rspec.mdx new file mode 100644 index 0000000..364cd30 --- /dev/null +++ b/flaky-tests/get-started/frameworks/rspec.mdx @@ -0,0 +1,109 @@ +--- +title: "RSpec" +description: "A guide for generating Trunk-compatible test reports for RSpec using Trunk's RSpec plugin" +--- +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running RSpec by integrating with Trunk. This document explains how to use Trunk's RSpec plugin to upload test results to Trunk. + +### Checklist + +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. + +* [ ] Set up and installed Trunk's RSpec plugin +* [ ] Disable retries for better detection accuracy +* [ ] Test uploads locally + + +Using the plugin is the best way to accurately detect flaky RSpec tests. + +You can also [manually generate and upload](/flaky-tests/get-started/frameworks/rspec/manual-uploads) test results in RSpec, however, **manual RSpec uploads are not recommended.** + + +### Installing the plugin + +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this for your Rspec tests using Trunk's RSpec plugin. + +To install the plugin in your project, add the `rspec_trunk_flaky_tests` gem to your `Gemfile`: + +```shell +gem "rspec_trunk_flaky_tests" +``` + +Install the plugin: + +```sh +bundle install +``` + +Then, load the plugin in `spec_helper.rb`: + +```shell +require "trunk_spec_helper" +``` + +#### Disable Retries + +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. + +If you have a step in CI to rerun failed tests with the `--only-failures` option, or you're using a package like [rspec-retry](https://github.com/NoRedInk/rspec-retry), remember to disable them. 
+ +#### Versions and Updating the Plugin + +You can find the Gem for `rspec_trunk_flaky_tests` [here](https://rubygems.org/gems/rspec_trunk_flaky_tests), along with its version history. This plugin is periodically updated for more robust support and bug fixes, and if you're encountering something unexpected, we first encourage you to: + +``` +bundle update rspec_trunk_flaky_tests +``` + +### Environment Variables + +These optional environment variables can be set in your project to change the behavior of the RSpec plugin. + +#### Repository metadata variables: + +| Argument | Description | +| ------------------------------ | -------------------------------------------------------- | +| `TRUNK_REPO_ROOT` | Path to repository root | +| `TRUNK_REPO_URL` | Repository URL (e.g., https://github.com/org/repo.git) | +| `TRUNK_REPO_HEAD_SHA` | HEAD commit SHA | +| `TRUNK_REPO_HEAD_BRANCH` | HEAD branch name | +| `TRUNK_REPO_HEAD_COMMIT_EPOCH` | HEAD commit timestamp (seconds since epoch) | +| `TRUNK_REPO_HEAD_AUTHOR_NAME` | HEAD commit author name | + +#### Configuration variables: + +| Argument | Description | +| -------------------------------- | --------------------------------------------------------- | +| `TRUNK_CODEOWNERS_PATH` | Path to CODEOWNERS file | +| `TRUNK_VARIANT` | Variant name for test results (e.g., 'linux', 'pr-123') | +| `TRUNK_DISABLE_QUARANTINING` | Set to 'true' to disable quarantining | +| `TRUNK_ALLOW_EMPTY_TEST_RESULTS` | Set to 'true' to allow empty results | +| `TRUNK_DRY_RUN` | Set to 'true' to save bundle locally instead of uploading | +| `TRUNK_USE_UNCLONED_REPO` | Set to 'true' for uncloned repo mode | +| `TRUNK_LOCAL_UPLOAD_DIR` | Directory to save test results locally (disables upload) | +| `DISABLE_RSPEC_TRUNK_FLAKY_TESTS` | Set to 'true' to completely disable Trunk | + +### Try It Locally + +#### Test Upload + +Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. 
+ +You make an upload to Trunk using the following command: + +```sh +TRUNK_ORG_URL_SLUG= \ +TRUNK_API_TOKEN= \ +bundle exec rspec +``` + +You can find your Trunk organization URL slug and token in the **Settings** or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). + +After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the test report uploaded by the plugin has issues. + +
+ +### Next Steps + +Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: + +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/rspec/index.mdx b/flaky-tests/get-started/frameworks/rspec/index.mdx deleted file mode 100644 index 0685467..0000000 --- a/flaky-tests/get-started/frameworks/rspec/index.mdx +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: "RSpec" -description: "A guide for generating Trunk-compatible test reports for RSpec using Trunk's RSpec plugin" ---- -You can automatically [detect and manage flaky tests](../../../detection/) in your projects running RSpec by integrating with Trunk. This document explains how to use Trunk's RSpec plugin to upload test results to Trunk. - -### Checklist - -By the end of this guide, you should achieve the following before proceeding to the [next steps](./#next-step) to configure your CI provider. - -- [ ] Set up and installed Trunk's RSpec plugin -- [ ] Disable retries for better detection accuracy -- [ ] Test uploads locally - - -Using the plugin is the best way to accurately detect flaky RSpec tests. - -You can also [manually generate and upload](./manual-uploads) test results in RSpec, however, **manual RSpec uploads are not recommended.** - - -### Installing the plugin - -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this for your Rspec tests using Trunk's RSpec plugin. - -To install the plugin in your project, add the `rspec_trunk_flaky_tests` gem to your `Gemfile`: - -```shell title="Gemfile" -gem "rspec_trunk_flaky_tests" -``` - -Install the plugin: - -```sh -bundle install -``` - -Then, load the plugin in `spec_helper.rb`: - -```shell title="spec/spec_helper.rb" -require "trunk_spec_helper" -``` - -#### Disable Retries - -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
- -If you have a step in CI to rerun failed tests with the `--only-failures` option, or you're using a package like [rspec-retry](https://github.com/NoRedInk/rspec-retry), remember to disable them. - -#### Versions and Updating the Plugin - -You can find the Gem for `rspec_trunk_flaky_tests` [here](https://rubygems.org/gems/rspec_trunk_flaky_tests), along with its version history. This plugin is periodically updated with improved support and bug fixes. If you're encountering something unexpected, first try: - -``` -bundle update rspec_trunk_flaky_tests -``` - -### Environment Variables - -These optional environment variables can be set in your project to change the behavior of the RSpec plugin. - -#### Repository metadata variables: - -| Argument | Description | -| ------------------------------ | ------------------------------------------------------ | -| `TRUNK_REPO_ROOT` | Path to repository root | -| `TRUNK_REPO_URL` | Repository URL (e.g., https://github.com/org/repo.git) | -| `TRUNK_REPO_HEAD_SHA` | HEAD commit SHA | -| `TRUNK_REPO_HEAD_BRANCH` | HEAD branch name | -| `TRUNK_REPO_HEAD_COMMIT_EPOCH` | HEAD commit timestamp (seconds since epoch) | -| `TRUNK_REPO_HEAD_AUTHOR_NAME` | HEAD commit author name | - -#### Configuration variables: - -| Argument | Description | -| --------------------------------- | --------------------------------------------------------- | -| `TRUNK_CODEOWNERS_PATH` | Path to CODEOWNERS file | -| `TRUNK_VARIANT` | Variant name for test results (e.g., 'linux', 'pr-123') | -| `TRUNK_DISABLE_QUARANTINING` | Set to 'true' to disable quarantining | -| `TRUNK_ALLOW_EMPTY_TEST_RESULTS` | Set to 'true' to allow empty results | -| `TRUNK_DRY_RUN` | Set to 'true' to save bundle locally instead of uploading | -| `TRUNK_USE_UNCLONED_REPO` | Set to 'true' for uncloned repo mode | -| `TRUNK_LOCAL_UPLOAD_DIR` | Directory to save test results locally (disables upload) | -| `DISABLE_RSPEC_TRUNK_FLAKY_TESTS` | Set to 'true' to completely disable Trunk 
| - -### Try It Locally - -#### Test Upload - -Before modifying your CI jobs to automatically upload test results to Trunk, try uploading a single test run manually. - -You make an upload to Trunk using the following command: - -```sh -TRUNK_ORG_URL_SLUG= \ -TRUNK_API_TOKEN= \ -bundle exec rspec -``` - -You can find your Trunk organization URL slug and token in the **Settings** or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). - -After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the test report uploaded by the plugin has issues. - - - - - - - -You do not need to download the `trunk-analytics-cli` when using the Trunk RSpec plugin. Uploads are handled for you as long as you have set `TRUNK_ORG_URL_SLUG` and `TRUNK_API_TOKEN`. - - -### Next Steps - -Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - - diff --git a/flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx b/flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx index e4c8016..0f1bf98 100644 --- a/flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx +++ b/flaky-tests/get-started/frameworks/rspec/manual-uploads.mdx @@ -2,26 +2,26 @@ title: "RSpec (Manual Uploads)" description: "A guide for generating Trunk-compatible test reports for RSpec without using Trunk's RSpec plugin" --- -You can automatically [detect and manage flaky tests](../../../detection/) in your projects running RSpec by integrating with Trunk. This document explains how to configure RSpec to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running RSpec by integrating with Trunk. 
This document explains how to configure RSpec to output JUnit XML reports that can be uploaded to Trunk for analysis. -We highly recommend using [Trunk's RSpec plugin](./) to upload test results for the best accuracy when detecting flaky tests. +We highly recommend using [Trunk's RSpec plugin](/flaky-tests/get-started/frameworks/rspec) to upload test results for the best accuracy when detecting flaky tests. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./manual-uploads#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this for your Rspec tests by generating JUnit XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this for your Rspec tests by generating JUnit XML reports from your test runs. To generate Trunk-compatible reports, install the `rspec_junit_formatter`: @@ -49,10 +49,13 @@ If you have a step in CI to rerun failed tests with the `--only-failures` option #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../../reference/cli-reference). 
If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -61,7 +64,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -70,7 +78,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -79,7 +92,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -88,7 +106,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. 
@@ -106,79 +127,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - - +
Atlassian Bambooatlassian-bamboo
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/.gitbook/includes/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/rust.mdx b/flaky-tests/get-started/frameworks/rust.mdx index e5b04d2..34d14ab 100644 --- a/flaky-tests/get-started/frameworks/rust.mdx +++ b/flaky-tests/get-started/frameworks/rust.mdx @@ -2,24 +2,24 @@ title: "cargo-nextest" description: "A guide for generating Trunk-compatible test reports for Rust" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Rust projects by integrating with Trunk. This document explains how to configure cargo-nextest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Rust projects by integrating with Trunk. This document explains how to configure cargo-nextest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./rust#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports `cargo-nextest` has built-in reporting for JUnit XML reports, which is trunk-compatible. You can enable JUnit reporting by adding the following to your nextest config: -```toml title=".config/nextest.toml" +```toml [profile.ci.junit] path = "junit.xml" ``` @@ -44,10 +44,13 @@ Omit the `--retries` option. 
#### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -56,7 +59,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -65,7 +73,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -74,7 +87,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -83,7 +101,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./target/nextest/ci/junit.xml" ``` - + + + + This will not upload anything to Trunk. To improve detection accuracy, you should address all errors and warnings before proceeding to the next steps. 
@@ -103,65 +124,4 @@ You make an upload to Trunk using the following command: Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/swift-testing.mdx b/flaky-tests/get-started/frameworks/swift-testing.mdx index 749e627..57b98e0 100644 --- a/flaky-tests/get-started/frameworks/swift-testing.mdx +++ b/flaky-tests/get-started/frameworks/swift-testing.mdx @@ -2,22 +2,22 @@ title: "Swift Testing" description: "A guide for generating Trunk-compatible test reports with Swift Testing" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Swift projects by integrating with Trunk. This document explains how to configure Swift Testing to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Swift projects by integrating with Trunk. This document explains how to configure Swift Testing to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./swift-testing#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. 
+Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. To output a compatible report, add the `--xunit-output` argument to your Swift test command: @@ -41,10 +41,13 @@ Swift Testing doesn't support retries out of the box, but if you implemented ret #### **The Validate Command** -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -53,7 +56,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -62,7 +70,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -71,7 +84,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -80,7 +98,10 @@ curl -fL --retry 3 \ chmod +x 
trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -98,74 +119,10 @@ You make an upload to Trunk using the following command: You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Steps Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/testplan.mdx b/flaky-tests/get-started/frameworks/testplan.mdx index 599c343..4d93b1b 100644 --- a/flaky-tests/get-started/frameworks/testplan.mdx +++ b/flaky-tests/get-started/frameworks/testplan.mdx @@ -2,22 +2,22 @@ title: "Testplan" description: "A guide for generating Trunk-compatible test reports for Testplan" --- -You can automatically [detect and manage flaky tests](../../detection/) in your projects running Testplan by integrating with Trunk. This document explains how to configure Testplan to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your projects running Testplan by integrating with Trunk. This document explains how to configure Testplan to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./testplan#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. Testplan can output JUnit XML reports which are compatible with Trunk. 
You can do so with the `--xml` option: +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. Testplan can output JUnit XML reports which are compatible with Trunk. You can do so with the `--xml` option: ```sh ./test_plan.py --xml @@ -33,17 +33,23 @@ You can specify the file path for the reports with the `--xml` option. Testplan outputs multiple XML reports under the JUnit directory. You can locate these when uploading the reports in CI with the `"./junit-reports/*.xml"` glob. - -```python Python + + + + +```python @test_plan(name='SamplePlan', xml_dir='junit-reports') def main(plan): ... ``` - + + + + ### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. #### Task-level Retries @@ -75,10 +81,13 @@ pool = ThreadPool(name="MyPool", allow_task_rerun=False) #### The Validate Command -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). 
If you don't have it installed already, you can install and run the `validate` command like this: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -87,7 +96,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -96,7 +110,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -105,7 +124,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -114,7 +138,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "junit-reports/*.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -140,65 +167,4 @@ You can find your Trunk organization slug and token in the settings or by follow Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/vitest.mdx b/flaky-tests/get-started/frameworks/vitest.mdx index 449af77..8f53ff5 100644 --- a/flaky-tests/get-started/frameworks/vitest.mdx +++ b/flaky-tests/get-started/frameworks/vitest.mdx @@ -2,26 +2,26 @@ title: "Vitest" description: "A guide for generating Trunk-compatible test reports with Vitest" --- -You can automatically [detect and manage flaky tests](../../detection/) in your Vitest projects by integrating with Trunk. This document explains how to configure Vitest to output JUnit XML reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your Vitest projects by integrating with Trunk. This document explains how to configure Vitest to output JUnit XML reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./vitest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports -Trunk detects flaky tests by analyzing test results automatically uploaded from your CI jobs. You can do this by generating Trunk-compatible XML reports from your test runs. +Trunk detects flaky tests by analyzing test reports automatically uploaded from your CI jobs. 
You can do this by generating Trunk-compatible XML reports from your test runs. You can configure Vitest to produce a Trunk-compatible JUnitXML report by updating your `vitest.config.ts`. -```javascript title="vitest.config.ts" +```javascript import { defineConfig } from 'vitest/config'; export default defineConfig({ @@ -43,7 +43,7 @@ The `outputFile: './junit.xml'` option specifies the path of the JUnit report. Y #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. If you've enabled retries, you can disable them following the [Vitest docs](https://vitest.dev/api/) for more accurate results. @@ -77,13 +77,13 @@ If you've enabled retries, you can disable them following the [Vitest docs](http 1. **Check TypeScript Configuration**: Ensure your `tsconfig.json` is valid and includes all necessary paths 2. **Verify Dependencies**: Make sure all imported modules are properly installed and accessible 3. **Review Setup Files**: Check any test setup files referenced in your Vitest config for errors -4. **Validate Vitest Config**: Make sure your `vitest.config.ts` doesn't contain invalid options +4. 
**Validate Vitest Config**: Ensure your `vitest.config.ts` doesn't contain invalid options ### Try It Locally #### Validate Test Execution First -Before validating your JUnit reports with Trunk, make sure Vitest can properly execute your tests: +Before validating your JUnit reports with Trunk, ensure Vitest can properly execute your tests: ```bash # Run tests with detailed output to catch configuration issues @@ -95,12 +95,15 @@ vitest run --reporter=json | jq '.testResults[].assertionResults' If you see test files listed as single entries rather than individual test cases, you likely have configuration issues that need to be resolved before proceeding. -You can validate your test reports using the [Trunk Analytics CLI](../../reference/cli-reference). If you don't have it installed already, you can install and run the `validate` command like this: +You can validate your test reports using the [Trunk CLI](/flaky-tests/uploader). If you don't have it installed already, you can install and run the `validate` command like this: #### **The Validate Command** - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -109,7 +112,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -118,7 +126,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -127,7 +140,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli 
./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -136,7 +154,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ./trunk-analytics-cli validate --junit-paths "./junit.xml" ``` - + + + + **This will not upload anything to Trunk**. To improve detection accuracy, you should **address all errors and warnings** before proceeding to the next steps. @@ -147,7 +168,7 @@ Before modifying your CI jobs to automatically upload test results to Trunk, try You make an upload to Trunk using the following command: ```sh -curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/download/trunk-analytics-cli-x86_64-unknown-linux.tar.gz" | tar -xz && chmod +x trunk-analytics-cli +curl -fsSLO --retry 3 https://trunk.io/releases/trunk && chmod +x trunk ./trunk-analytics-cli upload --junit-paths "./junit.xml" \ --org-url-slug \ --token @@ -155,74 +176,10 @@ curl -fL --retry 3 "https://github.com/trunk-io/analytics-cli/releases/latest/do You can find your Trunk organization slug and token in the settings or by following these [instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Step Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/frameworks/xctest.mdx b/flaky-tests/get-started/frameworks/xctest.mdx index 7b94ce3..4f8c9f3 100644 --- a/flaky-tests/get-started/frameworks/xctest.mdx +++ b/flaky-tests/get-started/frameworks/xctest.mdx @@ -2,18 +2,18 @@ title: "XCTest" description: "A guide for generating Trunk-compatible test reports for XCode and xcodebuild" --- -You can automatically [detect and manage flaky tests](../../detection/) in your XCTest projects by integrating with Trunk. This document explains how to configure XCTest to output XCResult reports that can be uploaded to Trunk for analysis. +You can automatically [detect and manage flaky tests](/flaky-tests/detection) in your XCTest projects by integrating with Trunk. This document explains how to configure XCTest to output XCResult reports that can be uploaded to Trunk for analysis. ### Checklist -By the end of this guide, you should achieve the following before proceeding to the [next steps](./xctest#next-step) to configure your CI provider. +By the end of this guide, you should achieve the following before proceeding to the [next steps](#next-step) to configure your CI provider. * [ ] Generate a compatible test report * [ ] Configure the report file path or glob * [ ] Disable retries for better detection accuracy * [ ] Test uploads locally -After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](../ci-providers/). +After correctly generating reports following the above steps, you'll be ready to move on to the next steps to [configure uploads in CI](/flaky-tests/get-started/ci-providers). ### Generating Reports @@ -36,7 +36,7 @@ The test reports will be written to the `./test-results.xcresult` directory when #### Disable Retries -You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. 
You should disable retries for accurate detection and use the [Quarantining](../../quarantining/) feature to stop flaky tests from failing your CI jobs. +You need to disable automatic retries if you previously enabled them. Retries compromise the accurate detection of flaky tests. You should disable retries for accurate detection and use the [Quarantining](/flaky-tests/quarantining) feature to stop flaky tests from failing your CI jobs. If you run tests in CI with [the `-retry-tests-on-failure` option](https://keith.github.io/xcode-man-pages/xcodebuild.1.html#retry-tests-on-failure), disable it for more accurate results. @@ -46,8 +46,11 @@ Before modifying your CI jobs to automatically upload test results to Trunk, try You make an upload to Trunk using the following command: - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -58,7 +61,12 @@ chmod +x trunk-analytics-cli --org-url-slug \ --token ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -69,7 +77,12 @@ chmod +x trunk-analytics-cli --org-url-slug \ --token ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -80,7 +93,12 @@ chmod +x trunk-analytics-cli --org-url-slug \ --token ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -91,78 +109,17 @@ chmod +x trunk-analytics-cli --org-url-slug \ --token ``` - + + + + You can find your Trunk organization slug and token in the settings or by following these 
[instructions](/flaky-tests/get-started/ci-providers/otherci#id-1.-store-a-trunk_token-secret-in-your-ci-system). After your upload, you can verify that Trunk has received and processed it successfully in the **Uploads** tab. Warnings will be displayed if the report has issues. - - - - +
### Next Step Configure your CI to upload test runs to Trunk. Find the guides for your CI framework below: - - - - - - - - - - - - - - +
Azure DevOps Pipelinesazure-devops-pipelinesazure.png
BitBucket Pipelinesbitbucket-pipelinesbitbucket.png
BuildKitebuildkitebuildkite.png
CircleCIcirclecicircle-ci.png
Drone CIdronecidrone.png
GitHub Actionsgithub-actionsgithub.png
Gitlabgitlabgitlab.png
Jenkinsjenkinsjenkins.png
Semaphoresemaphorecisemaphore.png
TeamCityhttps://github.com/trunk-io/docs/blob/main/flaky-tests/get-started/frameworks/broken-reference/README.mdteamcity.png
Travis CItraviscitravis.png
Other CI Providersotherciother.png
diff --git a/flaky-tests/get-started/multiple-repositories.mdx b/flaky-tests/get-started/multiple-repositories.mdx index a13af6f..7ca91a5 100644 --- a/flaky-tests/get-started/multiple-repositories.mdx +++ b/flaky-tests/get-started/multiple-repositories.mdx @@ -12,7 +12,7 @@ When the Trunk Analytics CLI uploads test results, it reads the git remote URL f * **Owner**: The organization or user (e.g., `your-company`) * **Name**: The repository name (e.g., `your-repo`) -These three components together uniquely identify the repository in Trunk. The API token determines which _organization_ the upload belongs to, but does not affect which _repository_ the results are associated with. +These three components together uniquely identify the repository in Trunk. The API token determines which *organization* the upload belongs to, but does not affect which *repository* the results are associated with. ### Using Trunk with Forks @@ -20,10 +20,10 @@ If you run tests from a fork, Trunk automatically keeps test results separate ba For example, if your company forks `metabase/metabase` to `your-company/metabase-fork`: -| Repository | Remote URL | Trunk Repo ID | -| ----------- | ----------------------------------------- | -------------------------- | -| Original | `github.com/metabase/metabase` | `metabase/metabase` | -| Your fork | `github.com/your-company/metabase-fork` | `your-company/metabase-fork` | +| Repository | Remote URL | Trunk Repo ID | +| ---------- | --------------------------------------- | ---------------------------- | +| Original | `github.com/metabase/metabase` | `metabase/metabase` | +| Your fork | `github.com/your-company/metabase-fork` | `your-company/metabase-fork` | You can use the same organization API token for both repositories. Trunk creates separate repo entries and keeps all test data isolated. 
@@ -73,7 +73,7 @@ export TRUNK_REPO_URL="https://github.com/your-company/your-repo.git" --token $TRUNK_API_TOKEN ``` -See the [Trunk Analytics CLI](/flaky-tests/reference/cli-reference) reference for the full list of override flags. +See the [Trunk Analytics CLI](https://github.com/trunk-io/docs/blob/main/uploader) reference for the full list of override flags. ### Monorepo with Multiple Test Suites @@ -109,13 +109,13 @@ This can happen if the same repository is uploaded with different URL formats (e 1. Standardize the remote URL format across all CI jobs. 2. Use `--repo-url` to set a consistent URL. -3. Contact [support@trunk.io](mailto:support@trunk.io) to merge duplicate repository entries. +3. Contact support@trunk.io to merge duplicate repository entries. ### FAQ -| Question | Answer | -| ---------------------------------------------- | ---------------------------------------------------------------------- | -| Can I use the same API token for multiple repos? | Yes. The token is org-scoped, not repo-scoped. | -| Will fork test results mix with upstream? | No. Repos are identified by remote URL, not by token. | -| Do I need separate tokens for forks? | No. The same token works for all repos in your organization. | -| Can I override the detected repository? | Yes. Use `--repo-url` or the `TRUNK_REPO_URL` environment variable. | +| Question | Answer | +| ------------------------------------------------ | ------------------------------------------------------------------- | +| Can I use the same API token for multiple repos? | Yes. The token is org-scoped, not repo-scoped. | +| Will fork test results mix with upstream? | No. Repos are identified by remote URL, not by token. | +| Do I need separate tokens for forks? | No. The same token works for all repos in your organization. | +| Can I override the detected repository? | Yes. Use `--repo-url` or the `TRUNK_REPO_URL` environment variable. 
| diff --git a/flaky-tests/get-started/test-collections.mdx b/flaky-tests/get-started/test-collections.mdx deleted file mode 100644 index 342a15d..0000000 --- a/flaky-tests/get-started/test-collections.mdx +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Test Collections" -description: "Organize your flaky tests into named collections to track and analyze specific subsets of your test suite." -hidden: true ---- -Test Collections let you group tests from any repository into named sets. Use collections to focus on a subset of your test suite, such as tests owned by a specific team, tests covering a critical service, or any grouping that matters to your workflow. - -Each collection has its own view of tests, uploads, and settings, separate from the full test suite view. - -## Create a collection - -Only organization admins can create collections. - -1. Navigate to **Flaky Tests** > **Collections** in the Trunk web app. -2. Click **Create Collection**. -3. Enter a **Name** and optional **Description**. -4. Click **Create collection**. - -After creation, you land on the collection detail page. The **Tests** and **Uploads** tabs are disabled until you upload test results to the collection. - -## Upload tests to a collection - -To populate a collection with test data, include the collection's short ID in your uploader configuration. The collection short ID appears in the URL when viewing the collection: - -``` -https://app.trunk.io//flaky-tests/collections/ -``` - -Pass the short ID when uploading results using the Trunk CLI: - -```bash -trunk flakytests upload --collection ... -``` - -See the [Uploader reference](../reference/cli-reference) for full upload options. - -## View collection tests and uploads - -Once tests are uploaded to a collection, the **Tests** and **Uploads** tabs become active on the collection detail page. - -* **Tests** tab: Shows all tests associated with this collection, with their flaky status, failure rates, and labels. 
-* **Uploads** tab: Shows the history of test uploads sent to this collection. -* **Overview** tab: Shows setup instructions and the upload configuration for this collection. -* **Tests** tab: Shows all tests associated with this collection, with their flaky status, failure rates, and labels. -* **Uploads** tab: Shows the history of test uploads sent to this collection. - -## Edit a collection - -Only organization admins can edit collection settings. - -1. Navigate to the collection detail page. -2. Click the **Settings** tab. -3. Update the **Name** or **Description**. -4. Click **Save changes**. - -## Delete a collection - -Only organization admins can delete collections. - -1. Navigate to the collection's **Settings** tab. -2. Click **Delete collection**. -3. Confirm deletion in the dialog. - -Deleting a collection removes it from the **Collections** list. Test data uploaded to the collection is not deleted from your overall test suite. - -## Permissions - -| Action | Admin | Member | -| ------------------------ | ----- | ------ | -| View collections | Yes | Yes | -| Create collection | Yes | No | -| Edit collection settings | Yes | No | -| Delete collection | Yes | No | - -Members can browse existing collections and view tests and uploads, but cannot create, edit, or delete collections. diff --git a/flaky-tests/management/github-pull-request-comments.mdx b/flaky-tests/github-pull-request-comments.mdx similarity index 55% rename from flaky-tests/management/github-pull-request-comments.mdx rename to flaky-tests/github-pull-request-comments.mdx index 600d2f6..e63c96f 100644 --- a/flaky-tests/management/github-pull-request-comments.mdx +++ b/flaky-tests/github-pull-request-comments.mdx @@ -6,45 +6,39 @@ Flaky Tests can post comments on GitHub pull requests that summarize test result **Note:** Flaky Tests will only post a comment when there are failing tests. - - - - +
Each GitHub comment includes a summary report showing all tests that passed, failed, flaked, were skipped, or were quarantined on the PR. - - - - +
Each test case includes the full stack trace when expanded, and the job run link takes you to the complete CI logs. ## Configuration -If you have the [Trunk GitHub App installed](../../setup-and-administration/github-app-permissions) and are [uploading JUnit XML](../get-started/frameworks/) test results on pull requests, expect to start seeing comments on your Pull Requests soon. If you prefer not to use the Trunk GitHub App, you can still set up comments on your Pull Requests by providing Trunk with a GitHub access token. +If you have the [Trunk GitHub App installed](/setup-and-administration/github-app-permissions) and are [uploading JUnit XML](/flaky-tests/get-started/frameworks) test results on pull requests, expect to start seeing comments on your Pull Requests soon. If you prefer not to use the Trunk GitHub App, you can still set up comments on your Pull Requests by providing Trunk with a GitHub access token. +
+Without Trunk GitHub App - It's recommended that the Trunk GitHub App be used to manage GitHub comments. If you need to generate comments without the Trunk GitHub app, you can do so with a service account and an API token. 1. Create a dedicated GitHub SVC account (Service Account) with access to the repositories in your GitHub Organization that Flaky Tests will comment on e.g., `trunk-analytics-user`. -2. On [github.com](https://github.com/), for `trunk-analytics-user` (or whichever user you wish to use), generate a [_Personal access token_](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) by navigating to **Settings** > **Developer settings** > **Personal access token** > **Fine-grained tokens** > **Generate new token**. +2. On [github.com](https://github.com/), for `trunk-analytics-user` (or whichever user you wish to use), generate a [*Personal access token*](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) by navigating to **Settings** > **Developer settings** > **Personal access token** > **Fine-grained tokens** > **Generate new token**. 3. Name the new token something memorable. ex: `trunk-flaky-tests-token`. 4. The expiry time is up to you - however long you wish to try out Flaky Tests comments/how often you are willing to rotate the token. For a longer-term solution, consider installing the Trunk GitHub App. -5. The resource owner should be the GitHub Organization or user that owns the appropriate repositories. [See note on GitHub Org Ownership settings.](./github-pull-request-comments#github-org-ownership) +5. The resource owner should be the GitHub Organization or user that owns the appropriate repositories. [See note on GitHub Org Ownership settings.](#github-org-ownership) 6. Select the repositories you wish to enable comments on. 7. 
**Permissions** - you must enable **Issues (Read and write)** and **Pull requests (Read and write)**. Note: It is expected that metadata permissions automatically change. -8. If everything looks good, scroll down to double check that your Overview for permissions looks something like the image below. If so, create the token. +8. If everything looks good, scroll down to double check that your Overview for permissions looks something like the image below. If so, create the token. - - ![](/assets/Screenshot_2024-06-12_at_9.52.28_AM.png) - -9. Once the token is generated, go back to the Trunk App ([app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests)) > click on your profile > **Settings** > **Manage** (under _Organization_) > **Organization GitHub Token** and enter the copied token into the text field, then finally press **Submit**. +
+9. Once the token is generated, go back to the Trunk App ([app.trunk.io](https://app.trunk.io/login?intent=flaky%20tests)) > click on your profile > **Settings** > **Manage** (under *Organization*) > **Organization GitHub Token** and enter the copied token into the text field, then finally press **Submit**. You should see comments posted by your service account on your next PR. -
+ +
## Disable commenting @@ -54,8 +48,8 @@ Pull Request comments are enabled by default. If you wish to disable the comment #### **GitHub Organization ownership** -If you wish to set the resource owner to be a GitHub Organization, you should double check that this is allowed by navigating to your **GitHub Organization** > **Settings** > **Personal access tokens** > **Settings**. Make sure under "_Fine-grained personal access tokens_", you have _"Allow access via fine-grained personal access tokens"_ selected. +If you wish to set the resource owner to be a GitHub Organization, you should double check that this is allowed by navigating to your **GitHub Organization** > **Settings** > **Personal access tokens** > **Settings**. Make sure under "*Fine-grained personal access tokens*", you have *"Allow access via fine-grained personal access tokens"* selected. -Once the token is created, the Organization admin may need to approve the request for the token. This can be done by going to **GitHub Organization** > **Settings** > **Personal access tokens** > **Pending requests**. To confirm that the token was set, you should be able to see it under **Active tokens**. +Once the token is created, the Organization admin may need to approve the request for the token. This can be done by going to **GitHub Organization** > **Settings** > **Personal access tokens** > **Pending requests**. To confirm that the token was set, you should be able to see it under **Active tokens**. At any point, feel free to reach out to our team [through Slack](https://slack.trunk.io). 
diff --git a/flaky-tests/detection/infrastructure-failure-protection.mdx b/flaky-tests/infrastructure-failure-protection.mdx similarity index 78% rename from flaky-tests/detection/infrastructure-failure-protection.mdx rename to flaky-tests/infrastructure-failure-protection.mdx index 8bd4364..5b3bde9 100644 --- a/flaky-tests/detection/infrastructure-failure-protection.mdx +++ b/flaky-tests/infrastructure-failure-protection.mdx @@ -1,18 +1,16 @@ --- title: "Infrastructure Failure Protection" -description: "Prevent false Trunk Flaky Tests detections during CI outages and infrastructure failures." +description: "Prevent false flaky test detections during CI outages and infrastructure failures." --- When infrastructure issues like database outages, network problems, or CI runner failures cause a large number of tests to fail simultaneously, retrying those tests can trigger mass false flaky detections. Infrastructure Failure Protection identifies these scenarios and excludes them from flakiness detection. - -![Configuration for Infrastructure Failure Protection and Failure Threshold](/assets/image_(1).png) - +
### How it works -Trunk monitors the failure rate of each test upload. If the percentage of failing tests exceeds your configured threshold, that upload is flagged as an infrastructure failure and excluded from Trunk Flaky Tests detection. +Trunk monitors the failure rate of each test upload. If the percentage of failing tests exceeds your configured threshold, that upload is flagged as an infrastructure failure and excluded from flaky test detection. -For example, if your threshold is set to 80% and a CI run has 85% of tests failing (this could be due to a database being unavailable or similar infrastructure issue, etc) that entire run will be excluded from Trunk Flaky Tests detection. This prevents tests from being incorrectly marked as flaky when they're retried and pass. +For example, if your threshold is set to 80% and a CI run has 85% of tests failing (this could be due to a database being unavailable or similar infrastructure issue, etc) that entire run will be excluded from flaky test detection. This prevents tests from being incorrectly marked as flaky when they're retried and pass. 
Uploads excluded due to infrastructure failure protection will appear in the **Uploads** tab with the status **"Upload Skipped Due to Infrastructure Error."** @@ -59,6 +57,6 @@ If you're using test quarantine, this feature is especially important to prevent ### Next steps -* Learn more about how Trunk [detects flaky tests](./index) +* Learn more about how Trunk [detects flaky tests](/flaky-tests/detection) * View excluded uploads in the Uploads tab -* Configure [test quarantine](../quarantining/#enable-quarantining) to automatically skip flaky tests +* Configure [test quarantine](/flaky-tests/quarantining#enable-quarantining) to automatically skip flaky tests diff --git a/flaky-tests/management/index.mdx b/flaky-tests/management/index.mdx deleted file mode 100644 index 6abf6d0..0000000 --- a/flaky-tests/management/index.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "Flaky test management" -description: "Organize, triage, and coordinate follow-up for detected flaky tests." ---- - - - - - - - - - - - diff --git a/flaky-tests/management/test-labels.mdx b/flaky-tests/management/test-labels.mdx deleted file mode 100644 index 5d7598e..0000000 --- a/flaky-tests/management/test-labels.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Test Labels" -description: "Organize and categorize test cases with organization-scoped labels." ---- -Test labels are organization-scoped tags you can apply to individual test cases to organize, filter, and categorize your test suite. Labels are applied manually today; see [Automatic labeling from monitors](#automatic-labeling-from-monitors) for what's coming. - - -![Labels applied to a test on details page](/assets/test-details-labels.png) - - -### Manage labels - -Labels are created, edited, and deleted at **Settings > Organization > Test Labels**. Each label has a name, an optional description, and a color used for its chip in the UI. The settings page also shows how many test cases each label is currently applied to. 
- - -Deleting a label removes it from every test case it's applied to; this cannot be undone. - - - -![Settings page to manage test labels](/assets/test-labels-settings.png) - - -### Apply and remove labels on a test case - -You apply and remove labels from a test case using the label picker on the test case detail page. The picker lets you search existing labels, toggle them on or off, and create a new label inline if one doesn't already exist. Each assignment records who applied the label and when. - - -![Label picker on test details page](/assets/test-details-label-picker.png) - - -### Filter tests by label - -On the tests list, you can filter the table down to test cases that have a particular label applied. This makes labels useful for slicing the view by the categories your team cares about. - - -![Filter tests to those that have specified label applied](/assets/tests-list-filtered-by-label.png) - - -### Automatic labeling from monitors - - -**Coming soon.** Monitors will be able to automatically apply and remove labels on test cases based on test behavior. More details will be published when this is available. - - -### Related - -* [Managing detected flaky tests](./managing-detected-flaky-tests) — a step-by-step process for handling detected flaky tests -* [Flake Detection](../detection/) — monitors that classify tests as flaky or broken diff --git a/flaky-tests/management/ticketing/jira-integration.mdx b/flaky-tests/management/ticketing/jira-integration.mdx deleted file mode 100644 index d65f685..0000000 --- a/flaky-tests/management/ticketing/jira-integration.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: "Jira integration" -description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets with the Jira integration" ---- -When Trunk Flaky Tests [detects a flaky test](../../detection/index), you can create an automatically generated Jira ticket for your team to pick up and fix the test. 
- -Webhook payloads will also contain ticket information when a Jira ticket is created with the integration or when [existing tickets are linked](./jira-integration#link-existing-tickets-to-tests). - -### Connecting to Jira - - - - - - -To connect a Jira Cloud project, navigate to **Settings** -> **Repositories** -> **Ticketing Integration** and select **Jira** as your Ticketing System. - -Then complete the form and click Connect to Jira Cloud with the following information. - -| Field Name | Description | Examples | -|---|---|---| -| Jira URL | The URL to your Jira Cloud project. | `https://trunk-io.atlassian.net` | -| Project Key | The project key for your Jira project. | `KAN` | -| Email | The email associated with your Jira API token. | `johndoe@example.com` | -| [Jira API token](#api-token-permissions) | [Create your Jira API token here.](https://id.atlassian.com/manage-profile/security/api-tokens) | `ATATT*****19FNY5Q` | -| Default label(s) for new tickets | Labels applied to new Jira tickets created through Trunk Flaky Tests | `flaky-test, debt` | - - -Jira labels cannot contain spaces — the Trunk UI enforces this restriction in the labels field. - - -After connecting to Jira, you can specify a default issue type for new tickets and a default assignee for new tickets. - -#### API Token permissions - -Your Jira user account must have the following project permissions to create a Jira API token that allows Trunk to read, create, and assign tickets automatically: - -* _Create issues_ -* _Assign issues_ OR _Browse users and groups_ (global permission) -* _Browse projects_ - * If issue-level security is configured, issue-level security permissions must be granted to read issues. 
- -You need to create an API token with the following scopes: - -* Required scopes (classic) - * `read:jira-work` - * `write:jira-work` - * `read:jira-user` -* Required scopes (granular): - * `read:issue:jira` - * `read:issue-meta:jira` - * `read:issue-security-level:jira` - * `read:issue.vote:jira` - * `read:issue.changelog:jira` - * `read:avatar:jira` - * `read:status:jira` - * `read:user:jira` - * `read:field-configuration:jira` - * `read:application-role:jira` - * `read:group:jira` - * `read:issue-type:jira` - * `read:project:jira` - * `read:project.property:jira` - * `read:issue-type-hierarchy:jira` - * `read:project-category:jira` - * `read:project-version:jira` - * `read:project.component:jira` - * `read:permission:jira` - * `write:issue:jira` - * `write:comment:jira` - * `write:comment.property:jira` - * `write:attachment:jira` - - -Jira tokens cannot last longer than 365 days. Once the token expires, you will need to generate a new API token. - - -### Create a new ticket - -You can create a new ticket for any test listed in Trunk Flaky Tests. - -There are 2 ways to create a new ticket in the Flaky Tests dashboard: - -* Click on the options menu for any test case on the repo overview dashboard - - - - - - -* Use the Create ticket button in the top left corner of the [test case details](../../dashboard#test-case-details) page. - -Before you create the ticket, you will have a preview of the title and description. - - - - - - -#### Create with Jira - -If you are connected to Jira, you can click the **Create Jira Ticket** button at the end of the modal, which will automatically create a ticket with the configured labels and assignees. - -#### Link existing tickets to tests - -If you already have a ticket in Jira that you want to link to a test in the dashboard, you can use the [Link Ticket to Test Case API](../../reference/api-reference#post-flaky-tests-link-ticket-to-test-case). 
- -### Custom Fields - -Some Jira projects require additional fields beyond the standard fields (summary, description, and issue type) when creating tickets. Trunk supports configuring default values for any Jira field on a per-issue-type basis. Users can also override those defaults when creating a ticket. - -#### Configuring custom fields - -In the Jira integration settings (**Settings** -> **Repositories** -> **Ticketing Integration**), select an issue type. Trunk fetches all available fields for that issue type from the Jira API and displays inputs for each supported field. - -For each field, you can: - -* Set a default value that pre-fills the field when a ticket is created -* Check **Require user to fill at creation** to leave the field blank in settings and prompt the user to fill it in the create ticket modal instead - -Trunk automatically detects required fields (as marked by your Jira project) and shows a validation error if no default is set and the field is not marked for user input. - -Fields are rendered using an appropriate input type based on the Jira field schema: - -| Jira schema | Input type | -| --- | --- | -| `string` | Text input | -| `number` | Number input | -| `option` | Searchable dropdown | -| `user` | User picker dropdown | -| `array` of `string` | Chip input (comma or Enter to add values) | -| `text` / `string` with text hint | Text input | - - -The `reporter` field is treated as optional even when Jira marks it as required. Jira automatically assigns the API token owner as reporter if the field is not specified. 
- - -The following fields are always excluded from the custom field configuration because they are managed elsewhere in the ticket creation flow: - -`summary`, `description`, `project`, `issuetype`, `attachment`, `issuelinks`, `parent` - -#### Overriding defaults at ticket creation - -When creating a ticket from the Flaky Tests dashboard, the create ticket modal shows inputs for any field that has a configured default or is marked for user input. Users can edit pre-filled defaults before submitting. diff --git a/flaky-tests/management/ticketing/linear-integration.mdx b/flaky-tests/management/ticketing/linear-integration.mdx deleted file mode 100644 index 6a3b769..0000000 --- a/flaky-tests/management/ticketing/linear-integration.mdx +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: "Linear integration" -description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets with the Linear integration" ---- -When Trunk Flaky Tests [detects a flaky test](../../detection/index), you can create an automatically generated Linear ticket for your team to pick up and fix the test. - -Webhook payloads will also contain ticket information when a Jira ticket is created with the integration or when [existing tickets are linked](./linear-integration#link-existing-tickets-to-tests). - -### Connecting to Linear - - - - - - -To connect a Linear project: - -1. Navigate to **Settings** > **Repositories** > **Ticketing Integration.** -2. Select **Linear** as your Ticketing System. -3. Add a [Linear API key](./linear-integration#api-token-permissions) -4. Select a Team and **Connect to Linear**. - -After connecting to Linear, you can specify a default project and a default assignee for new tickets. - -#### API Key permissions - -The following project permissions must be granted to your Linear API key so Trunk can read, create, and assign tickets automatically: - -* _Read_ -* _Create issues_ - -Selecting _Full Access_ will also grant the required permissions. 
- -### Create a new ticket - -You can create a new ticket for any test listed in Flaky Tests. - -There are 2 ways to create a new ticket in the Flaky Tests dashboard: - -* Click on the options menu for any test case on the repo overview dashboard - - - - - - -* Use the Create ticket button in the top left corner of the [test case details](../../dashboard#test-case-details) page. - -Before you create the ticket, you get a preview of the title and description. - - - - - - -#### Create with Linear - -If you are connected to Linear, you can click the **Create Linear Ticket** button at the end of the modal to automatically create a ticket with the configured team and assignees. - -Note: You can use [Flaky Tests webhooks](../../webhooks/linear-integration) to automate ticket creation, or if you need more control over how tickets are created in Linear. This integration is not required when using webhooks. - -#### Link existing tickets to tests - -If you already have a ticket in Linear that you want to link to a test in the dashboard, you can use the [Link Ticket to Test Case API](../../reference/api-reference#post-flaky-tests-link-ticket-to-test-case). diff --git a/flaky-tests/management/ticketing/other-ticketing-platforms.mdx b/flaky-tests/management/ticketing/other-ticketing-platforms.mdx deleted file mode 100644 index 71a01d5..0000000 --- a/flaky-tests/management/ticketing/other-ticketing-platforms.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Other ticketing platforms" -description: "Triage your flaky tests faster by manually creating tickets from generated markdown" ---- -If you have not set up an integration, Trunk Flaky Tests can still generate a ticket title and description so you can copy and paste the details into your project management software. - -### Create a new ticket - -You can create a new ticket for any test listed in Trunk Flaky Tests. 
- -There are 2 ways to create a new ticket in the Flaky Tests dashboard: - -* Click on the options menu for any test case on the repo overview dashboard - - - - - - -* Use the Create ticket button in the top left corner of the [test case details](../../dashboard#test-case-details) page. - -Before you create the ticket, you will have a preview of the title and description. - - - - - - -Now you can copy and paste the ticket title and description into your project management or ticketing platform. diff --git a/flaky-tests/management/managing-detected-flaky-tests.mdx b/flaky-tests/managing-detected-flaky-tests.mdx similarity index 67% rename from flaky-tests/management/managing-detected-flaky-tests.mdx rename to flaky-tests/managing-detected-flaky-tests.mdx index 0c7edf6..dc17529 100644 --- a/flaky-tests/management/managing-detected-flaky-tests.mdx +++ b/flaky-tests/managing-detected-flaky-tests.mdx @@ -2,48 +2,36 @@ title: "Managing detected flaky tests" description: "A step-by-step guide for building an automated process to manage detected flaky tests." --- -It is important to have a follow-up process in place to manage detected flaky tests. A good process makes sure flaky tests do not slow down CI for your development team and prevents flakes from accumulating over time. +It is important to have a follow-up process in place to manage detected flaky tests. A good process ensures that flaky tests do not slow down CI for your development team and prevents flakes from accumulating over time. This guide walks through Trunk's recommended best practices for building a process around detected flaky tests in your organization. 
-Flaky tests will be [automatically detected](../detection/) by Trunk after you: +Flaky tests will be [automatically detected](/flaky-tests/detection) by Trunk after you: -* [Set up your test framework](../get-started/frameworks/) to produce test reports -* [Integrated with your CI provider](../get-started/ci-providers/) to upload those reports on CI runs. +* [Set up your test framework](/flaky-tests/get-started/frameworks) to produce test reports +* [Integrated with your CI provider](/flaky-tests/get-started/ci-providers) to upload those reports on CI runs. Go through these guides first to start detecting flaky tests. -### Step 1: Organize tests with labels - -Test labels let you categorize and group related flaky tests within Trunk. Labels are useful for tracking tests by team, component, root cause, or any grouping that fits your workflow. - -To assign or remove labels on a test: - -1. Open the test detail page in the Trunk app. -2. In the **Metadata** section at the top of the page, click the **Labels** field. -3. Select labels from the picker or type to create new ones. Remove labels by clicking the **x** on any applied label. - -Labels you apply are visible on the test detail page. Use them to filter and prioritize your backlog of flaky tests. - -### Step 2: Create tickets for flaky tests +### Step 1: Create tickets for flaky tests Creating Linear or Jira tickets for detected flaky tests helps to integrate flaky test fixes into your existing workflows. -* Start by [connecting to Linear or Jira](./ticketing/). You can also set default labels or teams for flaky test tickets. +* Start by [connecting to Linear or Jira](/flaky-tests/ticketing-integrations). You can also set default labels or teams for flaky test tickets. * Once connected, you can click **Create Ticket** on a test detail page in Trunk. Trunk will create the ticket with context, including the test ID, flake rate, and the last failure stack trace and reason. 
* The ticket status and assignee will be visible on the test details page in Trunk, and these details will stay in sync with changes to the ticket. -### Step 3: Broadcast flakes +### Step 2: Broadcast flakes It is important to keep the team informed on all status changes for flaky tests . This allows for fast follow-up when a test is marked as flaky. -* Use the [built-in Slack or Microsoft Teams webhook integrations](../webhooks/) to transform webhook payloads into messages. +* Use the [built-in Slack or Microsoft Teams webhook integrations](/flaky-tests/webhooks) to transform webhook payloads into messages. * Trunk's built-in templates help you get started and test the connection. * You can then customize the transformation to update the message format and content, including @-mentioning test owners so they can follow up right away. -### Step 4: Mute monitors +### Step 3: Mute monitors If a flaky test has a known issue or a fix in progress, you can mute the monitor that flagged it. A muted monitor continues to run and record detections, but it does not contribute to the test's flaky status until the mute expires or is manually removed. @@ -53,14 +41,14 @@ To mute a monitor: 2. Find the monitor that flagged the test. 3. Click **Mute** and select a duration. -| Duration | Description | -|---|---| -| 1 hour | Quick suppression for transient issues | -| 4 hours | Short-term suppression | -| 24 hours | Suppress for a full day | -| 7 days | Suppress for a week | -| 30 days | Suppress for a month | -| Forever | Mute indefinitely until manually unmuted | +| Duration | Description | +| -------- | ---------------------------------------- | +| 1 hour | Quick suppression for transient issues | +| 4 hours | Short-term suppression | +| 24 hours | Suppress for a full day | +| 7 days | Suppress for a week | +| 30 days | Suppress for a month | +| Forever | Mute indefinitely until manually unmuted | The **Forever** option mutes the monitor with no expiration. 
The monitor stays muted until you explicitly unmute it from the test case detail page. This is useful when a test has a known flake that your team has accepted, or when a fix is planned but not yet scheduled. @@ -74,13 +62,13 @@ You can unmute a monitor at any time from the test case detail page, regardless Muting suppresses the monitor's contribution to the test's status. If the muted monitor was the only active monitor for a test, the test transitions from flaky to healthy for the duration of the mute. -### Step 5: Flag flaky tests +### Step 4: Flag flaky tests -If automated detection hasn't caught a test you know is flaky, you can manually [flag it as flaky](../detection/flag-as-flaky) from the test detail page. Flagged tests are treated as flaky regardless of automated detection state, and the flag can be removed at any time. +If automated detection hasn't caught a test you know is flaky, you can manually [flag it as flaky](/flaky-tests/detection#flag-as-flaky) from the test detail page. Flagged tests are treated as flaky regardless of automated detection state, and the flag can be removed at any time. -### Step 6: Quarantine flaky tests +### Step 5: Quarantine flaky tests -Flaky tests slow down CI and have a high negative impact on merge queue throughput. You can minimize or eliminate this CI slowdown by [quarantining](../quarantining/) flaky tests at runtime. +Flaky tests slow down CI and have a high negative impact on merge queue throughput. You can minimize or eliminate this CI slowdown by [quarantining](/flaky-tests/quarantining) flaky tests at runtime. * Enable quarantining for your repo at **Settings > your repo > Enable Test Quarantining**. * Manually quarantine flaky tests by going to the test details page, clicking **Quarantine**, and setting the status to **Always**. Leave a comment detailing why you are quarantining this test to keep your team informed. 
The comment and quarantine status change will appear in the timeline on the test details page. @@ -88,22 +76,22 @@ Flaky tests slow down CI and have a high negative impact on merge queue throughp After quarantining a test, Trunk will ignore the test result (pass/fail) on CI runs, preventing this flaky test from failing CI. -**Broken tests are not quarantine candidates.** Only tests with a **Flaky** status are eligible for quarantine. If a test is marked as Broken (consistently failing at a high rate), it represents a real regression that should be investigated and fixed rather than hidden. See [detection](../detection/) to understand the difference between flaky and broken tests. +**Broken tests are not quarantine candidates.** Only tests with a **Flaky** status are eligible for quarantine. If a test is marked as Broken (consistently failing at a high rate), it represents a real regression that should be investigated and fixed rather than hidden. See [detection](/flaky-tests/detection) to understand the difference between flaky and broken tests. -### Step 7: Automation +### Step 6: Automation -Trunk has [webhooks](../webhooks/) and [Flaky Tests APIs](../reference/api-reference) that can be used to build custom workflows around ticket creation, linking existing tickets to Trunk, sending notifications, and dealing with quarantined tests. +Trunk has [webhooks](/flaky-tests/webhooks) and [Flaky Tests APIs](/flaky-tests/flaky-tests) that can be used to build custom workflows around ticket creation, linking existing tickets to Trunk, sending notifications, and dealing with quarantined tests. -There is also built-in automation support that handles tasks such as assigning flaky test ownership, ticket creation, and quarantining (so that unblocking CI is not a manual process). +There is also built-in automation support that handles tasks such as assigning flaky test ownership, ticket creation, and quarantining (so that unblocking CI is not a manual process). 
-* [`CODEOWNERS` files](../dashboard#code-owners) can automatically assign ownership of test flakes. -* Tickets can be [auto-created using webhooks](../webhooks/) as triggers, similar to Slack or MS Teams notifications. +* [`CODEOWNERS` files](/flaky-tests/dashboard#code-owners) can automatically assign ownership of test flakes. +* Tickets can be [auto-created using webhooks](/flaky-tests/webhooks) as triggers, similar to Slack or MS Teams notifications. * Automatically quarantine flaky tests by enabling **Settings > your repo > Auto-Quarantine Flaky Tests**. You can customize how flaky and quarantined tests are handled to suit your team and organization best. -### Step 8: Review existing flakes and broken tests +### Step 7: Review existing flakes and broken tests It is important to track and triage existing flaky and broken tests over time. Trunk collects historical failure logs and stack traces, providing developers as much information as possible for debugging high-impact test failures. diff --git a/flaky-tests/overview.mdx b/flaky-tests/overview.mdx index 75a8b8e..2df8153 100644 --- a/flaky-tests/overview.mdx +++ b/flaky-tests/overview.mdx @@ -4,28 +4,19 @@ description: "Detect, quarantine, and eliminates flaky tests from your codebase" --- Trunk Flaky Tests lets your teams detect, track, quarantine, and fix **flaky tests** in your codebase. Trunk can also identify **broken tests** — tests failing consistently at a high rate that indicate real regressions needing immediate fixes, not just quarantining. Flaky Tests is language, environment, and framework-agnostic. -Let's explore how Trunk Flaky Tests' features help you tackle flaky tests. If you can't wait to try Trunk, follow our [getting started guide](/flaky-tests/get-started). +Let's explore how Trunk Flaky Tests' features help you tackle flaky tests. If you can't wait to try Trunk, follow our [getting started guide](/flaky-tests/get-started). You can see an overview of Trunk Flaky Tests in this video. 
- - - + +Watch the walkthrough. + ### Understand the impact Your dashboard shows a comprehensive overview of your test suite's health at a glance. It lets you see important impact metrics like the number of flaky tests, PRs impacted by flaky tests, and PRs rescued by quarantining flaky tests. - - - - +

Key repo metrics

To learn more, [see how Flaky Tests does detection](/flaky-tests/detection). @@ -33,75 +24,48 @@ To learn more, [see how Flaky Tests does detection](/flaky-tests/detection). You can find a list of known flaky tests complete with important information like their impact on PRs and if someone's working on a fix. For more granularity, you can also inspect individual tests for their execution history, results, and status changes. - - - - +

List of flaky tests

To learn more, [see how Flaky Tests does detection](/flaky-tests/detection). ### Stay in sync - -PR comment linking to PR Test Summary -PR comment linking to PR Test Summary - +
PR comment linking to PR Test Summary

PR comment linking to PR Test Summary

-Flaky Tests helps everyone in your team stay in sync about flaky test failures with [GitHub PR comments](./management/github-pull-request-comments), so no time is wasted debugging failures from known flaky tests. +Flaky Tests helps everyone in your team stay in sync about flaky test failures with [GitHub PR comments](/flaky-tests/github-pull-request-comments), so no time is wasted debugging failures from known flaky tests. -To learn more, [see our docs about GitHub Comments and Test Summaries](./management/github-pull-request-comments). +To learn more, [see our docs about GitHub Comments and Test Summaries](/flaky-tests/github-pull-request-comments). ### Investigate flaky failures Flaky Tests creates detailed reports for individual test failures so you can debug faster. - - - - +

Summary of unique failure types

Test details will summarize all the unique ways a flaky test fails and let you flip through the relevant stack traces in the Trunk app. - - - - +

Full failure stack traces

-To learn more, [see our docs about the detection of flaky tests](./detection/). +To learn more, [see our docs about the detection of flaky tests](/flaky-tests/detection). ### **Quarantine flaky failures** Flaky Tests allows you to [quarantine](/flaky-tests/quarantining) detected flaky tests, stopping them from failing your CI jobs. This prevents failed flaky tests from impacting your CI pipelines, so you won’t have to disable tests and won’t be slowed down by flaky CI jobs. - - - - +

flaky tests can be quarantined automatically or manually

-To learn more, [see our docs about quarantining tests](./quarantining/). +To learn more, [see our docs about quarantining tests](/flaky-tests/quarantining). ### Manage tickets - - - - +

Creating a Jira ticket for a flaky test

Trunk enables the automation of quickly creating and assigning tickets through integrations with platforms like Jira and Linear, as well as custom workflows with webhooks. The status of tickets created will be reflected in real-time in the Trunk web app. This helps you track efforts to fix high-impact, flaky tests. -To learn more, [learn about our ticketing integrations](./management/ticketing/jira-integration). +To learn more, [learn about our ticketing integrations](/flaky-tests/ticketing-integrations/jira-integration). ### **Next steps** - - - - - -Start finding flaky tests today by [signing up for Trunk](https://app.trunk.io/signup?intent=flaky%20tests) or reading our [Getting Started guides](./get-started/). +
Getting startedget-started
Create an accounthttps://app.trunk.io/signup?intent=flaky+tests
+ +Start finding flaky tests today by [signing up for Trunk](https://app.trunk.io/signup?intent=flaky%20tests) or reading our [Getting Started guides](/flaky-tests/get-started). diff --git a/flaky-tests/quarantining/quarantine-service-availability.mdx b/flaky-tests/quarantine-service-availability.mdx similarity index 74% rename from flaky-tests/quarantining/quarantine-service-availability.mdx rename to flaky-tests/quarantine-service-availability.mdx index 25212b3..7fca6eb 100644 --- a/flaky-tests/quarantining/quarantine-service-availability.mdx +++ b/flaky-tests/quarantine-service-availability.mdx @@ -1,10 +1,10 @@ --- title: "Quarantine Service Availability" -description: "How Trunk Analytics CLI handles quarantine service outages without compromising your CI pipeline." +description: "Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. Your CI pipeline's integrity is never compromised by Trunk outages." --- ### Service Availability and Graceful Degradation -[Trunk Analytics CLI](../reference/cli-reference) is designed to fail safe when our quarantine service is unavailable. Your CI pipeline's integrity is never compromised by Trunk outages. +[Trunk Analytics CLI](/flaky-tests/uploader) is designed to fail safe when our quarantine service is unavailable. Your CI pipeline's integrity is never compromised by Trunk outages. #### What happens if Trunk is unreachable? @@ -30,4 +30,4 @@ We prioritize avoiding false positives over convenience. If Trunk is down, we'd #### Caching behavior -Trunk Analytics CLI does not cache quarantine configuration locally. Each invocation requires a successful API call to apply quarantining. This means you are always operating on the freshest quarantine state rather than potentially stale data. +Trunk Analytics CLI does not cache quarantine configuration locally. Each invocation requires a successful API call to apply quarantining. 
This ensures you're always operating on the freshest quarantine state rather than potentially stale data. diff --git a/flaky-tests/quarantining/index.mdx b/flaky-tests/quarantining.mdx similarity index 57% rename from flaky-tests/quarantining/index.mdx rename to flaky-tests/quarantining.mdx index be0d027..7409337 100644 --- a/flaky-tests/quarantining/index.mdx +++ b/flaky-tests/quarantining.mdx @@ -2,9 +2,7 @@ title: "Quarantining" description: "Mitigate impact of known flaky tests by isolating them at run time" --- - -![](/assets/quarantined_test1.png) - +
**Quarantining** isolates known flaky tests to prevent them from blocking CI jobs while continuing to run and track their results. The system identifies flaky tests at runtime and overrides their exit codes when they fail, allowing your CI pipeline to pass without requiring code changes to disable problematic tests. @@ -16,7 +14,7 @@ description: "Mitigate impact of known flaky tests by isolating them at run time ### What does "Quarantined" mean? -A quarantined test continues running in CI and uploading results to Trunk Flaky Tests, but its failures won't block your pipeline. The [Trunk Analytics CLI](../reference/cli-reference) checks with Trunk's backend to determine if failed tests are quarantined, then overrides the exit code for those failures. When all failures in a CI job come from quarantined tests, the entire job passes. +A quarantined test continues running in CI and uploading results to Trunk Flaky Tests, but its failures won't block your pipeline. The [Trunk Analytics CLI](/flaky-tests/uploader) checks with Trunk's backend to determine if failed tests are quarantined, then overrides the exit code for those failures. When all failures in a CI job come from quarantined tests, the entire job passes. **Why this matters:** You maintain complete test coverage and historical data while preventing known problematic tests from disrupting your development cycle. @@ -25,9 +23,9 @@ A quarantined test continues running in CI and uploading results to Trunk Flaky Tests can be quarantined through two methods: 1. **Manual Quarantine** - You explicitly select specific tests using override settings -2. **Auto-Quarantine** (when enabled) - Tests already flagged by [Trunk's flaky detection](../detection/) are automatically quarantined +2. 
**Auto-Quarantine** (when enabled) - Tests already flagged by [Trunk's flaky detection](/flaky-tests/detection) are automatically quarantined -Tests are auto-quarantined only if detected as **flaky** or [flagged as flaky](../detection/flag-as-flaky) manually. Tests with a **Broken** status are not auto-quarantined — they represent real failures that should be investigated and fixed rather than hidden. For [manually quarantined tests](./index#overriding-individual-tests), all failures are quarantined regardless of test state. +Tests are auto-quarantined only if detected as **flaky** or [flagged as flaky](/flaky-tests/detection#flag-as-flaky) manually. Tests with a **Broken** status are not auto-quarantined — they represent real failures that should be investigated and fixed rather than hidden. For [manually quarantined tests](#overriding-individual-tests), all failures are quarantined regardless of test state. ### Enable quarantining @@ -36,7 +34,7 @@ Toggling the **Enable Test Quarantining** switch makes quarantining possible but A test failure will only be ignored by CI if the test is already manually quarantined, or if the test has previously been identified as flaky and the Auto-Quarantine option is enabled. -Actively quarantining tests will significantly change CI results, as failures from quarantined tests no longer cause builds to fail. [Learn more about the effects of quarantining](./index#whats-affected). +Actively quarantining tests will significantly change CI results, as failures from quarantined tests no longer cause builds to fail. [Learn more about the effects of quarantining](#whats-affected). With quarantining enabled, the Analytics Uploader will compare failed test cases against known flaky tests. If a test is known to be flaky, it will be quarantined. If all failed tests are quarantined, the exit code of the test command will be overridden to return 0 and the CI job will pass. 
@@ -45,35 +43,11 @@ With quarantining enabled, the Analytics Uploader will compare failed test cases To enable quarantining, navigate to **Settings** > **Repositories** > repository > **Flaky Tests** > toggle **on** **Enable Test Quarantining**. - -![](/assets/enable_test_quarantining.png) - +
Here's what each of these options does when enabled: -| Setting | Description | -|---|---| -| Enable Test Quarantining | This primary toggle activates the quarantining feature set, unlocking both manual override options and the ability to enable auto-quarantining. For any quarantining to work, the [necessary configurations](#updates-in-ci) must also be made in your CI pipeline. | -| Auto-Quarantine Flaky Tests | When enabled, any test already identified by Trunk as "flaky" will be automatically quarantined. This saves you from having to manually quarantine each flaky test as it's discovered. | - -#### Collection-level quarantining settings - -[Test Collections](../get-started/test-collections) have their own quarantining settings that override the repository-level settings for any uploads routed to that collection. This lets you apply different quarantining policies to different subsets of your test suite. - -To configure collection quarantining, navigate to **Flaky Tests** > **Collections** > _collection name_ > **Settings** > **Quarantining**. - -The same two toggles are available at the collection level: - -| Setting | Description | -|---|---| -| Enable Test Quarantining | Activates quarantining for this collection. When enabled, this setting overrides the repository-level quarantining setting for uploads that belong to this collection. | -| Auto-Quarantine Flaky Tests | Automatically quarantines any test in this collection that Trunk has identified as flaky. This option is only available when quarantining is enabled for the collection. | - - -Only organization admins can change collection quarantining settings. Members can view the settings page but cannot toggle the controls. - - -When you disable collection quarantining, auto-quarantine is also disabled automatically. Re-enabling quarantining for the collection does not restore auto-quarantine — you must turn it back on separately. +
SettingDescription
Enable Test QuarantiningThis primary toggle activates the quarantining feature set, unlocking both manual override options and the ability to enable auto-quarantining. For any quarantining to work, the necessary configurations must also be made in your CI pipeline.
Auto-Quarantine Flaky TestsWhen enabled, any test already identified by Trunk as "flaky" will be automatically quarantined. This saves you from having to manually quarantine each flaky test as it's discovered.
### **Quarantining with Sharded or Parallelized Tests** @@ -87,9 +61,9 @@ Wrap each command and specify its JUnit output path. Trunk captures the exit cod ```bash # run test 1 -./trunk-analytics-cli test --org-url-slug=[org] --token=[token] --junit-paths=test1_output/*.xml -- npm run test1 +trunk flakytests test --org-url-slug=[org] --token=[token] --junit-paths=test1_output/*.xml -- npm run test1 # run test 2 -./trunk-analytics-cli test --org-url-slug=[org] --token=[token] --junit-paths=test2_output/*.xml -- npm run test2 +trunk flakytests test --org-url-slug=[org] --token=[token] --junit-paths=test2_output/*.xml -- npm run test2 ``` **Option 2: Handling quarantining during upload** @@ -103,7 +77,7 @@ To handle build issues that occur outside test runs, use the --test-process-exit **Example** ```sh -./trunk-analytics-cli test --junit-paths "test_output.xml" \ +trunk flakytests test --junit-paths "test_output.xml" \ --org-url-slug \ --token $TRUNK_API_TOKEN \ --junit-paths="**/results/*.xml" \ @@ -118,42 +92,46 @@ The CLI only recognizes tests defined in JUnit. If multiple test executions occu ### Updates in CI -If you're using the provided [GitHub Actions workflow](../get-started/ci-providers/) to upload test results to Flaky Tests, you can quarantine flaky tests by wrapping the test command or as a follow-up step. +If you're using the provided [GitHub Actions workflow](/flaky-tests/get-started/ci-providers) to upload test results to Flaky Tests, you can quarantine flaky tests by wrapping the test command or as a follow-up step. -If you're using the Trunk Analytics CLI directly or other CI providers, check the instructions in the **Using The Trunk Analytics CLI Directly** tab. +If you're using the Trunk CLI directly or other CI providers, check the instructions in the **Using The Trunk CLI Directly** tab. + + Using the Trunk Analytics Uploader Action in your GitHub Actions Workflow files, may need modifications to your workflow files to support quarantining. 
If you upload your test results as a second step after you run your tests, **you need to add** `continue-on-error: true` **on your test step so your CI** job will continue even on failures. Here's an example file. -```yaml lines highlight={1,9} - name: Run Tests And Upload Results - on: - workflow_dispatch: - jobs: - upload-test-results: - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - name: Run Tests - id: unit_tests - shell: bash - run: - continue-on-error: true - - - name: Upload test results - if: always() - uses: trunk-io/analytics-uploader@v1 - with: - junit-paths: - org-slug: my-trunk-org-slug - token: ${{ secrets.TRUNK_API_TOKEN }} + +```yaml +name: Run Tests And Upload Results +on: + workflow_dispatch: +jobs: + upload-test-results: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - name: Run Tests + id: unit_tests + shell: bash + run: + continue-on-error: true + + - name: Upload test results + if: always() + uses: trunk-io/analytics-uploader@v1 + with: + junit-paths: + org-slug: my-trunk-org-slug + token: ${{ secrets.TRUNK_API_TOKEN }} ``` + If you want to run the test command and upload in a single step, the test command must be **run via the Analytics Uploader** through the `run: ` parameter. This will override the response code of the test command. Make sure to set `continue-on-error: false` so un-quarantined tests are blocking. @@ -178,16 +156,19 @@ jobs: token: ${{ secrets.TRUNK_API_TOKEN }} org-slug: my-trunk-org-slug ``` + - + + + **Using Flaky Tests as a separate step** -If you upload your test results as a second step after you run your tests, you need to ensure your test step **continues on errors** so the upload step that's run after can quarantine failed tests. When quarantining is enabled, the `trunk-analytics-cli upload` command will **return an error** if there are unquarantined failures and return a status code 0 if all tests are quarantined. 
+If you upload your test results as a second step after you run your tests, you need to ensure your test step **continues on errors** so the upload step that's run after can quarantine failed tests. When quarantining is enabled, the `flakytests upload` command will **return an error** if there are unquarantined failures and return a status code 0 if all tests are quarantined. ```bash || true # doesn't fail job on failure | - ./trunk-analytics-cli upload \ + trunk flakytests upload \ --org-url-slug $TRUNK_ORG_SLUG \ --token $TRUNK_API_TOKEN \ --junit-paths $JUNIT_PATH @@ -195,55 +176,39 @@ If you upload your test results as a second step after you run your tests, you n **Using Flaky Tests as a single step** -You can also wrap the test command with the Trunk Analytics CLI. When wrapping the command with the Trunk Analytics CLI, if there are unquarantined tests, the command will return an error. If there are no unquarantined tests, the command will return a status code 0. +You can also wrap the test command with the Trunk CLI. When wrapping the command with the Trunk CLI, if there are unquarantined tests, the command will return an error. If there are no unquarantined tests, the command will return a status code 0. ```bash -./trunk-analytics-cli test \ +trunk flakytests test \ --org-url-slug \ --token $TRUNK_API_TOKEN \ --junit-paths $JUNIT_PATH \ --allow-empty-test-results \ ``` + + ### Overriding individual tests If you have tests that should never be quarantined or should always be quarantined regardless of their current health status, you can do this by overriding individual tests. - -![](/assets/qurantine-individual-tests.png) - - -You can set a quarantine override from two places: - -**From the Flaky Tests table** - -Right-click any row in the Flaky Tests table to open the context menu. Two quarantine actions are available: - -* **Quarantine test** / **Unquarantine test** — toggles the always-quarantine override for that test. 
-* **Never Quarantine test** / **Remove Never Quarantine** — toggles the never-quarantine override. When set, the test is never quarantined, even if auto-quarantining is enabled for the repo. - -All four options require admin permissions. Non-admin users see them as disabled with the tooltip "Only admins can set manual quarantine." +

overriding

-**From the test details page** +You can manually control a test's quarantine status from its details page. * To set an override: Click the **Quarantine** (or **Override**) button, then select either Always Quarantine or Never Quarantine. * To remove an override: Click the **Remove Override** button. When a manual override is active, a banner shows who set it and when. -| Setting | Behavior | -|---|---| -| Always Quarantine | Quarantine a test failure even if the health status is healthy. | -| Never Quarantine | Never quarantine failures, even if the health status is flaky, and auto-quarantining is enabled for the repo. | +
SettingBehavior
Always QuarantineQuarantine a test failure even if the health status is healthy.
Never QuarantineNever quarantine failures, even if the health status is flaky, and auto-quarantining is enabled for the repo.
- -![](/assets/qurantine-individual-tests-revert.png) - +
-To review a history of all quarantine changes on a test, check the **Events** tab on the test details page. The Events tab shows every override, setting change, and comment, along with the author and timestamp for each entry. To see all quarantined runs of a test, set the **Quarantined** filter to **Only** on the **Test History** tab. +To review a history of all quarantine changes on a test, use the **Quarantine Events** filter within the **Test History** section. This will show every override, setting change, and comment, along with the author and timestamp for each entry. ### Tracking quarantined jobs in the dashboard @@ -254,27 +219,23 @@ Once quarantining is active, the **Quarantining** tab provides a central hub for * **Isolate Critical Workflows:** Use the filter to see how quarantining impacts specific branches, such as preventing flaky failures in your Merge Queue. * **Measure ROI:** Use the data to quantify the number of builds saved and developer time reclaimed for your organization. - -![](/assets/flaky-fullscreen.png) - +
### Audit logs Trunk provides audit logs for all setting changes and overwrites for individual tests. You can access the audit log by navigating to **Settings** > **Repositories** **>** repository **>** **Flaky Tests** > **Audit logs** under the Enable Test Quarantining heading. - -![](/assets/qurantine-audit-logs.png) - +
### Quarantining API and webhooks For advanced use cases, you can interact with quarantining features programmatically. -* API: Use the [Flaky Tests API](../reference/api-reference) to fetch a list of all currently quarantined tests in your project. -* Webhooks: Subscribe to the `test_case.quarantining_setting_changed` event to trigger automated workflows whenever a test's quarantine override is modified. Learn more about [Webhooks](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.quarantining_setting_changed). +* API: Use the [Flaky Tests API](/flaky-tests/flaky-tests) to fetch a list of all currently quarantined tests in your project. +* Webhooks: Subscribe to the `test_case.quarantining_setting_changed` event to trigger automated workflows whenever a test's quarantine override is modified. Learn more about [Webhooks](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.quarantining_setting_changed). #### Service Availability and Graceful Degradation -Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. Read more at [Quarantine Service Availability](./quarantine-service-availability) +Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. Read more at [Quarantine Service Availability](/flaky-tests/quarantine-service-availability) diff --git a/flaky-tests/reference/api-reference.mdx b/flaky-tests/reference/api-reference.mdx deleted file mode 100644 index 7ea903c..0000000 --- a/flaky-tests/reference/api-reference.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Flaky Tests API" -description: "REST API for checking Trunk service status and fetching unhealthy or quarantined tests in your project." ---- -The Trunk Flaky Tests API provides access to check the status of Trunk services and fetch [unhealthy](../detection/) or [quarantined](../quarantining/) tests in your project. 
The API is an HTTP REST API, returns JSON from all requests, and uses standard HTTP response codes. - -All requests must be [authenticated](../../setup-and-administration/apis/#authentication) by providing the `x-api-token` header. - - - - - - - - - - - - - \ No newline at end of file diff --git a/flaky-tests/reference/index.mdx b/flaky-tests/reference/index.mdx deleted file mode 100644 index 2fa8b98..0000000 --- a/flaky-tests/reference/index.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "Reference" -description: "Reference documentation for Flaky Tests APIs, CLI commands, and MCP tools." ---- - - - - - - diff --git a/flaky-tests/reference/mcp-reference/configuration/bearer-auth.mdx b/flaky-tests/reference/mcp-reference/configuration/bearer-auth.mdx deleted file mode 100644 index 9d3f748..0000000 --- a/flaky-tests/reference/mcp-reference/configuration/bearer-auth.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Bearer Authentication" -description: "Add Trunk's MCP Server via Bearer Authentication" ---- -You can leverage Trunk's MCP server for all of your agentic needs. When using the MCP in cloud environments, authenticate using Bearer Authentication. - -### API Token - -Retrieve your organization's API token from the settings page in the web app, e.g. `https://app.trunk.io//settings`. 
- -### Authorization Header - -Set the following header when connecting to the MCP `https://mcp.trunk.io/mcp`: - -| Header Key | Header Value | -| - | - | -| `Authorization` | `Bearer ` | diff --git a/flaky-tests/reference/mcp-reference/configuration/claude-code-plugin.mdx b/flaky-tests/reference/mcp-reference/configuration/claude-code-plugin.mdx deleted file mode 100644 index 041c2b4..0000000 --- a/flaky-tests/reference/mcp-reference/configuration/claude-code-plugin.mdx +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: "Claude Code Plugin" -description: "Install the Trunk plugin for Claude Code" ---- -The Trunk plugin for Claude Code bundles the MCP server connection, slash commands, and skills into a single install. This is the recommended way to connect Trunk to Claude Code. - -## Install the Plugin - -First, add the [community plugins marketplace](https://github.com/anthropics/claude-plugins-community) if you haven't already: - -``` -claude plugin marketplace add anthropics/claude-plugins-community -``` - -Then install the Trunk plugin: - -``` -claude plugin install trunk@claude-community -``` - -This gives you access to the MCP server connection, slash commands, and skills that activate automatically. - - -You can also install from the plugin repository URL: - -``` -/plugin install trunk@https://github.com/trunk-io/claude-code-plugin -``` - -This is useful if you want to pin to a specific version or test changes before publishing a new release. - - -## Authentication - -After installing, Claude Code will prompt you to authenticate with Trunk on first use. - -1. Run any Trunk command (e.g., `/trunk:fix-flaky`) or trigger an MCP tool call -2. Claude Code will open a browser window for OAuth login -3. Log in with your Trunk account and authorize the connection -4. You'll see `Authentication successful. 
Connected to trunk.` back in the terminal - -## Slash Commands - -| Command | What it does | -|---|---| -| `/trunk:fix-flaky ` | Retrieves root cause analysis for a flaky test and offers to apply the fix | -| `/trunk:why-flaky ` | Explains why a test is flaky without making changes — good for triage | -| `/trunk:setup-uploads` | Detects your test framework and CI provider, then generates the upload configuration | - -### Fix a flaky test - -``` -/trunk:fix-flaky test_user_login -``` - -Trunk analyzes the test, explains the root cause (race condition, shared state, time dependency, etc.), and shows a proposed fix with a diff. Confirm to apply the changes directly. - -### Understand why a test is flaky - -``` -/trunk:why-flaky test_payment_processing -``` - -Same analysis as `fix-flaky`, but read-only. Useful when you want to understand the problem before deciding how to handle it — especially for tests you didn't write. - -### Set up test uploads - -``` -/trunk:setup-uploads -``` - -Walks through configuring your repo to upload test results to Trunk. The plugin detects your CI provider and test framework automatically, then generates ready-to-paste config snippets. - -## Skills - -The plugin includes two skills that activate automatically based on context: - -**Flaky test patterns** — activates when you're debugging or writing tests. Provides common flaky test patterns and proven fixes so Claude Code can reference them without you asking. - -**Trunk CI setup** — activates when you're editing CI configuration files (`.github/workflows/`, `.circleci/config.yml`, etc.). Provides best practices for test upload configuration. 
- -## Also Available For - -- [Cursor](./cursor-ide) (one-click install) -- [GitHub Copilot](./github-copilot-ide) (one-click install) -- [Gemini CLI](./gemini-cli) -- [Any MCP client](https://github.com/trunk-io/mcp-server) — manual configuration diff --git a/flaky-tests/reference/mcp-reference/configuration/index.mdx b/flaky-tests/reference/mcp-reference/configuration/index.mdx deleted file mode 100644 index 6b58d1f..0000000 --- a/flaky-tests/reference/mcp-reference/configuration/index.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Configuration" -mode: wide -description: "Configure your AI application to connect to the Trunk MCP server for flaky test insights and setup assistance." ---- - - - - - - - - - - - - - - diff --git a/flaky-tests/reference/mcp-reference/index.mdx b/flaky-tests/reference/mcp-reference/index.mdx deleted file mode 100644 index 87e9548..0000000 --- a/flaky-tests/reference/mcp-reference/index.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Use MCP Server" -description: "Use the Trunk MCP server from your IDE or AI application to access flaky test insights and configure test uploads" ---- -Trunk Flaky Tests includes a [Model Context Protocol (MCP)](https://modelcontextprotocol.io/docs/getting-started/intro) server. AI applications like Claude Code or Cursor can use MCP servers to connect to data sources, tools, and workflows, enabling them to access key information and perform tasks. - -### Supported AI applications - -The following applications are currently supported: Cursor, Claude Code, Gemini CLI, and GitHub Copilot. 
- - -Gemini Code Assist and Windsurf are not supported due to their limited support for MCP servers - - -### API - -The Trunk MCP server is available at `https://mcp.trunk.io/mcp` and exposes the following tools: - -| Tool | Capability | -|---|---| -| [`search-test`](./search-test) | Experimental: Lookup the id of a test case | -| [`fix-flaky-test`](./fix-flaky-test) | Experimental: Retrieve insights around a failing/flaky test | -| [`investigate-ci-failure`](./investigate-ci-failure) | Experimental: Retrieve failing test logs from a CI run | -| [`setup-trunk-uploads`](./set-up-test-uploads) | Create a setup plan to upload test results | - -### Authorization - -The Trunk MCP server supports two authentication methods. - -**OAuth (default)** - -OAuth 2.0 + OpenID Connect is the default. MCP clients that support the [MCP authorization spec](https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization) will initiate the OAuth flow automatically. Most interactive clients (Cursor, Claude Code, GitHub Copilot) use this path. - -**API token** - -As an alternative, you can authenticate with your Trunk organization API token. This is useful for MCP clients that do not support OAuth, CI/headless environments, or quick manual setup. - -Find your token under **Settings > API** in the Trunk dashboard. Pass it as a Bearer token in the `Authorization` header: - -```json -{ - "mcpServers": { - "trunk": { - "url": "https://mcp.trunk.io/mcp", - "headers": { - "Authorization": "Bearer " - } - } - } -} -``` - -API token auth is org-level — all requests are attributed to the organization rather than to a specific user. OAuth remains the preferred method for interactive use because it provides user-level identity. 
- -### Get started - -**To get started, configure your AI application to communicate with Trunk's MCP server:** - -* [Cursor](./configuration/cursor-ide) -* [GitHub Copilot](./configuration/github-copilot-ide) -* [Claude Code CLI](./configuration/claude-code-cli) -* [Gemini CLI](./configuration/gemini-cli) diff --git a/flaky-tests/reference/mcp-reference/investigate-ci-failure.mdx b/flaky-tests/reference/mcp-reference/investigate-ci-failure.mdx deleted file mode 100644 index 762fc59..0000000 --- a/flaky-tests/reference/mcp-reference/investigate-ci-failure.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: "Investigate CI Failure" -description: "MCP tool reference: investigate-ci-failure" ---- -### Overview - -The `investigate-ci-failure` tool investigates a failing CI run by fetching structured test failure data from Trunk. Given a GitHub Actions workflow URL, this tool looks up test result bundles, parses them to extract test names and error messages, filters out quarantined (known-flaky) tests, and returns structured failure details the agent can act on. For more information, see [Autofix CI Failures](../../agents/autofix-ci-failures). - -**Return Type:** Structured failure details with test names, error messages, stdout, and stderr. If the CI job failed before tests ran (build or compilation failure), the tool suggests pulling raw logs from the workflow URL as a fallback. - -### Prerequisites - -- Your repository must be set up to [upload test results to Trunk](../../get-started/index) -- For best results, [enable quarantining](../../quarantining/) so known-flaky tests are filtered out automatically - -### Parameters - -#### Required Parameters - -| Parameter | Type | Description | -| ---------- | ------ | --------------------------------------------------------------- | -| `workflowUrl` | string | The GitHub Actions workflow URL, e.g. 
`https://github.com/{owner}/{repo}/actions/runs/{runId}` | - -#### Optional Parameters - -| Parameter | Type | Description | -| --------- | ------ | ---------------------------------------------------------------------- | -| `orgSlug` | string | The Trunk organization slug (used to disambiguate if you belong to multiple orgs) | - -### Getting Parameter Values - -**Get workflow URL:** - -Navigate to your GitHub Actions run and copy the full URL from your browser's address bar. It follows the pattern: - -``` -https://github.com/{owner}/{repo}/actions/runs/{runId} -``` - -### Usage Examples - -#### Investigate a workflow failure - -``` -Investigate the CI failure at https://github.com/trunk-io/trunk/actions/runs/12345678 -``` - -### What the tool does - -- Looks up test result uploads Trunk has received for that run -- Parses the test runs to extract test names, error messages, stdout and stderr -- Filters out quarantined (known-flaky) tests so you only see real failures -- Returns structured failure details you can act on - -**When tests didn't run:** If the CI job failed before any tests ran (e.g., a build or compilation failure), the tool will tell you so and suggest pulling raw CI logs directly from the workflow URL as a fallback. - -### Error Handling - -| Error | Cause | Resolution | -| ------------------------------ | --------------------------------------------- | --------------------------------------------------------- | -| `Invalid workflow URL` | Malformed or incorrect workflow URL | Verify the URL follows the pattern `https://github.com/{owner}/{repo}/actions/runs/{runId}` | -| `No test results were uploaded for this CI run` | No test run uploads were uploaded from the provided workflow | Check that the workflow run URL is correct and that it is uploading test results. 
Compilation and build failures will not upload test results | -| `No test uploads found for this repository` | Repo hasn't configured Trunk test result uploads | Follow setup instructions to [upload test results](../../get-started/index) | diff --git a/flaky-tests/reference/mcp-reference/search-test.mdx b/flaky-tests/reference/mcp-reference/search-test.mdx deleted file mode 100644 index 33ef41f..0000000 --- a/flaky-tests/reference/mcp-reference/search-test.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: "Search Test" -description: "MCP tool reference: search-test" ---- -### Overview - -The `search-test` tool looks up the ID of a test case given its name. - - -**Return Type:** Metadata about the test, including its ID. - -### Parameters - -#### Required Parameters - -| Parameter | Type | Description | -| ---------- | ------ | --------------------------------------------------------------- | -| `repoName` | string | Repository name in `owner/repo` format (e.g., `trunk-io/trunk`) | -| `testNameSearch` | string | Search string for the test name. 
Does not include filepaths | - -#### Optional Parameters - -| Parameter | Type | Description | -| --------- | ------ | ---------------------------------------------------------------------- | -| `orgSlug` | string | The name of your organization in the Trunk app | -| `limit` | number | Limit for test results to return, up to 20 | - -### Getting Parameter Values - -If your AI assistant doesn't have direct access to Git information, use these commands: - -**Get repository name:** - -```bash -git remote -v -``` - -Look for the repository name in the output (e.g., `trunk-io/trunk` from `git@github.com:trunk-io/trunk.git`) - -### Usage Examples - -#### Search - -``` -What's the test case ID for the test "clear all filters button appears in empty state and clears filters" -``` - -### Error Handling - -| Error | Cause | Resolution | -| ------------------------------ | --------------------------------------------- | --------------------------------------------------------- | -| `No tests matched {searchString} in repo {repoName}` | No results found | Check your search string and try again | -| Repository authorization error | Insufficient permissions or invalid repo name | Verify repository name format and your access permissions | diff --git a/flaky-tests/detection/the-importance-of-pr-test-results.mdx b/flaky-tests/the-importance-of-pr-test-results.mdx similarity index 84% rename from flaky-tests/detection/the-importance-of-pr-test-results.mdx rename to flaky-tests/the-importance-of-pr-test-results.mdx index 6d42943..cdf0f9e 100644 --- a/flaky-tests/detection/the-importance-of-pr-test-results.mdx +++ b/flaky-tests/the-importance-of-pr-test-results.mdx @@ -1,8 +1,8 @@ --- title: "The Importance of PR Test Results" -description: "Why uploading test results from pull requests is required for accurate flaky test detection, quarantining, and impact measurement." +description: "Uploading test results from pull requests (PRs) is a critical step for enabling Trunk Flaky Tests. 
This data provides a primary signal for detecting flaky tests and is the key metric for measuring their impact." --- -Uploading test results from pull requests (PRs) is a critical step for enabling Trunk Flaky Tests. This data provides a primary signal for _detecting_ flaky tests and is the key metric for _measuring_ their impact. Without it, you lose the most significant source of information for identifying and prioritizing these disruptive tests. +Uploading test results from pull requests (PRs) is a critical step for enabling Trunk Flaky Tests. This data provides a primary signal for *detecting* flaky tests and is the key metric for *measuring* their impact. Without it, you lose the most significant source of information for identifying and prioritizing these disruptive tests. Here's a breakdown of the key features that depend on PR test results: @@ -30,7 +30,7 @@ If you don't upload test results from PRs: #### Unblocking Developers with Quarantining -Quarantining is one of the most important features of Trunk Flaky Tests. Its core purpose is to prevent known flaky tests from blocking developers and breaking CI pipelines, especially merge queues. +Quarantining is one of the most powerful features of Trunk Flaky Tests. Its core purpose is to prevent known flaky tests from blocking developers and breaking CI pipelines, especially merge queues. The entire quarantining workflow is predicated on analyzing test results from PRs. Without PR data, you cannot: @@ -47,7 +47,7 @@ Without uploading PR results, you lose: * In-Log Failure Details: A snippet of the stack trace and assertion error for any failed test, providing immediate context without digging through full CI logs. * Actionable Exit Codes: The CLI intelligently determines the job's outcome. 
* When a real test fails, it exits with a non-zero code: `⚠️ Some test failures were not quarantined, using exit code: 1` - * When _only_ a known flaky test fails, it passes the job: `🎉 All test failures were quarantined, overriding exit code to be exit_success (0)` + * When *only* a known flaky test fails, it passes the job: `🎉 All test failures were quarantined, overriding exit code to be exit_success (0)` This immediate, in-CI feedback loop is invaluable for developers trying to quickly understand why their build failed. @@ -59,8 +59,8 @@ These comments provide a summary of all tests run on a specific PR, highlighting #### Next Steps: Enable PR Uploads -Now that you understand why uploading test results from pull requests is essential, the next step is to configure your CI pipeline. This single step is the key to accurate flakiness detection, true impact measurement, and features like quarantining. +Now that you understand why uploading test results from pull requests is essential, the next step is to configure your CI pipeline. This single step is the key to unlocking accurate flakiness detection, true impact measurement, and powerful features like quarantining. Our documentation provides step-by-step guides for all major CI providers to make this setup simple. 
-[➡️ Find your CI provider and start uploading test results](../get-started/ci-providers/index) +[➡️ Find your CI provider and start uploading test results](/flaky-tests/get-started/ci-providers) diff --git a/flaky-tests/management/ticketing/index.mdx b/flaky-tests/ticketing-integrations.mdx similarity index 54% rename from flaky-tests/management/ticketing/index.mdx rename to flaky-tests/ticketing-integrations.mdx index bd1aed8..7cacee2 100644 --- a/flaky-tests/management/ticketing/index.mdx +++ b/flaky-tests/ticketing-integrations.mdx @@ -1,5 +1,5 @@ --- -title: "Ticketing" +title: "Ticketing integrations" description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets in your ticketing system" --- You can integrate directly with your ticketing systems to automatically create tickets when Trunk [detects a flaky test](/flaky-tests/detection). @@ -23,15 +23,4 @@ The ticket description contains the following information: Currently, Ticket Creation supports integrations with Linear and Jira. However, the automatically generated ticket content is formatted in Markdown and can be copied to other platforms like Asana or GitHub issues. - - - - +
Linearlinear-v2.pnglinear-integration
Jirajira.pngjira-integration
diff --git a/flaky-tests/ticketing-integrations/jira-integration.mdx b/flaky-tests/ticketing-integrations/jira-integration.mdx new file mode 100644 index 0000000..7ec364c --- /dev/null +++ b/flaky-tests/ticketing-integrations/jira-integration.mdx @@ -0,0 +1,135 @@ +--- +title: "Jira integration" +description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets with the Jira integration" +--- +When Trunk Flaky Tests [detects a flaky test](/flaky-tests/detection), you can create an automatically generated Jira ticket for your team to pick up and fix the test. + +Webhook payloads will also contain ticket information when a Jira ticket is created with the integration or when [existing tickets are linked](#link-existing-tickets-to-tests). + +### Connecting to Jira + +
+ +To connect a Jira Cloud project, navigate to **Settings** -> **Repositories** -> **Ticketing Integration** and select **Jira** as your Ticketing System. + +Then complete the form and click Connect to Jira Cloud with the following information. + +
Field NameDescriptionExamples
Jira URLThe URL to your Jira Cloud project.https://trunk-io.atlassian.net
Project KeyThe project key for your Jira project.KAN
EmailThe email associated with your Jira API token.johndoe@example.com
Jira API tokenCreate your Jira API token here.ATATT*****19FNY5Q
Default label(s) for new ticketsLabels applied to new Jira tickets created through Trunk Flaky Testsflaky-test, debt
+ +After connecting to Jira, you can specify a default issue type for new tickets and a default assignee for new tickets. + +#### API Token permissions + +Your Jira user account must have the following project permissions to create a Jira API token that allows Trunk to read, create, and assign tickets automatically: + +* *Create issues* +* *Assign issues* OR *Browse users and groups* (global permission) +* *Browse projects* + * If issue-level security is configured, issue-level security permissions must be granted to read issues. + +You need to create an API token with the following scopes: + +* Required scopes (classic) + * `read:jira-work` + * `write:jira-work` + * `read:jira-user` +* Required scopes (granular): + * `read:issue:jira` + * `read:issue-meta:jira` + * `read:issue-security-level:jira` + * `read:issue.vote:jira` + * `read:issue.changelog:jira` + * `read:avatar:jira` + * `read:status:jira` + * `read:user:jira` + * `read:field-configuration:jira` + * `read:application-role:jira` + * `read:group:jira` + * `read:issue-type:jira` + * `read:project:jira` + * `read:project.property:jira` + * `read:issue-type-hierarchy:jira` + * `read:project-category:jira` + * `read:project-version:jira` + * `read:project.component:jira` + * `read:permission:jira` + * `write:issue:jira` + * `write:comment:jira` + * `write:comment.property:jira` + * `write:attachment:jira` + + +Jira tokens cannot last longer than 365 days. Once the token expires, you will need to generate a new API token. + + +### Create a new ticket + +You can create a new ticket for any test listed in Trunk Flaky Tests. + +There are 2 ways to create a new ticket in the Flaky Test dashboard: + +* Click on the options menu for any test case on the repo overview dashboard + +
+ +* Use the Create ticket button in the top left corner of the [test case details](/flaky-tests/dashboard#test-case-details) page. + +Before you create the ticket, you will have a preview of the title and description. + +
+ +#### Create with Jira + +If you are connected to Jira, you can click the **Create Jira Ticket** button at the end of the modal, which will automatically create a ticket with the configured labels and assignees. + +#### Link existing tickets to tests + +If you already have a ticket in Jira that you want to link to a test in the dashboard, you can use the [Link Ticket to Test Case API](/flaky-tests#post-flaky-tests-link-ticket-to-test-case). + +### Required Custom Fields + +Some Jira projects require additional fields beyond the standard fields (like summary, description, and issue type) to be specified when creating tickets. Common required custom fields include: + +* **Components** - Categories or modules within your project +* **Affects Version** - Which version of your product is impacted +* **Fix Version** - Target version for the fix +* **Epic Link** - Parent epic for the ticket +* **Sprint** - Sprint assignment +* **Story Points** - Estimation field +* Custom fields specific to your organization + +#### Enterprise Feature + + +**Support for required custom fields is an Enterprise feature.** + + +If your Jira project requires custom fields that aren't supported in the standard Trunk Flaky Tests integration, you'll see an error message when attempting to create a ticket: + +``` +The Jira project [PROJECT_KEY] requires a field "[field_name]". +Contact sales@trunk.io to upgrade your account for custom field support. +``` + +To enable support for your required custom fields, contact our sales team at sales@trunk.io to discuss Enterprise plan options. + +#### Alternative: Remove Field Requirements + +If you don't need Enterprise features, you can modify your Jira project settings to make custom fields optional instead of required. This allows Trunk Flaky Tests to create tickets without needing to specify those fields. + +**To make a field optional in Jira:** + +1. Navigate to **Project Settings** in your Jira project +2. 
Select **Issue Types** from the sidebar +3. Choose the issue type you're using for flaky test tickets (e.g., Task, Bug) +4. Click **Fields** to see all fields for that issue type +5. Locate the required custom field (e.g., "Components") +6. Click the field to open its configuration +7. Uncheck **Required** or change the field requirement setting +8. Save your changes + +After making the field optional, you should be able to create tickets through Trunk Flaky Tests without encountering the error. + + +**Note:** You may need Jira Administrator permissions to modify project settings. If you don't have access, contact your Jira administrator to make these changes. + diff --git a/flaky-tests/ticketing-integrations/linear-integration.mdx b/flaky-tests/ticketing-integrations/linear-integration.mdx new file mode 100644 index 0000000..090f03e --- /dev/null +++ b/flaky-tests/ticketing-integrations/linear-integration.mdx @@ -0,0 +1,55 @@ +--- +title: "Linear integration" +description: "Triage your flaky tests faster by creating automatically assigned and labeled tickets with the Linear integration" +--- +When Trunk Flaky Tests [detects a flaky test](/flaky-tests/detection), you can create an automatically generated Linear ticket for your team to pick up and fix the test. + +Webhook payloads will also contain ticket information when a Linear ticket is created with the integration or when [existing tickets are linked](#link-existing-tickets-to-tests). + +### Connecting to Linear + +
+ +To connect a Linear project: + +1. Navigate to **Settings** > **Repositories** > **Ticketing Integration.** +2. Select **Linear** as your Ticketing System. +3. Add a [Linear API key](#api-token-permissions) +4. Select a Team and **Connect to Linear**. + +After connecting to Linear, you can specify a default project and a default assignee for new tickets. + +#### API Key permissions + +The following project permissions must be granted to your Linear API key so Trunk can read, create, and assign tickets automatically: + +* *Read* +* *Create issues* + +Selecting *Full Access* will also grant the required permissions. + +### Create a new ticket + +You can create a new ticket for any test listed in Flaky Tests. + +There are 2 ways to create a new ticket in the Flaky Test dashboard: + +* Click on the options menu for any test case on the repo overview dashboard + +
+ +* Use the Create ticket button in the top left corner of the [test case details](/flaky-tests/dashboard#test-case-details) page. + +Before you create the ticket, you get a preview of the title and description. + +
+ +#### Create with Linear + +If you are connected to Linear, you can click the **Create Linear Ticket** button at the end of the modal to automatically create a ticket with the configured team and assignees. + +Note: You can use [Flaky Tests webhooks](/flaky-tests/webhooks/linear-integration) to automate ticket creation, or if you need more control over how tickets are created in Linear. This integration is not required when using webhooks. + +#### Link existing tickets to tests + +If you already have a ticket in Linear that you want to link to a test in the dashboard, you can use the [Link Ticket to Test Case API](/flaky-tests#post-flaky-tests-link-ticket-to-test-case). diff --git a/flaky-tests/ticketing-integrations/other-ticketing-platforms.mdx b/flaky-tests/ticketing-integrations/other-ticketing-platforms.mdx new file mode 100644 index 0000000..2574219 --- /dev/null +++ b/flaky-tests/ticketing-integrations/other-ticketing-platforms.mdx @@ -0,0 +1,23 @@ +--- +title: "Other ticketing platforms" +description: "Triage your flaky tests faster by manually creating tickets from generated markdown" +--- +If you have not set up an integration, Trunk Flaky Tests can still generate a ticket title and description so you can copy and paste the details into your project management software. + +### Create a new ticket + +You can create a new ticket for any test listed in Trunk Flaky Tests. + +There are 2 ways to create a new ticket in the Flaky Test dashboard: + +* Click on the options menu for any test case on the repo overview dashboard + +
+ +* Use the Create ticket button in the top left corner of the [test case details](/flaky-tests/dashboard#test-case-details) page. + +Before you create the ticket, you will have a preview of the title and description. + +
+ +Now you can copy and paste the ticket title and description into your project management or ticketing platform. diff --git a/flaky-tests/reference/cli-reference.mdx b/flaky-tests/uploader.mdx similarity index 52% rename from flaky-tests/reference/cli-reference.mdx rename to flaky-tests/uploader.mdx index eddac1a..05dd66e 100644 --- a/flaky-tests/reference/cli-reference.mdx +++ b/flaky-tests/uploader.mdx @@ -1,6 +1,6 @@ --- -title: "CLI Reference" -description: "CLI tool for uploading test results to Trunk from CI, enabling flaky test detection and quarantining." +title: "Trunk Analytics CLI" +description: "Trunk detects and tracks flaky tests in your repos by receiving uploads from your test runs in CI, uploaded from the Trunk Analytics CLI. These uploads happen in the CI jobs used t" --- Trunk detects and tracks flaky tests in your repos by receiving uploads from your test runs in CI, uploaded from the Trunk Analytics CLI. These uploads happen in the CI jobs used to run tests in your nightly CI, post-commit jobs, and PR checks. @@ -8,25 +8,19 @@ Trunk detects and tracks flaky tests in your repos by receiving uploads from you If you're setting up Trunk Flaky Tests for the first time, you can follow the guides for your CI provider and test framework. - - - - +
Guides by Test Frameworksframeworks
Guides by CI Providerci-providers
-The CLI should be **downloaded as part of your test workflow** in your CI system. You can download the appropriate binary for your platform directly from the [GitHub releases page](https://github.com/trunk-io/analytics-cli/releases). +The CLI should be **downloaded as part of your test workflow** in your CI system. The automatic launcher is platform agnostic and will download the latest version of the uploader for your platform. ### Manual Download You can find the list of releases on [the GitHub release page](https://github.com/trunk-io/analytics-cli/releases). We provide executables for Linux and OS X. It’s a single file inside a tar and upon downloading the tar you will find a single binary - `trunk-analytics-cli` to use. - -```bash Linux (x64) + + + + +```bash SKU="trunk-analytics-cli-x86_64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -34,7 +28,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash Linux (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-unknown-linux.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -42,7 +41,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash macOS (arm64) + + + + + +```bash SKU="trunk-analytics-cli-aarch64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -50,7 +54,12 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` -```bash macOS (x64) + + + + + +```bash SKU="trunk-analytics-cli-x86_64-apple-darwin.tar.gz" curl -fL --retry 3 \ "https://github.com/trunk-io/analytics-cli/releases/latest/download/${SKU}" \ @@ -58,7 +67,10 @@ curl -fL --retry 3 \ chmod +x trunk-analytics-cli ``` - + + + + ### Organization Slug and Token @@ -67,30 +79,33 @@ The CLI requires your Trunk organization slug and token passed through `--org-ur You can find your organization slug and token by 
going to **Settings** > **Manage** > **Organization**. + - - - - + +

Make sure you are getting your Organization Slug, not the Organization Name.

+
+ - - - - + +

Ensure you get your Organization API Token, not your repo token.

+
+
### Uploading Test Results -The uploaded tests are processed by Trunk periodically, not in real-time. Wait for at least an hour after the initial upload before they’re displayed in the [Uploads tab](/flaky-tests/get-started/#id-4.-confirm-your-configuration-analyze-your-dashboard). Multiple uploads are required before a test can be accurately detected as flaky. +The uploaded tests are processed by Trunk periodically, not in real-time. Wait for at least an hour after the initial upload before they’re displayed in the [Uploads tab](/flaky-tests/get-started#step-3-verify-integration). Multiple uploads are required before a test can be accurately detected as flaky. -Trunk accepts uploads in three main report formats, [XML](https://github.com/testmoapp/junitxml), [Bazel Event Protocol JSONs](https://bazel.build/remote/bep#consuming-bep-text-json), and XCode XCResult paths. You can upload each of these test report formats using the `./trunk-analytics-cli upload` command like this: +Trunk accepts uploads in three main report formats, [XML](https://github.com/testmoapp/junitxml), [Bazel Event Protocol JSONs](https://bazel.build/remote/bep#consuming-bep-text-json), and XCode XCResult paths. 
You can upload each of these test report formats using the `./trunk flakytests upload` command like this: + + Trunk can accept JUnit XMLs through the `--junit-paths` argument: ``` @@ -98,25 +113,33 @@ Trunk can accept JUnit XMLs through the `--junit-paths` argument: --org-url-slug \ --token $TRUNK_API_TOKEN ``` + + + Trunk can accept Bazel through the `--bazel-bep-path` argument: ``` -./trunk-analytics-cli upload --bazel-bep-path \ +./trunk flakytests upload --bazel-bep-path \ --org-url-slug \ --token $TRUNK_API_TOKEN ``` + + + Trunk can accept XCode through the `--xcresult-path` argument: ``` -./trunk-analytics-cli upload --xcresult-path \ +./trunk flakytests upload --xcresult-path \ --org-url-slug \ --token $TRUNK_API_TOKEN ``` + + ### Variants @@ -127,8 +150,8 @@ For example, a test for a mobile app might be flaky on iOS but stable on Android You can specify a variant during upload using the `--variant` option: -```sh Upload an iOS variant -./trunk-analytics-cli upload --junit-paths "test_output.xml" \ +``` +./trunk flakytests upload --junit-paths "test_output.xml" \ --org-url-slug \ --token $TRUNK_API_TOKEN \ --variant ios @@ -136,19 +159,18 @@ You can specify a variant during upload using the `--variant` option: Variant names are displayed in brackets next to test names in your dashboard: - - - - +

The same test, but the first is a macOS variant.

### Running and Quarantining Tests You can also execute tests and upload results to Trunk in a single step using the `test` command to **wrap** your test command. -This is especially useful for [Quarantining](../quarantining/), where the Trunk Analytics CLI will **override the exit code** of the test command if all failures can be quarantined, **preventing** flaky tests from failing your builds in CI. +This is especially useful for [Quarantining](/flaky-tests/quarantining), where the Trunk CLI will **override the exit code** of the test command if all failures can be quarantined, **preventing** flaky tests from failing your builds in CI. + + Trunk can accept JUnit XMLs through the `--junit-paths` argument: ``` @@ -157,8 +179,11 @@ Trunk can accept JUnit XMLs through the `--junit-paths` argument: --token $TRUNK_API_TOKEN \ ``` + + + Trunk can accept Bazel through the `--bazel-bep-path` argument: ``` @@ -167,8 +192,11 @@ Trunk can accept Bazel through the `--bazel-bep-path` argument: --token $TRUNK_API_TOKEN \ ``` + + + Trunk can accept XCode through the `--xcresult-path` argument: ``` @@ -177,13 +205,15 @@ Trunk can accept XCode through the `--xcresult-path` argument: --token $TRUNK_API_TOKEN \ ``` + + #### Service Availability and Graceful Degradation -Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. Read more at [Quarantine Service Availability](../quarantining/quarantine-service-availability) +Trunk Analytics CLI is designed to fail safe when our quarantine service is unavailable. 
Read more at [Quarantine Service Availability](/flaky-tests/quarantine-service-availability) #### Upload failure vs test failure @@ -219,39 +249,23 @@ Navigate to https://app.trunk.io/onboarding?intent=flaky+tests to continue using ### Using custom CI systems -The CLI is preconfigured to work with a set [ci-providers](/flaky-tests/get-started/ci-providers/) but can be used with any CI system by passing [#environment-variables](/flaky-tests/get-started/ci-providers/otherci#environment-variables) to the uploader. +The CLI is preconfigured to work with a set [ci-providers](/flaky-tests/get-started/ci-providers "mention") but can be used with any CI system by passing [#environment-variables](/flaky-tests/get-started/ci-providers/otherci#environment-variables "mention") to the uploader. -> More information on using [otherci.md](/flaky-tests/get-started/ci-providers/otherci) is documented here. +> More information on using [otherci](/flaky-tests/get-started/ci-providers/otherci "mention") is documented here. ### Full command reference The `trunk` command-line tool can upload and analyze test results. The `trunk-analytics-cli` command accepts the following subcommands: -| Command | Description | -| ------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `trunk-analytics-cli upload` | Upload data to Trunk Flaky Tests. | -| `trunk-analytics-cli validate` | Validates if the provided JUnit XML files and prints any errors. | -| `trunk-analytics-cli test ` | Runs tests using the provided command, uploads results, checks whether the failures are [quarantined](../quarantining/#using-the-trunk-cli-directly) tests, and correct the exit code based on that. 
| +| Command | Description | +| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `trunk-analytics-cli upload` | Upload data to Trunk Flaky Tests. | +| `trunk-analytics-cli validate` | Validates if the provided JUnit XML files and prints any errors. | +| `trunk-analytics-cli test ` | Runs tests using the provided command, uploads results, checks whether the failures are [quarantined](/flaky-tests/quarantining) tests, and correct the exit code based on that. | The `upload` and `test` commands accept the following options: -| Argument | Description | -|---|---| -| `--junit-paths ` | Path to the test output files. File globs are supported. Remember to wrap globs in `""` quotes | -| `--bazel-bep-path ` | Path to a JSON serialized [Bazel Build Event Protocol](https://bazel.build/remote/bep). Trunk will use the BEP file to locate test reports. Your test frameworks must still output [compatible report formats](/flaky-tests/get-started/frameworks/). | -| `--xcresult-path ` | Path to a `.xcresult` directory, which contains test reports from `xcodebuild`. | -| `--org-url-slug ` | Trunk Organization slug, from the Settings page. | -| `--token ` | Trunk Organization (not repo) token, from the Settings page. Defaults to the `TRUNK_API_TOKEN` variable. | -| `-h, --help` | Additional detailed description of the `upload` command. | -| `--repo-root` | Path to the repository root. Defaults to the current directory. | -| `--repo-url ` | Value to override URL of repository. **Optional**. | -| `--repo-head-sha` `` | Value to override SHA of repository head. **Optional**. | -| `--repo-head-branch ` | Value to override branch of repository head. **Optional**. | -| `--repo-head-commit-epoch ` | Value to override commit epoch of repository head. **Optional**. 
| -| `--codeowners-path ` | Value to override CODEOWNERS file or directory path. **Optional**. | -| `--allow-empty-test-results` | Don't fail commands if test results are empty or missing. Use it when you sometimes skip all tests for certain CI jobs. Defaults to `true`. | -| `--variant ` | Upload tests to a specific variant group. **Optional**. | -| `--test-process-exit-code` `` | Specify the exit code of the test previously run. This is used by the upload command to identify errors that happen outside of the context of the test execution (such as build errors). | +
| Argument | Description |
| --- | --- |
| `--junit-paths <JUNIT_PATHS>` | Path to the test output files. File globs are supported. Remember to wrap globs in `""` quotes |
| `--bazel-bep-path <BEP_JSON_PATH>` | Path to a JSON serialized Bazel Build Event Protocol. Trunk will use the BEP file to locate test reports. Your test frameworks must still output compatible report formats. |
| `--xcresult-path <XCRESULT_PATH>` | Path to a `.xcresult` directory, which contains test reports from `xcodebuild`. |
| `--org-url-slug <ORG_URL_SLUG>` | Trunk Organization slug, from the Settings page. |
| `--token <TOKEN>` | Trunk Organization (not repo) token, from the Settings page. Defaults to the `TRUNK_API_TOKEN` variable. |
| `-h, --help` | Additional detailed description of the `upload` command. |
| `--repo-root` | Path to the repository root. Defaults to the current directory. |
| `--repo-url <REPO_URL>` | Value to override URL of repository. **Optional**. |
| `--repo-head-sha <REPO_HEAD_SHA>` | Value to override SHA of repository head. **Optional**. |
| `--repo-head-branch <REPO_HEAD_BRANCH>` | Value to override branch of repository head. **Optional**. |
| `--repo-head-commit-epoch <REPO_HEAD_COMMIT_EPOCH>` | Value to override commit epoch of repository head. **Optional**. |
| `--codeowners-path <CODEOWNERS_PATH>` | Value to override CODEOWNERS file or directory path. **Optional**. |
| `--allow-empty-test-results` | Don't fail commands if test results are empty or missing. Use it when you sometimes skip all tests for certain CI jobs. Defaults to `true`. |
| `--variant <VARIANT_NAME>` | Upload tests to a specific variant group. **Optional**. |
| `--test-process-exit-code <EXIT_CODE>` | Specify the exit code of the test previously run. This is used by the upload command to identify errors that happen outside of the context of the test execution (such as build errors). |
**Memory Overhead** @@ -260,11 +274,12 @@ Running tests via `trunk-analytics-cli test` adds negligible memory overhead. This subcommand is a thin wrapper around your existing test command and doesn't modify or parallelize test execution. -During execution, it: +During execution, it simply: * Runs your provided test command directly. * Records start and end times. * Captures the exit code for quarantine decisions. +\ You can safely run the CLI even with large or memory-intensive suites, without risking additional OOMs in your CI agents. diff --git a/flaky-tests/use-mcp-server.mdx b/flaky-tests/use-mcp-server.mdx new file mode 100644 index 0000000..312c9dc --- /dev/null +++ b/flaky-tests/use-mcp-server.mdx @@ -0,0 +1,32 @@ +--- +title: "Use MCP Server" +description: "Leverage the power of CI Autopilot from your IDE, or the AI application of your choosing" +--- +CI Autopilot comes with a [Model Context Protocol (MCP)](https://modelcontextprotocol.io/docs/getting-started/intro) server. AI applications like Claude Code or Cursor can use MCP servers to connect to data sources, tools, and workflows - enabling them to access key information and perform tasks. + +### Supported AI applications + +The following applications are currently supported: Cursor, Claude Code, Gemini CLI, and GitHub Copilot. + + +Gemini Code Assist and Windsurf are not supported due to their limited support for MCP servers + + +### API + +Our MCP server is available at `https://mcp.trunk.io/mcp` and exposes the following tools: + +
| Tool | Capability |
| --- | --- |
| `fix-flaky-test` | Experimental: Retrieve insights around a failing/flaky test |
| `setup-trunk-uploads` | Experimental: Create a setup plan to upload test results |
+ +### Authorization + +The Trunk MCP server supports the OAuth 2.0 + OpenID Connect standard for MCP authorization. + +### Get started + +**To get started, configure your AI application to communicate with Trunk's MCP server:** + +* [Cursor](/flaky-tests/use-mcp-server/configuration/cursor-ide) +* [GitHub Copilot](/flaky-tests/use-mcp-server/configuration/github-copilot-ide) +* [Claude Code CLI](/flaky-tests/use-mcp-server/configuration/claude-code-cli) +* [Gemini CLI](/flaky-tests/use-mcp-server/configuration/gemini-cli) diff --git a/flaky-tests/use-mcp-server/configuration.mdx b/flaky-tests/use-mcp-server/configuration.mdx new file mode 100644 index 0000000..96ca2df --- /dev/null +++ b/flaky-tests/use-mcp-server/configuration.mdx @@ -0,0 +1,5 @@ +--- +title: "Configuration" +description: "Cover imageClaude Codeclaude.pngclaude-code-cliCursorcursor.pngcursor-ideGitHub Copilotgithub copilot.pnggithub-copilot-ideGeminigemini.pnggemini-cli" +--- +
- [Claude Code](/flaky-tests/use-mcp-server/configuration/claude-code-cli)
- [Cursor](/flaky-tests/use-mcp-server/configuration/cursor-ide)
- [GitHub Copilot](/flaky-tests/use-mcp-server/configuration/github-copilot-ide)
- [Gemini](/flaky-tests/use-mcp-server/configuration/gemini-cli)
diff --git a/flaky-tests/reference/mcp-reference/configuration/claude-code-cli.mdx b/flaky-tests/use-mcp-server/configuration/claude-code-cli.mdx similarity index 65% rename from flaky-tests/reference/mcp-reference/configuration/claude-code-cli.mdx rename to flaky-tests/use-mcp-server/configuration/claude-code-cli.mdx index 30205c7..74fa8d1 100644 --- a/flaky-tests/reference/mcp-reference/configuration/claude-code-cli.mdx +++ b/flaky-tests/use-mcp-server/configuration/claude-code-cli.mdx @@ -27,7 +27,7 @@ Add the following [configuration](https://docs.anthropic.com/en/docs/claude-code } ``` -### Authentication with OAuth (default) +### Authentication After the MCP server was added to Claude Code, users need to authorize to communicate with the server. Follow these steps to complete auth. @@ -39,9 +39,7 @@ In your terminal, run `claude` . Claude Code should recognize that auth is required. Run `/mcp` to authenticate, select trunk, and hit Enter: - -![](/assets/Screenshot_2025-09-10_at_12.02.48_PM.png) - +
**Step 3: Login & authorize** @@ -56,25 +54,3 @@ Authentication successful. Connected to trunk. ``` **With auth completed, Claude Code will be able to fetch the tools exposed by Trunk's MCP server.** - -### Alternative: Authentication with API token - -If you are in a CI or headless environment, or prefer not to use the OAuth browser flow, you can authenticate with your Trunk organization API token instead. - -Find your token under **Settings > API** in the Trunk dashboard, then add it to your `.mcp.json`: - -```json -{ - "mcpServers": { - "trunk": { - "url": "https://mcp.trunk.io/mcp", - "type": "http", - "headers": { - "Authorization": "Bearer ${TRUNK_API_TOKEN}" - } - } - } -} -``` - -Set the `TRUNK_API_TOKEN` environment variable to your org API token. Claude Code interpolates environment variables in MCP configuration files automatically. diff --git a/flaky-tests/reference/mcp-reference/configuration/cursor-ide.mdx b/flaky-tests/use-mcp-server/configuration/cursor-ide.mdx similarity index 58% rename from flaky-tests/reference/mcp-reference/configuration/cursor-ide.mdx rename to flaky-tests/use-mcp-server/configuration/cursor-ide.mdx index b594fed..fbfbfc1 100644 --- a/flaky-tests/reference/mcp-reference/configuration/cursor-ide.mdx +++ b/flaky-tests/use-mcp-server/configuration/cursor-ide.mdx @@ -6,13 +6,11 @@ description: "Add Trunk's MCP Server to Cursor" Use the "Add to Cursor" action to add the Trunk MCP server: -

Add trunk MCP server to Cursor

+

Add trunk MCP server to Cursor

Once clicked, follow instructions to open the MCP configuration in Cursor. A new settings window to confirm the installation of the MCP server will be shown. Click on "Install" to proceed. - -![](/assets/Screenshot_2025-09-10_at_11.28.24_AM.png) - +
### Alternative: Update MCP configuration @@ -28,7 +26,7 @@ Add the following [configuration](https://docs.cursor.com/en/context/mcp#model-c } ``` -### Authentication with OAuth (default) +### Authentication After the MCP server was added to Cursor, users need to authorize Cursor to communicate with the server. Follow these steps to complete auth. @@ -40,40 +38,14 @@ Run `CMD+Shift+P` to open the command palette and choose `View: Open MCP Setting A "Needs authentication" status will be shown: - -![](/assets/Screenshot_2025-09-10_at_11.28.34_AM.png) - +
**Step 3: Login & authorize** -A new webpage will be opened. Login with your Trunk account and follow instructions to authorize Cursor to communicate with the MCP server. +A new webpage will be opened. Login with your Trunk account and follow insturctions to authorize Cursor to communicate with the MCP server. **Step 4: Confirm** Follow instructions to get back to Cursor. With auth completed, Cursor will be able to fetch the tools exposed by Trunk's MCP server: - -![](/assets/Screenshot_2025-09-10_at_11.29.00_AM.png) - - - -### Alternative: Authentication with API token - -If you prefer not to use the OAuth flow, you can authenticate with your Trunk organization API token. Find your token under **Settings > API** in the Trunk dashboard. - -Add the token to your `.cursor/mcp.json`: - -```json -{ - "mcpServers": { - "trunk": { - "url": "https://mcp.trunk.io/mcp", - "headers": { - "Authorization": "Bearer ${TRUNK_API_TOKEN}" - } - } - } -} -``` - -Set `TRUNK_API_TOKEN` as an environment variable. Cursor interpolates environment variables in MCP configuration files automatically. +
diff --git a/flaky-tests/reference/mcp-reference/configuration/gemini-cli.mdx b/flaky-tests/use-mcp-server/configuration/gemini-cli.mdx similarity index 63% rename from flaky-tests/reference/mcp-reference/configuration/gemini-cli.mdx rename to flaky-tests/use-mcp-server/configuration/gemini-cli.mdx index bb6d7c1..0e5a4c3 100644 --- a/flaky-tests/reference/mcp-reference/configuration/gemini-cli.mdx +++ b/flaky-tests/use-mcp-server/configuration/gemini-cli.mdx @@ -12,11 +12,9 @@ gemini mcp add --transport http trunk https://mcp.trunk.io/mcp --scope project Once completed, reopen Gemini. - - ### Alternative: Update MCP configuration -Add the following [configuration](https://github.com/google-gemini/gemini-cli/blob/v0.1.19/docs/tools/mcp-server.md#oauth-support-for-remote-mcp-servers) to your project's `.gemini/settings.json` file. +Add the following [configuration](https://github.com/google-gemini/gemini-cli/blob/v0.1.19/docs/tools/mcp-server#oauth-support-for-remote-mcp-servers) to your project's `.gemini/settings.json` file. ```json { @@ -28,63 +26,34 @@ Add the following [configuration](https://github.com/google-gemini/gemini-cli/bl } ``` - - -### Authentication with OAuth (default) +### Authentication After the MCP server was added to Gemini, users need to authorize to communicate with the server. Follow these steps to complete auth. - - -**Step 1: Start Gemini CLI** +**Step 1: Start Gemini CLI** In your terminal, run `gemini` . - - **Step 2: Run the mcp auth command** Run `/mcp auth trunk` to initiate the authentication and authorization flow. - - **Step 3: Login & authorize** A new webpage will be opened. Log in with your Trunk account and follow the instructions to authorize Gemini to communicate with the MCP server. - - **Step 4: Confirm** Follow instructions to get back to Gemini. A confirmation should be shown: ``` ℹ✅ Successfully authenticated with MCP server 'trunk'! + ℹRe-discovering tools from 'trunk'... 
+ ℹSuccessfully authenticated and refreshed tools for 'trunk'. ``` **With auth completed, Gemini will be able to fetch the tools exposed by Trunk's MCP server.** - -### Alternative: Authentication with API token - -If you prefer not to use the OAuth flow, you can authenticate with your Trunk organization API token. Find your token under **Settings > API** in the Trunk dashboard. - -Add the token to your `.gemini/settings.json`: - -```json -{ - "mcpServers": { - "trunk": { - "httpUrl": "https://mcp.trunk.io/mcp", - "headers": { - "Authorization": "Bearer ${TRUNK_API_TOKEN}" - } - } - } -} -``` - -Set `TRUNK_API_TOKEN` as an environment variable. Gemini CLI interpolates environment variables in MCP configuration files automatically. diff --git a/flaky-tests/reference/mcp-reference/configuration/github-copilot-ide.mdx b/flaky-tests/use-mcp-server/configuration/github-copilot-ide.mdx similarity index 60% rename from flaky-tests/reference/mcp-reference/configuration/github-copilot-ide.mdx rename to flaky-tests/use-mcp-server/configuration/github-copilot-ide.mdx index 5a543a3..0aa44a0 100644 --- a/flaky-tests/reference/mcp-reference/configuration/github-copilot-ide.mdx +++ b/flaky-tests/use-mcp-server/configuration/github-copilot-ide.mdx @@ -1,12 +1,12 @@ --- title: "GitHub Copilot (IDE)" -description: "Add Trunk's MCP Server to GitHub Copilot" +description: "Add Trunk's MCP Server to Github Copilot" --- ## One-click setup Use the "Add to VS Code" action to add the Trunk MCP server -

Add trunk MCP server to VS Code

+

Add trunk MCP server to VS Code

### Command Palette setup @@ -26,8 +26,6 @@ A new window will open to confirm the MCP configuration. It should show: } ``` - - ### Alternative: Update MCP configuration Add the following [configuration](https://code.visualstudio.com/docs/copilot/chat/mcp-servers) to your project's `.vscode/mcp.json` file. @@ -43,23 +41,17 @@ Add the following [configuration](https://code.visualstudio.com/docs/copilot/cha } ``` -### Authentication with OAuth (default) - -After the MCP server was added, users need to authorize GitHub Copilot to communicate with the server. Follow these steps to complete auth. - +### Authentication +After the MCP server was added to Cursor, users need to authorize Cursor to communicate with the server. Follow these steps to complete auth. **Step 1: Start MCP server** Run `CMD+Shift+P` to open the Command Palette and choose `MCP: List Servers`. Choose `trunk` and select `Start Server` to authenticate. - - **Step 2: Login & authorize** -A new webpage will be opened. Login with your Trunk account and follow instructions to authorize GitHub Copilot to communicate with the MCP server. - - +A new webpage will be opened. Login with your Trunk account and follow insturctions to authorize GitHub Copilot to communicate with the MCP server. **Step 3: Confirm** @@ -68,27 +60,3 @@ Follow instructions to get back to GitHub Copilot. With auth completed, GitHub C ``` 2025-09-10 12:49:16.975 [info] Discovered 2 tools ``` - -### Alternative: Authentication with API token - -If you prefer not to use the OAuth flow, you can authenticate with your Trunk organization API token. Find your token under **Settings > API** in the Trunk dashboard. 
- -Add the token to your `.vscode/mcp.json`: - -```json -{ - "mcpServers": { - "trunk": { - "url": "https://mcp.trunk.io/mcp", - "type": "http", - "headers": { - "Authorization": "Bearer ${env:TRUNK_API_TOKEN}" - } - } - } -} -``` - - -VS Code uses `${env:VARIABLE_NAME}` syntax for environment variable interpolation in MCP configuration files, unlike other clients which use `${VARIABLE_NAME}`. - \ No newline at end of file diff --git a/flaky-tests/use-mcp-server/mcp-tool-reference.mdx b/flaky-tests/use-mcp-server/mcp-tool-reference.mdx new file mode 100644 index 0000000..139cb89 --- /dev/null +++ b/flaky-tests/use-mcp-server/mcp-tool-reference.mdx @@ -0,0 +1,6 @@ +--- +title: "MCP Tool Reference" +description: "- Get root cause analysis: MCP tool reference: fix-flaky-test - Set up test uploads: MCP tool reference: setup-trunk-uploads" +--- +- [Get root cause analysis](/flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis): MCP tool reference: fix-flaky-test +- [Set up test uploads](/flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads): MCP tool reference: setup-trunk-uploads diff --git a/flaky-tests/reference/mcp-reference/fix-flaky-test.mdx b/flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis.mdx similarity index 54% rename from flaky-tests/reference/mcp-reference/fix-flaky-test.mdx rename to flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis.mdx index b56efcd..6d05822 100644 --- a/flaky-tests/reference/mcp-reference/fix-flaky-test.mdx +++ b/flaky-tests/use-mcp-server/mcp-tool-reference/get-root-cause-analysis.mdx @@ -1,13 +1,13 @@ --- -title: "Fix Flaky Test" +title: "Get root cause analysis" description: "MCP tool reference: fix-flaky-test" --- ### Overview -The `fix-flaky-test` tool retrieves insights and historical failure analysis about a flaky test. This tool allows AI assistants to access investigation results and apply fixes directly in your development environment. 
For more information, see [Autofix Flaky Tests](../../agents/autofix-flaky-tests). +The `fix-flaky-test` tool retrieves insights and historical failure analysis about a flaky test. This tool allows AI assistants to access investigation results and apply fixes directly in your development environment. - -**Return Type:** Structured analysis data with fix recommendations. Structure: metadata, summary, facts +\ +**Return Type:** Structured analysis data with fix recommendations. Structure: issue, root cause, proposed fix ### Parameters @@ -16,15 +16,13 @@ The `fix-flaky-test` tool retrieves insights and historical failure analysis abo | Parameter | Type | Description | | ---------- | ------ | --------------------------------------------------------------- | | `repoName` | string | Repository name in `owner/repo` format (e.g., `trunk-io/trunk`) | -| `testCaseId` | string | UUID of the test case to retrieve investigations for | #### Optional Parameters | Parameter | Type | Description | | --------- | ------ | ---------------------------------------------------------------------- | +| `fixId` | string | Specific fix identifier from CI Autopilot comment (e.g., `FIX-abc123`) | | `orgSlug` | string | The name of your organization in the Trunk app | -| `investigationId` | string | Specific fix identifier from previous investigation queries | -| `createNewInvestigation` | boolean | Whether or not to trigger a new investigation (may take up to 1 minute) | ### Getting Parameter Values @@ -40,30 +38,33 @@ Look for the repository name in the output (e.g., `trunk-io/trunk` from `git@git ### Usage Examples -#### With Test ID +#### With Fix ID ``` -Fix the flaky test with ID +Fix the flaky test with ID ``` -#### Create New Investigation +### Sample Response ``` -Run a new analysis to help me fix flaky test with ID -``` +Fix Flaky Tests Insight for -#### With Existing Investigation +Issue: The CI failure occurred during the "Run Mysql Migrations" step due to a ValidationException 
from AWS Secrets Manager. -``` -Retrieve the investigation for test with investigationId +Root Cause: The SECRET_NAME being used to retrieve the secret value is malformed. The grep -oP "adminsecret.*" command is extracting the secret name along with surrounding JSON formatting (like quotes), which creates an invalid secret ID when passed to aws secretsmanager get-secret-value. + +Proposed Fix: Replace the problematic grep command with a proper JSON parser: + +- SECRET_NAME=$(aws secretsmanager list-secrets --filters Key=name,Values=adminsecret | grep Name | grep -oP "adminsecret.*") ++ SECRET_NAME=$(aws secretsmanager list-secrets --filters Key=name,Values=adminsecret | jq -r '.SecretList[0].Name') + +This fix is located in .github/actions/setup-k8s-and-migrate/action.yml at line 11. ``` ### Error Handling | Error | Cause | Resolution | | ------------------------------ | --------------------------------------------- | --------------------------------------------------------- | -| `Investigation {investigationId} not found` | Invalid or non-existent fix ID | Verify the investigationId from the previous query | -| `testCaseId must be provided` | Missing required query parameter | Test ID is required | -| `This investigation was skipped before producing a completed summary.` | Investigation was skipped | The setting may be disabled, revisit prerequisites in [Autofix Flaky Tests](../../agents/autofix-flaky-tests) | -| `This investigation failed before producing a completed summary. 
Please contact Trunk support.` | Investigation error | This feature is still in Beta, please contact support | +| `Fix {fixId} not found` | Invalid or non-existent fix ID | Verify the fix ID from the original CI Autopilot comment | +| `fixId must be provided` | Missing required query parameter | Fix ID is required | | Repository authorization error | Insufficient permissions or invalid repo name | Verify repository name format and your access permissions | diff --git a/flaky-tests/reference/mcp-reference/set-up-test-uploads.mdx b/flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads.mdx similarity index 92% rename from flaky-tests/reference/mcp-reference/set-up-test-uploads.mdx rename to flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads.mdx index 374a84e..7dc8c31 100644 --- a/flaky-tests/reference/mcp-reference/set-up-test-uploads.mdx +++ b/flaky-tests/use-mcp-server/mcp-tool-reference/set-up-test-uploads.mdx @@ -4,13 +4,11 @@ description: "MCP tool reference: setup-trunk-uploads" --- ### Overview -The `setup-trunk-uploads` tool helps configure test result uploads for Trunk Flaky Tests. This tool provides step-by-step instructions tailored to your specific test framework and CI provider combination. - - +The `setup-trunk-uploads` tool helps configure test result uploads to Trunk for flaky test detection and enhanced CI Autopilot analysis. This tool provides step-by-step instructions tailored to your specific test framework and CI provider combination. 
The tool guides you through a 4-step process: -* [ ] **Configure Test Framework** - Modify your test configuration to output JUnit XML reports +* [ ] **Configure Test Framework** - Modify your test configuration to output JUnit XML reports * [ ] **Run Tests** - Execute at least one test to generate reports * [ ] **Test Upload** - Manually upload a test report to verify connectivity * [ ] **Configure CI** - Set up automated uploads in your CI pipeline @@ -18,8 +16,6 @@ The tool guides you through a 4-step process: \ **Return Type:** Structured setup plan to generate test reports and upload to Trunk. Structure: project analysis and setup plan - - ### Parameters @@ -30,12 +26,10 @@ This agent needs to be called **once per test framework** used in your repositor | Parameter | Type | Description | | --------------- | ------ | -------------------------------------------------------------------------------------------------------------------- | -| `testFramework` | string | The test framework used in your repository (e.g., `jest`, `pytest`, `mocha`) | +| `testFramework` | string | The test framework used in your repository (e.g., `jest`, `pytest`, `mocha`) | | `ciProvider` | string | Your CI provider (e.g., `github`, `circleci`) | | `orgSlug` | string | Your organization slug. If not provided and you belong to multiple organizations, you'll be prompted to specify one. 
| - - ### Supported values #### Test Frameworks @@ -70,8 +64,6 @@ This agent needs to be called **once per test framework** used in your repositor * `travis` - Travis CI * `other` - Other CI providers (manual configuration) - - ### Usage examples #### Basic setup @@ -93,8 +85,6 @@ Use the setup-trunk-uploads tool with testFramework="jest" and ciProvider="githu Use the setup-trunk-uploads tool with testFramework="playwright" and ciProvider="github" ``` - - ### Sample response The tool returns detailed setup instructions as plain text: @@ -121,15 +111,13 @@ Run a command to upload your first test results to Trunk using your API token. Add a step to your GitHub Actions workflow to automatically upload test results on every CI run. ``` - - ### Error handling | Error | Cause | Resolution | | ------------------------------------------ | --------------------------------------------- | ------------------------------------------------------ | | `Test framework is required` | `testFramework` parameter missing | Provide a supported test framework from the list above | | `CI provider is required` | `ciProvider` parameter missing | Provide a supported CI provider from the list above | -| `User is not authenticated` | Missing or invalid authentication | Make sure you are properly authenticated with Trunk | +| `User is not authenticated` | Missing or invalid authentication | Ensure you're properly authenticated with Trunk | | `User is not a member of any organization` | No organization access | Create or join a Trunk organization | | `No organizations found` | No accessible organizations | Create an organization in the Trunk app | | Multiple organizations note | User belongs to multiple orgs, none specified | Provide explicit `orgSlug` parameter | diff --git a/flaky-tests/webhooks.mdx b/flaky-tests/webhooks.mdx new file mode 100644 index 0000000..d496550 --- /dev/null +++ b/flaky-tests/webhooks.mdx @@ -0,0 +1,19 @@ +--- +title: "Webhooks" +description: "Use webhooks to automate 
custom flaky test workflows" +--- +Trunk provides webhooks for you to build custom integrations to automate workflows, like notifying your team when a test becomes flaky or automatically creating tickets to investigate flaky tests. Trunk already provides a Jira integration, and more are planned. Webhooks lets you build custom integrations for use cases that are not supported out of the box. + +[Svix](https://docs.svix.com/) powers webhooks for Trunk. You'll be using Svix to configure webhooks and you should familiarize yourself with the [Svix App Portal docs](https://docs.svix.com/app-portal) to learn more. + +### Supported Events + +Trunk lets you create custom workflows with **event-triggered webhooks**. Flaky Test events are named with a `test_case` prefix. You can find all the events that Trunk supports in the event catalog: + + +Open the referenced resource in a new tab. + + +You can also find guides for specific examples here: + +
- [Send a Slack Message](/flaky-tests/webhooks/slack-integration)
- [Create a GitHub Issue](/flaky-tests/webhooks/github-issues-integration)
- [Send a Microsoft Teams Message](/flaky-tests/webhooks/microsoft-teams-integration)
- [Create a Linear Issue](/flaky-tests/webhooks/linear-integration)
diff --git a/flaky-tests/webhooks/github-issues-integration.mdx b/flaky-tests/webhooks/github-issues-integration.mdx index 36c8949..369a510 100644 --- a/flaky-tests/webhooks/github-issues-integration.mdx +++ b/flaky-tests/webhooks/github-issues-integration.mdx @@ -1,9 +1,11 @@ --- title: "GitHub Issues integration" -description: "Learn how to automatically create GitHub Issues with Flaky Tests webhooks" +description: "Learn how to automatically create GitHub Issues with Flaky Test webhooks" --- Trunk allows you to automate GitHub Issue creation through webhooks. This will allow you to create GitHub issues and auto-assign them to [CODEOWNERS](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) using Webhooks. +

GitHub Issue created automatically with webhooks.

+ This guide will walk you through integrating Trunk Flaky Tests with GitHub Issues through webhooks. You will be able to automatically generate GitHub issues for new flaky tests. This guide should take 15 minutes to complete. ### 1. Create a GitHub Token @@ -14,12 +16,9 @@ Before you can create a webhook to automate GitHub Issue creation, you need to c 2. Under **Personal access token** > **Fine-grained tokens** > Click **Generate new token** 3. Name the token something like `Trunk Flaky Tests` so you can recognize your token and set it never to expire. 4. Select the repositories you need to create issues to under **Repository access** -5. Under **Permissions** > **Repository Permissions**, select **Read and Write** access for **Issues.** +5. Under **Permissions** > **Repository Permissions**, select **Read and Write** access for **Issues.** - - - - +
6. Click **Generate Token** and copy your API token. ### 2. Add a new webhook @@ -30,14 +29,11 @@ You can create a new endpoint by: 1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) 2. From your profile on the top right, navigate to **Settings** -3. Under **Organization > Webhooks**, click **Automate GitHub Issue Creation** - - - - - -4. Paste your GitHub repo's Issues endpoint into **Endpoint URL.** Your **Endpoint URL** should be formatted as: `https://api.github.com/repos/{OWNER}/{REPO}/issues`. You can verify the URL by visiting it in your browser, such as [https://api.github.com/repos/trunk-io/docs/issues](https://api.github.com/repos/trunk-io/docs/issues). -5. Review the transformation code automatically generated for GitHub issues. You can customize this transformation at any time. Learn more about [customizing transformations](./github-issues-integration#id-5.-customize-your-transformation). +3. Under **Organization > Webhooks**, click **Automate GitHub Issue Creation** + +
+4. Paste your GitHub repo's Issues endpoint into **Endpoint URL.** Your **Endpoint URL** should be formatted as: `https://api.github.com/repos/{OWNER}/{REPO}/issues`. You can verify the URL by visiting it in your browser, such as https://api.github.com/repos/trunk-io/docs/issues. +5. Review the transformation code automatically generated for GitHub issues. You can customize this transformation at any time. Learn more about [customizing transformations](#id-5.-customize-your-transformation). 6. Create the new endpoint. You will be redirected to the endpoint configuration view. If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints). @@ -63,18 +59,16 @@ Transformations are custom code snippets you can write to customize the GitHub i 1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch. 2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation. -3. You can test the transformation by selecting the `v2.test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 5](./github-issues-integration#id-5.-test-your-webhook). +3. You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 5](#id-5.-test-your-webhook). -The generated webhook template contains a configurable constant out of the box: +The generated webhook template contains several configurable constants out of the box: -| Constant | Description | -|---|---| -| `GITHUB_ISSUE_LABEL_IDS` | **(Optional)** GitHub labels that will be assigned to issues created by Trunk. | +
ConstantDescription
GITHUB_ISSUE_LABEL_IDS(Optional) GitHub labels that will be assigned to issues created by Trunk.
PRS_IMPACTED_THRESHOLDIssues will be created only for flaky tests that have impacted more PRs than the PRS_IMPACTED_THRESHOLD.

You can adjust this value if you see many issues about low-impact flaky tests.
Here is the provided transformation for context. You can customize your GitHub Issues integration by following the [GitHub](https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#create-an-issue) and [Svix transformations](https://docs.svix.com/transformations#using-transformations) documentation. -The default transformation only creates issues when `new_status === "FLAKY"`. If you also want to create issues for tests marked as **Broken** (consistently failing at a high rate), update the filter condition. For example, change `new_status !== "FLAKY"` to `new_status !== "FLAKY" && new_status !== "BROKEN"` to handle both statuses. +The default transformation only creates issues when `newStatus === "flaky"`. If you also want to create issues for tests marked as **Broken** (consistently failing at a high rate), update the filter condition. For example, change `newStatus !== "flaky"` to `newStatus !== "flaky" && newStatus !== "broken"` to handle both statuses. ```javascript @@ -89,20 +83,26 @@ The default transformation only creates issues when `new_status === "FLAKY"`. If // IDs of any labels you want added to the GitHub issue. const GITHUB_ISSUE_LABEL_IDS = []; +// Below are various configs to fine-tune when an issue is created. + +// At least this many PRs need to be impacted for an issue to be created. 
+const PRS_IMPACTED_THRESHOLD = 2; + function handler(webhook) { - const new_status = webhook.payload.new_status; + const impacted_prs = webhook.payload.test_case.pull_requests_impacted_last_7d; + const newStatus = webhook.payload.status_change.current_status.value; - // Filter for only tests that transitioned to flaky - if (new_status !== "FLAKY") { + // Filter for only flaky tests that impact more than the provided threshold + if (newStatus !== "flaky" || impacted_prs < PRS_IMPACTED_THRESHOLD) { webhook.payload = "canceled"; webhook.cancel = true; return webhook; } webhook.payload = { - "title":`Flaky Test: ${webhook.payload.test_case.name.substring(0, 25)} transitioned to ${new_status}`, + "title":`Flaky Test: ${webhook.payload.test_case.name.substring(0, 25)} transitioned to ${webhook.payload.status_change.current_status.value}`, "body": summarizeTestCase(webhook.payload), "labels": GITHUB_ISSUE_LABEL_IDS, - // Uncomment this function for auto assignment + // Uncomment this function for auto asignment // "assignees": webhook.payload.test_case.codeowners.map((assignee)=>{ // // Strip the `@` symbol from codeowners // return assignee.slice(1) @@ -113,32 +113,45 @@ function handler(webhook) { function summarizeTestCase(payload) { const { - previous_status, - new_status, - timestamp, - repository, + status_change: { + previous_status + }, test_case: { name, file_path, - quarantined, + status, + quarantine, + repository, codeowners, + failure_rate_last_7d, + most_common_failures, + pull_requests_impacted_last_7d, + ticket, html_url } } = payload; // Construct a comprehensive issue body with key details - const issueBody = `See all details on the [Trunk Test Detail page](./${html_url}) - -Transition: ${previous_status} → ${new_status} - -Transition time: ${timestamp} - -File path: ${file_path || 'N/A'} - -Quarantined: ${quarantined ? 'Yes' : 'No'} - -Ownership: this test is owned by ${(codeowners && codeowners.length ? 
codeowners : ['@unassigned']).join(', ')} - -Repository: ${repository.html_url} + const issueBody = `See all details on the [Trunk Test Detail page](${html_url}) + +Transition time: ${status.timestamp} + +Latest failure: Dec 9, 2024 + +Severity (last 7 days): ${(failure_rate_last_7d * 100).toFixed(2)}% failure rate; impacting ${pull_requests_impacted_last_7d} PRs + +Ownership: this test is owned by ${(codeowners || ['@unassigned']).join(', ')} + +___ +__The most common failure reason (out of ${most_common_failures.length} identified failure reason) are:__ + +${ + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? Ask in slack.trunk.io + most_common_failures.map((failure, index) => { + return `**Reason #${index + 1}**: "${failure.summary}" \n` + }) +} ` return issueBody } @@ -148,7 +161,7 @@ Repository: ${repository.html_url} If you have [CODEOWNERS](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) configured for your GitHub repo, you can create issues with assignees using CODEOWNERS.\ \ -You can uncomment the code block on lines 25-29 or use a snippet similar to: +You can uncomment the code block on lines 31-35 or use a snippet similar to: ```javascript "assignees": webhook.payload.test_case.codeowners.map((assignee)=>{ @@ -158,11 +171,11 @@ You can uncomment the code block on lines 25-29 or use a snippet similar to: ``` -#### Limitations of CODEOWNERS +**Limitations of CODEOWNERS** 1. CODEOWNERS supports assigning files to teams, but GitHub doesn't support assigning issues to teams. **If you have team owners in your CODEOWNERS file, the requests will fail**. 2. If your code owners do not map 1:1 with GitHub users, you will need to provide your own mapping, or webhooks will fail. -3. The example payload provided for testing has the CODEOWNERS assigned to `@backend`. 
If you're testing following the instructions in [step 5](./github-issues-integration#id-5.-test-your-webhook), the delivery attempt can fail. +3. The example payload provided for testing has the CODEOWNERS assigned to `@backend`. If you're testing following the instructions in [step 5](#id-5.-test-your-webhook), the delivery attempt can fail. ### 5. Test your webhook @@ -170,7 +183,7 @@ You can uncomment the code block on lines 25-29 or use a snippet similar to: You can create test issues by delivering a mock webhook. You can do this by: 1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** -2. Under **Subscribed events,** select `v2.test_case.status_changed`as the event type to send. +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send. 3. Click **Send Example** to test your webhook ### 6. Monitoring webhooks @@ -179,21 +192,17 @@ You can monitor the events and the webhook's delivery logs in the **Overview** t You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. - -![](/assets/example-webhook-delivery-status.png) - +
You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. - -![](/assets/example-webhook-logs.png) - +
### Congratulations! A GitHub Issue will now be created when a test's health status changes. You can further modify your transformation script to customize your issues. -[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#v2.test_case.status_changed) +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) [Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) diff --git a/flaky-tests/webhooks/index.mdx b/flaky-tests/webhooks/index.mdx deleted file mode 100644 index e8ed1c8..0000000 --- a/flaky-tests/webhooks/index.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "Webhooks" -description: "Use webhooks to automate custom flaky test workflows" ---- -Trunk provides webhooks for you to build custom integrations to automate workflows, like notifying your team when a test becomes flaky or automatically creating tickets to investigate flaky tests. Trunk provides built-in connectors for [Linear](./linear-integration) and [Jira](./jira-integration) to automate ticket creation, and webhooks lets you build custom integrations for use cases that are not supported out of the box. - -[Svix](https://docs.svix.com/) powers webhooks for Trunk. You'll be using Svix to configure webhooks and you should familiarize yourself with the [Svix App Portal docs](https://docs.svix.com/app-portal) to learn more. - -### Supported Events - -Trunk lets you create custom workflows with **event-triggered webhooks**. Flaky Tests events are named with a `test_case` prefix. You can find all the events that Trunk supports in the event catalog: - - - www.svix.com - - -Trunk publishes three Flaky Tests event types to Svix. Each event includes a full JSON schema with field descriptions visible in the Svix app portal. - -#### `test_case.monitor_status_changed` - -Emitted when a monitor activates or resolves for a test case. 
- -| Field | Type | Description | -|---|---|---| -| `type` | string | Always `test_case.monitor_status_changed` | -| `timestamp` | string (ISO 8601) | When the event occurred | -| `monitor.id` | string (UUID) | Unique identifier for the monitor | -| `monitor.type` | string | The type of monitor (e.g., `pass_on_retry`) | -| `monitor.status` | string | Current monitor status (`active` or `resolved`) | -| `evidence` | object | Data supporting the status change; structure varies by monitor type | -| `repository.id` | string (UUID) | Unique identifier for the repository | -| `repository.html_url` | string | URL of the repository | -| `test_case.id` | string (UUID) | Stable unique identifier for the test | -| `test_case.name` | string | Name of the test | -| `test_case.classname` | string | Test classname | -| `test_case.file_path` | string | File path of the test | -| `test_case.html_url` | string | URL to the test detail page in Trunk | -| `test_case.codeowners` | array of strings | Code owners associated with the test | -| `test_case.quarantined` | boolean | Whether the test is quarantined | -| `test_case.variant` | string | Test variant name | - -#### `v2.test_case.status_changed` - -Emitted when a test case changes status (e.g., becomes flaky or is resolved), as triggered by a monitor. 
- -| Field | Type | Description | -|---|---|---| -| `type` | string | Always `v2.test_case.status_changed` | -| `timestamp` | string (ISO 8601) | When the event occurred | -| `previous_status` | string | The prior status of the test case | -| `new_status` | string | The updated status of the test case | -| `triggered_by.monitor_id` | string (UUID) | Unique identifier of the triggering monitor | -| `triggered_by.monitor_type` | string | Type of monitor that triggered the change | -| `triggered_by.monitor_status` | string | Status of the monitor at the time of the trigger | -| `repository` | object | See `repository` fields above | -| `test_case` | object | See `test_case` fields above | - -#### `test_case.investigation_completed` - -Emitted when an AI-powered flaky test analysis finishes for a test case. - -| Field | Type | Description | -|---|---|---| -| `type` | string | Always `test_case.investigation_completed` | -| `investigation_id` | string (UUID) | Unique identifier for the investigation | -| `confidence` | number | Overall confidence score (0-1) for the findings | -| `created_at` | string (ISO 8601) | When the investigation completed | -| `markdown_summary` | string | Markdown-formatted summary of findings and recommendations | -| `failure_message` | string | The original failure message that triggered the investigation | -| `facts` | array | Facts discovered during the investigation | -| `facts[].fact_type` | string | Category of the fact (e.g., `GIT_BLAME`) | -| `facts[].content` | string | Detailed description with citations to supporting evidence | -| `facts[].confidence` | number | Confidence score (0-1) for this individual fact | -| `repository` | object | See `repository` fields above | -| `test_case` | object | See `test_case` fields above | - -You can also find guides for specific examples here: - - - - - - - - - - - - - diff --git a/flaky-tests/webhooks/jira-integration.mdx b/flaky-tests/webhooks/jira-integration.mdx deleted file mode 100644 index 
c0deeb0..0000000 --- a/flaky-tests/webhooks/jira-integration.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: "Jira integration" -description: "Learn how to automatically create Jira issues with Flaky Test webhooks" ---- -Trunk allows you to automate Jira issue creation through webhooks. When a test becomes flaky, a Jira issue is created automatically with context including the status transition, ownership, and a link to the test details. - -This guide will walk you through integrating Trunk Flaky Tests with Jira through webhooks. You will be able to automatically generate Jira issues for **new flaky tests** found in your repo. This guide should take 15 minutes to complete. - -Trunk also has a [built-in Jira integration](../management/ticketing/jira-integration) for manual ticket creation. You only need to use webhooks if you want to automate ticket creation or need additional customization. - -### 1. Create a Jira API Token - -Before you can create a webhook to automate Jira issue creation, you need to create an API token to authorize your requests. - -1. Go to [Atlassian API token management](https://id.atlassian.com/manage-profile/security/api-tokens). -2. Click **Create API token**, give it a label (e.g., "Trunk Webhooks"), and click **Create**. -3. Copy the token and save it in a secure location. You'll need it later. - -You'll also need to generate a Base64-encoded credential string for authentication. Run this in your terminal: - -```bash -echo -n "your-email@example.com:your-api-token" | base64 -``` - -Replace `your-email@example.com` with the email associated with your Jira account and `your-api-token` with the token you just created. Save the output for step 3. - -### 2. Add a new webhook in Trunk - -Trunk uses Svix to integrate with other services, such as creating Jira issues through webhooks. - -You can create a new endpoint by: - -1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) -2. 
From your profile on the top right, navigate to **Settings** -3. Under **Organization > Webhooks**, click **Automate Jira Issues Creation**. -4. Set the **Endpoint URL** to your Jira Cloud REST API endpoint: `https://.atlassian.net/rest/api/2/issue`. Replace `` with your Jira Cloud domain (e.g., `acme` if your Jira URL is `acme.atlassian.net`). -5. Review the transformation code automatically generated for Jira issues. You can customize this transformation at any time. Learn more about [customizing transformations](#5-customize-your-transformation). -6. Create the new endpoint. You will be redirected to the endpoint configuration view. - -If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints). - -### 3. Add custom headers - -The Jira REST API requires authentication headers. You can configure custom headers in the endpoint configuration: - -1. Navigate to **Webhooks > Advanced > Custom Headers.** -2. Fill in the **Key** and **Value** referencing the table below, and click the **+** button to add each header. - -You'll need to configure the following headers: - -| Key | Value | -| --------------- | ------------------------ | -| `Authorization` | `Basic ` | -| `Content-Type` | `application/json` | - -Replace `` with the Base64-encoded string you generated in [step 1](#1-create-a-jira-api-token). - -### 4. Find your Jira project key and issue type - -You'll need your Jira project key and preferred issue type to configure the transformation. - -**Project key:** This is the short prefix on your Jira issues (e.g., `ENG`, `PROJ`, `KAN`). You can find it in the URL when viewing your Jira project: `https://your-domain.atlassian.net/jira/software/projects//board`. - -**Issue type:** The type of issue to create. Common values are `Bug`, `Task`, or `Story`. The default is `Bug`. - -### 5. 
Customize your transformation - -Transformations are custom code snippets you can write to customize the Jira issues created by the webhook. A working template transformation will be added automatically for your webhook, but you can further customize the behavior. - -1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch. -2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation. -3. You can test the transformation by selecting the `v2.test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message [in step 6](#6-test-your-webhook). - -The generated webhook template contains several configurable constants out of the box: - -| Constant | Description | -|---|---| -| `JIRA_PROJECT_KEY` | (**Required)** Your Jira project key (e.g., `ENG`, `PROJ`). | -| `JIRA_ISSUE_TYPE` | **(Optional)** The issue type to create. Defaults to `Bug`. | -| `JIRA_LABELS` | (**Optional)** Array of labels to add to the issue. Defaults to `["flaky-test"]`. | -| `JIRA_CUSTOM_FIELDS` | (**Optional)** Object of custom field key-value pairs for projects that require additional fields. | - -Here is the provided transformation for context. You can customize your Jira issues integration by following the [Jira REST API docs](https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issues/#api-rest-api-2-issue-post) and [Svix transformations](https://docs.svix.com/transformations#using-transformations) documentation. - - -The default transformation only creates issues when `new_status === "FLAKY"`. If you also want to create issues for tests marked as **Broken** (consistently failing at a high rate), update the filter condition. For example, change `new_status !== "FLAKY"` to `new_status !== "FLAKY" && new_status !== "BROKEN"` to handle both statuses. 
- - -```javascript -/** - * @param webhook the webhook object - * @param webhook.method destination method. Allowed values: "POST", "PUT" - * @param webhook.url current destination address - * @param webhook.eventType current webhook Event Type - * @param webhook.payload JSON payload - * @param webhook.cancel whether to cancel dispatch of the given webhook - */ - -// Your Jira project key (e.g., "PROJ", "ENG"). This is required! -const JIRA_PROJECT_KEY = ""; -// The Jira issue type to create (e.g., "Bug", "Task", "Story"). Defaults to "Bug". -const JIRA_ISSUE_TYPE = "Bug"; -// Labels to add to the Jira issue. Optional. -const JIRA_LABELS = ["flaky-test"]; - -// Add any custom required fields your Jira project needs. Optional. -// Example: { "customfield_10042": { "value": "Platform" }, "customfield_10043": "some-value" } -const JIRA_CUSTOM_FIELDS = {}; - -function handler(webhook) { - const new_status = webhook.payload.new_status; - - // Filter for only tests that transitioned to flaky - if (new_status !== "FLAKY") { - webhook.payload = "canceled"; - webhook.cancel = true; - return webhook; - } - - const description = summarizeTestCase(webhook.payload); - - webhook.payload = { - fields: { - project: { key: JIRA_PROJECT_KEY }, - issuetype: { name: JIRA_ISSUE_TYPE }, - summary: `Flaky Test: ${webhook.payload.test_case.name}`, - description: description, - labels: JIRA_LABELS, - ...JIRA_CUSTOM_FIELDS, - }, - }; - return webhook; -} - -function summarizeTestCase(payload) { - const { - previous_status, - new_status, - timestamp, - repository, - test_case: { - name, file_path, quarantined, codeowners, html_url - } - } = payload; - - const issueBody = `See all details on the [Trunk Test Detail page|${html_url}] - -Transition: ${previous_status} → ${new_status} - -Transition time: ${timestamp} - -File path: ${file_path || 'N/A'} - -Quarantined: ${quarantined ? 'Yes' : 'No'} - -Ownership: this test is owned by ${(codeowners && codeowners.length ? 
codeowners : ['@unassigned']).join(', ')} - -Repository: ${repository.html_url} - -View the full stack trace on the [Test Detail page|${html_url}] - `; - return issueBody; -} -``` - - -The description uses [Jira wiki markup](https://jira.atlassian.com/secure/WikiRendererHelpAction.jspa?section=texteffects) for formatting. Links use the `[text|url]` syntax rather than markdown. - - -### 6. Test your webhook - -You can create test issues by delivering a mock webhook. You can do this by: - -1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** -2. Under **Subscribed events,** select `v2.test_case.status_changed` as the event type to send -3. Click **Send Example** to test your webhook - -### 7. Monitoring webhooks - -You can monitor the events and the webhook's delivery logs in the **Overview** tab of an endpoint configuration view. - -You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. - -You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. - -### Congratulations! - -A Jira issue will now be created when a test's health status changes to **flaky**. You can further modify your transformation script to customize your issues. 
- -[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#v2.test_case.status_changed) - -[Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) - -[Learn more about Jira's REST API](https://developer.atlassian.com/cloud/jira/platform/rest/v2/intro/) diff --git a/flaky-tests/webhooks/linear-integration.mdx b/flaky-tests/webhooks/linear-integration.mdx index 6cc37f5..49637c4 100644 --- a/flaky-tests/webhooks/linear-integration.mdx +++ b/flaky-tests/webhooks/linear-integration.mdx @@ -1,12 +1,14 @@ --- title: "Linear integration" -description: "Learn how to automatically create Linear issues with Flaky Tests webhooks" +description: "Learn how to automatically create Linear issues with Flaky Test webhooks" --- Trunk allows you to automate Linear Issue creation through webhooks. This will allow you to create Linear issues and auto-assign according to [CODEOWNERS](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners). -This guide will walk you through integrating Trunk Flaky Tests with Linear Issues through webhooks. You will be able to automatically generate Linear issues for **new flaky tests** found in your repo. This guide should take 15 minutes to complete. +
-Trunk also has a [built-in Linear integration](./linear-integration) for ticket creation. You only need to use webhooks if you want to automate ticket creation or need additional customization. +This guide will walk you through integrating Trunk Flaky Tests with Linear Issues through webhooks. You will be able to automatically generate Linear issues for **new flaky tests** found in your repo that **impact more than 2 PRs**. This guide should take 15 minutes to complete. + +Trunk also has a [built-in Linear integration](/flaky-tests/webhooks/linear-integration) for ticket creation. You only need to use webhooks if you want to automate ticket creation or need additional customization. ### 1. Create a Linear Personal Access Token @@ -24,14 +26,11 @@ You can create a new endpoint by: 1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) 2. From your profile on the top right, navigate to **Settings** -3. Under **Organization > Webhooks**, click **Automate Linear Issues Creation.** +3. Under **Organization > Webhooks**, click **Automate Linear Issues Creation.** - - - - +
4. Paste the Linear GraphQL API endpoint into **Endpoint URL**, which is: `https://api.linear.app/graphql`. -5. Review the transformation code automatically generated for Linear issues, you can customize this transformation at any time. Learn more about [customizing transformations](./linear-integration#id-5.-customize-your-transformation). +5. Review the transformation code automatically generated for Linear issues, you can customize this transformation at any time. Learn more about [customizing transformations](#id-5.-customize-your-transformation). 6. Create the new endpoint. You will be redirected to the endpoint configuration view. If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints). @@ -57,7 +56,7 @@ You need to find your Linear team, project, and label IDs to create issues with First, you'll need to find your team ID so you can create Linear issues under the correct team. You can make a request in your terminal using cURL, or a similar tool. -You'll need your Linear API key from [step 1](./linear-integration#id-1.-create-a-linear-personal-access-token). +You'll need your Linear API key from [step 1](#id-1.-create-a-linear-personal-access-token). ```bash curl \ @@ -168,20 +167,16 @@ Transformations are custom code snippets you can write to customize the Linear i 1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch. 2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation. -3. You can test the transformation by selecting the `v2.test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message[ in step 6](./linear-integration#id-6.-test-your-webhook). +3. 
You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message[ in step 6](#id-6.-test-your-webhook). The generated webhook template contains several configurable constants out of the box: -| Constant | Description | -|---|---| -| `LINEAR_TEAM_ID` | (**Required)** Your Linear team ID. [Learn about finding your team ID](#team-id). | -| `LINEAR_PROJECT_ID` | **(Optional)** The Linear project ID assigned to new issues. [Learn more about finding your project ID](#project-id). | -| `LINEAR_LABEL_IDS` | (**Optional)** Array of label IDs assigned to new issues. [Learn about finding your label IDs](#label-id). | +
ConstantDescription
LINEAR_TEAM_ID(Required) Your Linear team ID. Learn about finding your team ID.
LINEAR_PROJECT_ID(Optional) The Linear project ID assigned to new issues. Learn more about finding your project ID.
LINEAR_LABEL_IDS(Optional) Array of label IDs assigned to new issues. Learn about finding your label IDs.
PRS_IMPACTED_THRESHOLDIssues will be created only for flaky tests that have impacted more PRs than the PRS_IMPACTED_THRESHOLD.

You can adjust this value if you see many issues about low-impact flaky tests.
Here is the provided transformation for context. You can customize your Linear Issues integration by following the[ Linear API](https://studio.apollographql.com/public/Linear-API/variant/current/schema/reference) and [Svix transformations](https://docs.svix.com/transformations#using-transformations) documentation. -The default transformation only creates issues when `new_status === "FLAKY"`. If you also want to create issues for tests marked as **Broken** (consistently failing at a high rate), update the filter condition. For example, change `new_status !== "FLAKY"` to `new_status !== "FLAKY" && new_status !== "BROKEN"` to handle both statuses. +The default transformation only creates issues when `newStatus === "flaky"`. If you also want to create issues for tests marked as **Broken** (consistently failing at a high rate), update the filter condition. For example, change `newStatus !== "flaky"` to `newStatus !== "flaky" && newStatus !== "broken"` to handle both statuses. ```javascript @@ -201,14 +196,20 @@ const LINEAR_PROJECT_ID = ""; // IDs of any labels you want added to the linear issue. Optional. const LINEAR_LABEL_IDS = []; +// Below are various configs to fine-tune when an issue is created. + +// At least many PRs need to be impacted for an issue to be created. +const PRS_IMPACTED_THRESHOLD = 2; + function handler(webhook) { - const new_status = webhook.payload.new_status; + const impacted_prs = webhook.payload.test_case.pull_requests_impacted_last_7d; + const newStatus = webhook.payload.status_change.current_status.value; const resolvedProjectId = LINEAR_PROJECT_ID ? 
`"${LINEAR_PROJECT_ID}"` : undefined; const resolvedLinearLabels = LINEAR_LABEL_IDS.map((id) => `"${id}"`).join(","); - // Filter for only tests that transitioned to flaky - if (new_status !== "FLAKY") { + // Filter for only flaky tests that impact more than the provided threshold + if (newStatus !== "flaky" || impacted_prs < PRS_IMPACTED_THRESHOLD) { webhook.payload = "canceled"; webhook.cancel = true; return webhook; @@ -239,34 +240,47 @@ function handler(webhook) { function summarizeTestCase(payload) { const { - previous_status, - new_status, - timestamp, - repository, + status_change: { + previous_status + }, test_case: { name, file_path, - quarantined, + status, + quarantine, + repository, codeowners, + failure_rate_last_7d, + most_common_failures, + pull_requests_impacted_last_7d, + ticket, html_url } } = payload; // Construct a comprehensive issue body with key details - const issueBody = `See all details on the [Trunk Test Detail page](./${html_url}) - -Transition: ${previous_status} → ${new_status} - -Transition time: ${timestamp} - -File path: ${file_path || 'N/A'} - -Quarantined: ${quarantined ? 'Yes' : 'No'} - -Ownership: this test is owned by ${(codeowners && codeowners.length ? codeowners : ['@unassigned']).join(', ')} - -Repository: ${repository.html_url} + const issueBody = `See all details on the [Trunk Test Detail page](${html_url}) + +Transition time: ${status.timestamp} + +Latest failure: Dec 9, 2024 + +Severity (last 7 days): ${(failure_rate_last_7d * 100).toFixed(2)}% failure rate; impacting ${pull_requests_impacted_last_7d} PRs + +Ownership: this test is owned by ${(codeowners || ['@unassigned']).join(', ')} + +___ +__The most common failure reason (out of ${most_common_failures.length} identified failure reason) are:__ + +${ + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? 
Ask in slack.trunk.io + most_common_failures.map((failure, index) => { + return `**Reason #${index + 1}**: "${failure.summary}" \n` + }) +} -View the full stack trace on the [Test Detail page](./${html_url}) +View the full stack trace on the [Test Detail page](${html_url}) ` return issueBody } @@ -307,7 +321,7 @@ webhook.payload = {query: `mutation IssueCreate { You can create test issues by delivering a mock webhook. You can do this by: 1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** -2. Under **Subscribed events,** select `v2.test_case.status_changed`as the event type to send +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send 3. Click **Send Example** to test your webhook ### 7. Monitoring webhooks @@ -316,21 +330,17 @@ You can monitor the events and the webhook's delivery logs in the **Overview** t You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. - -![](/assets/example-webhook-delivery-status.png) - +
You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. - -![](/assets/example-webhook-logs.png) - +
### Congratulations! -A Linear Issue will now be created when a test's health status changes to **flaky**. You can further modify your transformation script to customize your issues. +A Linear Issue will now be created when a test's health status changes to **flaky** and **impacts more than 2 PRs**. You can further modify your transformation script to customize your issues. -[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#v2.test_case.status_changed) +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) [Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) diff --git a/flaky-tests/webhooks/microsoft-teams-integration.mdx b/flaky-tests/webhooks/microsoft-teams-integration.mdx index 99d22eb..b2ec6ec 100644 --- a/flaky-tests/webhooks/microsoft-teams-integration.mdx +++ b/flaky-tests/webhooks/microsoft-teams-integration.mdx @@ -1,8 +1,11 @@ --- title: "Microsoft Teams integration" +description: "Trunk allows you to create custom workflows to send customized messages to Microsoft Teams channels through webhooks." --- Trunk allows you to create custom workflows to send customized messages to Microsoft Teams channels through webhooks. +
+ This guide will walk you through sending Microsoft Teams messages using event-triggered webhooks. By the end of this tutorial, you'll receive Microsoft Teams messages for test status changes. This guide should take 10 minutes to complete. ### 1. Configure incoming webhooks for your team @@ -23,14 +26,11 @@ You can create a new endpoint by: 1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) 2. From your profile on the top right, navigate to **Settings** -3. Under **Organization > Webhooks**, click **Teams** - - - - - -4. Paste your Microsoft Teams Workflow URL from [the previous step ](./microsoft-teams-integration#id-1.-configure-incoming-webhooks-for-your-team)into **Endpoint URL**. -5. Review the transformation code automatically generated for Teams messages. You can customize this transformation at any time. Learn more about [customizing transformations](./microsoft-teams-integration#id-3.-customize-your-transformation). +3. Under **Organization > Webhooks**, click **Teams** + +
+4. Paste your Microsoft Teams Workflow URL from [the previous step ](#id-1.-configure-incoming-webhooks-for-your-team)into **Endpoint URL**. +5. Review the transformation code automatically generated for Teams messages. You can customize this transformation at any time. Learn more about [customizing transformations](#id-3.-customize-your-transformation). 6. Create the new endpoint. You will be redirected to the endpoint configuration view. ### 3. Customize your transformation @@ -39,7 +39,7 @@ Transformations are custom code snippets you can write to customize the Microsof 1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch. 2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation. -3. You can test the transformation by selecting the `v2.test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 4](./microsoft-teams-integration#id-4.-test-your-webhook). +3. You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 4](#id-4.-test-your-webhook). Below is an example of a webhook transformation to format the messages as [Actionable Messages](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using?tabs=cURL%2Ctext1). If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints). 
@@ -81,25 +81,34 @@ function summarizeTestCase(payload) { } const { - previous_status = "Unknown", - new_status = "Unknown", - timestamp, - repository = {}, test_case: { name = "N/A", - classname = "", - file_path = "", - quarantined = false, + file_path = "N/A", + status = {}, + quarantine = false, + repository = {}, codeowners = [], + failure_rate_last_7d = 0, + most_common_failures = [], + pull_requests_impacted_last_7d = 0, + ticket = {}, html_url = "N/A" } } = payload; - const statusTimestamp = timestamp - ? new Date(timestamp).toLocaleString() + const statusTimestamp = status.timestamp + ? new Date(status.timestamp).toLocaleString() : "Unknown"; - const subtitle = file_path || classname || ""; + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? Ask in slack.trunk.io + const failureBlocks = most_common_failures.map(failure => ({ + type: "TextBlock", + text: `• ${failure.summary}`, + wrap: true, + spacing: "small" + })); return { type: "message", @@ -119,7 +128,7 @@ function summarizeTestCase(payload) { }, { type: "TextBlock", - text: subtitle, + text: file_path, isSubtle: true, spacing: "none" }, @@ -128,7 +137,7 @@ function summarizeTestCase(payload) { facts: [ { title: "Status", - value: `${previous_status} → ${new_status}` + value: `${status.value || "Unknown"} (${status.reason?.trim() || "N/A"})` }, { title: "Last Updated", @@ -136,7 +145,15 @@ function summarizeTestCase(payload) { }, { title: "Quarantine Status", - value: quarantined ? "Quarantined" : "Not Quarantined" + value: quarantine ? 
"Quarantined" : "Not Quarantined" + }, + { + title: "Failure Rate (7d)", + value: `${(failure_rate_last_7d * 100).toFixed(1)}%` + }, + { + title: "PRs Impacted (7d)", + value: pull_requests_impacted_last_7d.toString() }, { title: "Codeowners", @@ -144,6 +161,13 @@ function summarizeTestCase(payload) { } ] }, + { + type: "TextBlock", + text: "Most Common Failures", + weight: "bolder", + spacing: "medium" + }, + ...failureBlocks, { type: "ActionSet", actions: [ @@ -156,6 +180,11 @@ function summarizeTestCase(payload) { type: "Action.OpenUrl", title: "View Test Details", url: html_url || "#" + }, + { + type: "Action.OpenUrl", + title: "View Related Ticket", + url: ticket.html_url || "#" } ] } @@ -171,7 +200,7 @@ function summarizeTestCase(payload) { You can send test messages to your Microsoft Teams channels as you make updates. You can do this by: 1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** -2. Under **Subscribed events,** select `v2.test_case.status_changed`as the event type to send. +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send. 3. Click **Send Example** to test your webhook ### 5. Monitoring webhooks @@ -180,21 +209,19 @@ You can monitor the events and the webhook's delivery logs in the **Overview** t You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. - -![](/assets/example-webhook-delivery-status.png) - +
You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. - -![](/assets/example-webhook-logs.png) - +
### Congratulations! You should now receive notifications in your Teams channel when a test's status changes. You can further modify your transformation script to customize your messages. -[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#v2.test_case.status_changed) +
+ +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) [Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) diff --git a/flaky-tests/webhooks/slack-integration.mdx b/flaky-tests/webhooks/slack-integration.mdx index c3fcb4c..4499bdf 100644 --- a/flaky-tests/webhooks/slack-integration.mdx +++ b/flaky-tests/webhooks/slack-integration.mdx @@ -1,12 +1,10 @@ --- -title: "Integration for Slack" +title: "Slack integration" description: "Learn how to use flaky test webhooks to power Slack notifications" --- Trunk allows you to create custom workflows to send customized messages to Slack through webhooks. - -For details on how Trunk collects, manages, and stores your data, see our [Security and Privacy](../../setup-and-administration/security) page. - +
This guide will walk you through sending Slack messages using event-triggered webhooks. By the end of this tutorial, you'll receive Slack messages for test status changes. This guide should take 10 minutes to complete. @@ -18,18 +16,14 @@ You can add the new Slack Webhook URL to Svix by following these steps: 1. Login to [Trunk Flaky Tests](https://app.trunk.io/login?intent=flaky%20tests) 2. From your profile on the top right, navigate to **Settings** -3. Under **Organization > Webhooks**, click **Slack** - - - ![](/assets/example-webhook-connector-light.png) - -4. Click **Connect to Slack** and select the server and channel to connect to. - - - ![](/assets/example-webhook-connector-slack_(1).png) - -5. Review the transformation code automatically generated for GitHub issues. You can customize this transformation at any time. Learn more about [customizing transformations](./slack-integration#id-2.-customize-your-transformation). -6. By default, this connection will send messages about Trunk Merge and Flaky Tests events. If you only want Flaky Tests events, unselect all events other than `v2.test_case.status_changed`. +3. Under **Organization > Webhooks**, click **Slack** + +
+4. Click **Connect to Slack** and select the server and channel to connect to. + +
+5. Review the transformation code automatically generated for GitHub issues. You can customize this transformation at any time. Learn more about [customizing transformations](#id-2.-customize-your-transformation). +6. By default, this connection will send messages about Trunk Merge and Flaky Tests events. If you only want Flaky Test events, unselect all events other than `test_case.status_changed`. 7. Create the new endpoint. You will be redirected to the endpoint configuration view. If you're having trouble adding a new webhook endpoint with Svix, please see the [Adding Endpoint docs from Svix](https://docs.svix.com/receiving/using-app-portal/adding-endpoints). @@ -40,7 +34,7 @@ Transformations are custom code snippets you can write to customize the Slack me 1. In the endpoint configuration view, navigate to the **Advanced** tab. Under **Transformation**, toggle the **Enabled** switch. 2. Click **Edit transformation** to update your transformation code, and click **Save** to update the transformation. -3. You can test the transformation by selecting the `v2.test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 3](./slack-integration#id-3.-test-your-webhook). +3. You can test the transformation by selecting the `test_case.status_changed` payload and clicking **Run Test**. This will test the transformation but not send a message. You will learn to send a test message in [step 3](#id-3.-test-your-webhook). An example transformation script is provided below and you can customize your Slack integration by following the [Slack](https://api.slack.com/messaging/webhooks) and [Svix transformations](https://docs.svix.com/transformations#using-transformations) documentation. 
@@ -66,32 +60,43 @@ function summarizeTestCase(payload) { } const { - previous_status = "Unknown", - new_status = "Unknown", - timestamp, - repository = {}, test_case: { name = "N/A", - classname = "", - file_path = "", - quarantined = false, + file_path = "N/A", + status = {}, + quarantine = false, + repository = {}, codeowners = [], + failure_rate_last_7d = 0, + most_common_failures = [], + pull_requests_impacted_last_7d = 0, + ticket = {}, html_url = "N/A" } } = payload; - const statusSummary = `Status: ${previous_status} → ${new_status} ` - + `(Updated: ${timestamp ? new Date(timestamp).toLocaleString() : "Unknown"})`; + const statusSummary = `Status: ${status.value || "Unknown"} ` + + `(Reason: ${status.reason?.trim() || "N/A"}, ` + + `Updated: ${status.timestamp ? new Date(status.timestamp).toLocaleString() : "Unknown"})`; - const quarantineStatus = quarantined + const quarantineStatus = quarantine ? "This test is currently quarantined." : "This test is not quarantined."; + // most_common_failures is a beta feature currently being tested + // If you are not on the beta it will be an empty array + // Want to try it out? Ask in slack.trunk.io + const failureSummary = most_common_failures.map(failure => + `- ${failure.summary} (${failure.occurrence_count || 0} occurrences, ` + + `last seen: ${failure.last_occurrence ? new Date(failure.last_occurrence).toLocaleString() : "Unknown"})` + ).join("\n"); + const repoLink = `Repository: ${repository.html_url || "N/A"}`; const testLink = `Test Details: ${html_url}`; + const ticketLink = `Related Ticket: ${ticket.html_url || "N/A"}`; const ownerSummary = `Codeowners: \`${codeowners.join(", ") || "None"}\``; - const classnameSummary = classname ? `Classname: \`${classname}\`` : null; - const filePathSummary = file_path ? 
`File Path: \`${file_path}\`` : null; + const statsSummary = `Failure rate (last 7 days): ${(failure_rate_last_7d * 100).toFixed(1)}% ` + + `| PRs Impacted: ${pull_requests_impacted_last_7d}`; return { blocks: [ @@ -107,14 +112,16 @@ function summarizeTestCase(payload) { text: { type: "mrkdwn", text: [ - filePathSummary, - classnameSummary, + `File Path: \`${file_path}\``, statusSummary, quarantineStatus, + `Most Common Failures:\n${failureSummary}`, ownerSummary, + statsSummary, repoLink, - testLink - ].filter(Boolean).join("\n"), + testLink, + ticketLink + ].join("\n"), }, }, ], @@ -127,7 +134,7 @@ function summarizeTestCase(payload) { You can send test messages to your Slack channels as you make updates. You can do this by: 1. In the endpoint configuration view, navigate to the **Testing** tab and select a **Send event** -2. Under **Subscribed events,** select `v2.test_case.status_changed`as the event type to send. +2. Under **Subscribed events,** select `test_case.status_changed`as the event type to send. 3. Click **Send Example** to test your webhook ### 4. Monitoring webhooks @@ -136,21 +143,19 @@ You can monitor the events and the webhook's delivery logs in the **Overview** t You can see an overview of how many webhook deliveries have been attempted, how many are successful, how many are in flight, and how many fail in the **Attempt Delivery Status** modal. - -![](/assets/example-webhook-delivery-status.png) - +
You can see a list of past delivery attempts in the **Message Attempts** modal. You can filter this list by **Succeeded** and **Failed** status, and you can click on each message to see the **Message content**, response code, and error message of each attempt. You can learn more about [replaying messages](https://docs.svix.com/receiving/using-app-portal/replaying-messages) and [filtering logs](https://docs.svix.com/receiving/using-app-portal/filtering-logs) in the Svix docs. - -![](/assets/example-webhook-logs.png) - +
### Congratulations! +
+ You should now receive notifications in your Slack workspace when a test's status changes. You can further modify your transformation script to customize your messages. -[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#v2.test_case.status_changed) +[See the Trunk webhook event catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/#test_case.status_changed) [Learn more about consuming webhooks in the Svix docs](https://docs.svix.com/receiving/introduction) diff --git a/index.mdx b/index.mdx deleted file mode 100644 index 3c61138..0000000 --- a/index.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "Trunk Platform" -description: "Ship Software as Fast as AI Writes It" ---- -AI generates code at machine speed, but code review, CI, and delivery still move at human pace. That gap is widening, and it gets worse every time you add another AI agent to the loop. - -Trunk enables continuous delivery: any commit on your main branch could be deployed to production. We do this by eliminating the two bottlenecks that prevent it. Flaky tests that waste developer time, and serialized merge queues that cap your throughput. - -Teams start with whichever problem hurts more, then expand. Caseware cut merge time from 6 hours to 90 minutes. Zillow eliminated all pipeline blockages from flaky tests. Faire prevented 20% of main branch failures from green-green conflicts. - - -[Schedule time here](https://calendly.com/trunk/demo) or email [support@trunk.io](mailto:support@trunk.io) - - -#### Why This Matters Now - -You check out `main` on Monday morning, grab your coffee, open a pull request, and CI fails for reasons that have nothing to do with your code. You're reading logs, pinging Slack, trying to figure out who broke what. That happens to every engineer, every day. Continuous delivery means every commit on main is known-good. Every CI failure is yours to fix, not something you inherited. 
- -That problem has existed for years. What makes it urgent now is volume. AI agents are generating 50+ PRs a day, and they hit the same merge queue serialization and flaky test noise that slows humans. Except agents can't context-switch to other work while they wait. Every bottleneck in your CI pipeline that used to cost you hours now costs you days. - -### Trunk Flaky Tests - -At tens of thousands of tests, even a 1% flake rate means false failures on nearly every run. Each flake costs 10 to 15 minutes: the developer waits, reads logs, reruns, confirms it was noise. If your CI target is five-minute PR jobs, every flake doubles or triples that. - -Trunk detects flakes through branch-aware analysis that treats main, PRs, and merge queues differently. We fingerprint failure modes using stack trace embeddings, which means quarantine decisions are based on the actual failure pattern, not just the test name. If a quarantined test fails with a known flaky pattern, CI passes. If it fails in a new way, CI fails normally. Business-critical tests can be pinned as never-quarantine. Developers see all of this in PR comments: what failed, why, and whether it's their code or a known issue. No code changes required. - -On the repair side, we're working with design partners on AI-powered fixing through MCP integration with the likes of Claude Code, Codex, or Cursor. Trunk provides the failure data and CI context, and the agent uses that to iterate on the actual fix. Think of it as a Roomba for flaky tests. Teams in the program are already running it to detect flakes, figure out root causes, and submit fixes without a human in the loop. - -[Full Flaky Tests documentation →](./flaky-tests/overview) - -### Trunk Merge Queue - -Traditional merge queues guarantee main stability by testing PRs one at a time. At 100+ PRs/day, that becomes a bottleneck. Monorepos make this easier to solve. 
If you have mobile, frontend, and backend code in the same repo, those PRs can test and merge independently because they don't touch the same targets. Linear merge queues don't know that. They put everything in one line. - -Trunk's merge queue runs in parallel mode. It knows which targets each PR affects, finds non-overlapping sets, and tests them at the same time. When queue depth grows, it batches multiple PRs into a single CI run and bisects automatically if the batch fails. Anti-flake protection keeps flaky failures from stalling the queue: if a later batch that includes the same code passes, both merge. - -Validated at 250+ PRs/hour sustained over 24 hours. Peaked at 300+ simultaneous PRs in parallel testing. - -[Full Merge Queue documentation →](./merge-queue/merge-queue) - -### How They Work Together - -Without flaky test handling, a merge queue backs up every time a test becomes unreliable. One recurring flake means batches fail, need re-isolation, and the queue goes serial again. With both products running, flakes get quarantined by failure mode so CI stays clean, parallel mode and batching keep the queue moving, and anti-flake protection in the queue catches what slips through. - -#### Trusted by - - - - -#### Works With Your Stack - -* **CI providers**: Works with any CI provider. Common setups include GitHub Actions, GitLab CI, Jenkins, BuildKite, CircleCI, and Azure DevOps. Integrates via CLI that uploads test results from existing pipelines. -* **Languages**: Works with any language. We analyze test output formats, not source code, so there's nothing language-specific to configure. -* **Test frameworks**: Works with any runner that produces JUnit XML, XCResult, or Bazel BEP. That covers Jest, Pytest, XCTest, Cypress, Playwright, RSpec, JUnit, GoogleTest, and most others. -* **Build systems**: Bazel, Nx, Gradle with native impacted-target calculation. API for custom build systems. 
-* **Integrations**: Full APIs for both products, webhooks with Svix transformations, CLI for local and CI use, Slack notifications, Jira and Linear ticket creation. [API documentation →](./setup-and-administration/apis/) - -#### Why Teams Choose Trunk - -**vs. GitHub native merge queue, Bors, Mergify.** Sequential by design. No parallel lane logic, no flake protection, no batching with bisection. - -**vs. Datadog, Buildkite Analytics.** They show you flake data but don't quarantine at runtime or integrate with your merge queue. Most stop running quarantined tests entirely, which hides the problem. Trunk keeps running them to collect evidence for root cause analysis. - -**vs. building in-house.** Merge queues at scale need parallel graph computation, bisection, and robust GitHub API orchestration. Flaky test detection at 50k+ tests needs real-time ETL, embeddings, and classification. If the engineers who built your internal system leave, you're maintaining deployment-path infrastructure without the knowledge to fix it. - -#### Getting Started - -Most teams schedule a 30-minute call before integrating. We help plan for security reviews, understand your CI architecture, and flag common gotchas. - -* [Schedule a call](https://calendly.com/trunk/demo) **← Recommended** - -Or explore on your own: [Create a Trunk account →](https://app.trunk.io/signup) - -* [Flaky Tests Integration Guide](/flaky-tests/get-started) -* [Merge Queue Setup Guide](/merge-queue/getting-started) - -We set up a direct Slack Connect channel with our engineers for your team. Feature requests, debugging, planning. Not a vendor you file tickets with. - -#### Security & Compliance - -SOC 2 Type II certified. TLS/HSTS in transit, AES-256 at rest. AWS-hosted in U.S. data centers with private VPCs. MFA, least privilege, access logging. Regular vulnerability scans, annual third-party pen tests. 45-day test result retention. 
We don't access your source code, secrets, environment variables, or customer data. [Request SOC 2 report](mailto:security@trunk.io). - - -**Want to see how it works? Have questions?** [**Schedule time here**](https://calendly.com/trunk/demo) **or email** [**support@trunk.io**](mailto:support@trunk.io) - - -#### Learn More - - - - A merge queue to make merging code in GitHub safer and easier - - - Detect, quarantine, and eliminates flaky tests from your codebase - - diff --git a/introduction.mdx b/introduction.mdx new file mode 100644 index 0000000..5705aef --- /dev/null +++ b/introduction.mdx @@ -0,0 +1,263 @@ +--- +title: "Home" +description: "Ship software as fast as AI writes it" +mode: "custom" +--- + +
+
+
+

+ Ship software as fast as AI writes it +

+

+ AI generates code at machine speed, but code review, CI, and delivery + still move at human pace. Trunk closes that gap with flaky test + management and a merge queue built for high-throughput teams. +

+ +
+
+
+ +
+

+ Quick Start +

+

+ Most teams schedule a 30-minute call before integrating. You can also + explore the platform on your own in four steps. +

+ + + Sign up in the [Trunk app](https://app.trunk.io/signup) and create an + organization for your team. + + + Follow [Account + Setup](/setup-and-administration/connecting-to-trunk) to create your + workspace and install the Trunk GitHub App with the right repository + access. + + + Start with the [Merge Queue getting + started guide](/merge-queue/getting-started) or the [Flaky Tests getting + started guide](/flaky-tests/get-started), depending on whether queue + depth or CI noise hurts more today. + + + Use your API token to query Trunk programmatically. + + ```bash + curl -X POST https://api.trunk.io/v1/getQueue \ + -H "Content-Type: application/json" \ + -H "x-api-token: $TRUNK_API_TOKEN" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "my-org", + "name": "my-repo" + }, + "targetBranch": "main" + }' + ``` + + + +
+ +
+

+ Product Guides +

+

+ Trunk ships two products that work independently or together to keep + delivery fast and reliable. +

+ + + Detect, quarantine, and fix flaky tests with branch-aware analysis, + failure-type tracking, PR comments, and ticketing integrations. + + + Run a parallel merge queue with impacted targets, batching, bisection, + priority handling, and anti-flake protection. + + +
+ +
+

+ Platform Highlights +

+

+ The platform is built to work with the tools you already use while removing + the bottlenecks that slow delivery down at scale. +

+ + + Route independent pull requests through separate lanes instead of forcing + every change through one serialized line. + + + Keep flaky tests visible while preventing known failure modes from + blocking CI and backing up your queue. + + + Integrate Trunk with your own automation through REST APIs, webhooks, CLI + workflows, and the MCP server. + + +
+ +
+

+ Resources +

+

+ Dive deeper into setup, administration, integrations, and reference material + once your first workflow is live. +

+ + + Create your organization, manage access, review GitHub App permissions, + and understand billing, security, and support. + + + Explore Trunk REST APIs, webhook payloads, and auth patterns for custom + integrations. + + + Generate compatible test output across frameworks like Jest, Pytest, + XCTest, Cypress, Playwright, RSpec, JUnit, and more. + + + Connect Trunk's MCP server to Cursor, Claude Code, GitHub Copilot, or + Gemini for CI-assisted root cause analysis and automation. + + +
+ +
+

+ Support +

+

+ Get help with onboarding, evaluation, and ongoing rollout through the + channels your team already uses. +

+ + + Talk through your CI architecture, security review, and rollout plan with + the Trunk team. + + + Ask questions, share feedback, and get help from other engineers using + Trunk. + + + Review support availability, response expectations, and enterprise support + details. + + +
+ +
+
+
+
+

+ Ready to ship faster? +

+

+ Start with the guides, explore the APIs, or schedule time with the team + if you want help planning your first rollout. +

+
+ +
+
+
diff --git a/links/app.mdx b/links/app.mdx deleted file mode 100644 index 72f93d8..0000000 --- a/links/app.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Open app -url: https://app.trunk.io ---- diff --git a/links/changelog.mdx b/links/changelog.mdx deleted file mode 100644 index aa2a4af..0000000 --- a/links/changelog.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Changelog -url: https://trunk.io/changelog ---- \ No newline at end of file diff --git a/links/feature-requests.mdx b/links/feature-requests.mdx deleted file mode 100644 index b8fd868..0000000 --- a/links/feature-requests.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Feature requests -url: https://features.trunk.io ---- \ No newline at end of file diff --git a/links/flaky-tests-api.mdx b/links/flaky-tests-api.mdx deleted file mode 100644 index 1b0abdd..0000000 --- a/links/flaky-tests-api.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Flaky Tests -url: /flaky-tests/reference/api-reference ---- \ No newline at end of file diff --git a/links/mcp-reference.mdx b/links/mcp-reference.mdx deleted file mode 100644 index e776151..0000000 --- a/links/mcp-reference.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: MCP reference -url: /flaky-tests/reference/mcp-reference ---- \ No newline at end of file diff --git a/links/merge-queue-api.mdx b/links/merge-queue-api.mdx deleted file mode 100644 index d3783e5..0000000 --- a/links/merge-queue-api.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Merge Queue -url: /merge-queue/reference/merge ---- \ No newline at end of file diff --git a/links/slack.mdx b/links/slack.mdx deleted file mode 100644 index a4b4055..0000000 --- a/links/slack.mdx +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Slack community -url: https://slack.trunk.io ---- \ No newline at end of file diff --git a/merge-queue/administration/index.mdx b/merge-queue/administration.mdx similarity index 67% rename from merge-queue/administration/index.mdx rename to merge-queue/administration.mdx index 622d6a8..95432aa 
100644 --- a/merge-queue/administration/index.mdx +++ b/merge-queue/administration.mdx @@ -6,23 +6,18 @@ These pages are for repository administrators and platform engineers who configu ### Configuration -[**Settings and configurations**](./advanced-settings)\ +[**Settings and configurations**](/merge-queue/administration/advanced-settings)\ Manage queue behavior, GitHub integration, CI/CD configuration, and user preferences. -### Infrastructure as Code - -[**Terraform provider**](./terraform)\ -Manage merge queue configuration as code using the `trunk-io/trunk` Terraform provider. - ### Integrations -[**Integration for Slack**](../integration-for-slack)\ +[**Slack integration**](/merge-queue/integration-for-slack)\ Send real-time queue notifications to Slack channels. -[**Webhooks**](../webhooks)\ +[**Webhooks**](/merge-queue/webhooks)\ Integrate with external tools via HTTP webhooks for custom automation. ### Analytics -[**Metrics and monitoring**](./metrics)\ +[**Metrics and monitoring**](/merge-queue/administration/metrics)\ Track queue performance, identify bottlenecks, and measure optimization impact. diff --git a/merge-queue/administration/advanced-settings.mdx b/merge-queue/administration/advanced-settings.mdx index ebafce9..899249b 100644 --- a/merge-queue/administration/advanced-settings.mdx +++ b/merge-queue/administration/advanced-settings.mdx @@ -11,18 +11,17 @@ All of the following settings are specific to individual Merge Queues and can be Note that you must be an Organization admin to adjust any of these settings.
-*** +--- ## Merge Queue state You can change the state of your Merge Queue to control whether new PRs can enter the queue and whether tested PRs will merge. PRs already testing will always complete their tests regardless of state. Below are the possible states: | State | Will PRs Enter the Queue? | Will PRs Merge After Testing? | Example use case | -|-------|--------------------------|-------------------------------|------------------| -| `Running` | Yes ✅ | Yes ✅ | **Everyday merging**: protect your mainline and merges successful PRs. | -| `Paused` | No ❌ | No ❌ | **CI failure recovery**: stop merges and testing in the queue until failure is resolved. | -| `Draining` | No ❌ | Yes ✅ | **Code freeze**: merge PRs currently in the queue but don't start testing additional PRs. | - +| --- | --- | --- | --- | +| `Running` | Yes | Yes | Everyday merging: protect your mainline and merges successful PRs. | +| `Paused` | No | No | CI failure recovery: stop merges and testing in the queue until failure is resolved. | +| `Draining` | No | Yes | Code freeze: merge PRs currently in the queue but don't start testing additional PRs. | **Note:** The Merge Queue may automatically enter a `Switching Modes` state, which functions exactly like `Draining`. This occurs when you switch the queue mode while PRs are still being tested. @@ -34,38 +33,13 @@ The `Running` state is the default state of your merge queue, and will be the no `Draining` is useful for managing events like code freezes. PRs currently in the queue will be tested and merged, but no new PRs will start testing. -*** - -## Multiple queues per repository - -You can create multiple merge queues within a single repository, with each queue targeting a different branch. This is useful for teams that maintain separate branches for different environments (e.g., `main`, `staging`, `release/v2`). - -A branch can only be associated with one queue. 
Attempting to create a second queue against the same branch returns the error `A merge queue already exists for branch "" in this repository`. - -Each queue operates independently. PRs submitted to one queue don't interact with PRs in another queue for the same repo, and every queue has its own settings, including merge method, required statuses, batching, and concurrency. - -### Creating additional queues - -1. Navigate to **Merge Queue** and click **New Queue** at the top right -2. Select the same repository and enter a different target branch -3. Click **Create Queue** - -### Navigating between queues - -The Merge Queue dashboard groups queues by repository: - -* **Single-queue repos**: The repository row is itself a link that goes directly to the queue. -* **Multi-queue repos**: The repository row expands inline to list each queue with its target branch label. Click any queue to open it. - -In the Settings page, when a repository has more than one queue, a **Merge Queues** selector appears so you can switch between queues. The currently selected branch is shown next to the **Merge Queue Settings** heading. - -*** +--- ## Merge Queue mode -> Merge Queues operate in one of two modes, **Single** (default) or [**Parallel**](../optimizations/parallel-queues/)**.** +> Merge Queues operate in one of two modes, **Single** (default) or [**Parallel**](/merge-queue/optimizations/parallel-queues)**.** -**Single Queue** processes all pull requests in one line, testing each PR predictively against all changes ahead of it. Multiple PRs can be tested and merged simultaneously based on your [Testing Concurrency](./advanced-settings#testing-concurrency) and [Batching](./advanced-settings#batching) settings. +**Single Queue** processes all pull requests in one line, testing each PR predictively against all changes ahead of it. Multiple PRs can be tested and merged simultaneously based on your [Testing Concurrency](#testing-concurrency) and [Batching](#batching) settings. 
**Parallel Queues** dynamically creates multiple independent testing lanes based on each PR's impacted targets (the parts of the codebase it changes). PRs affecting different parts of the code can be tested in separate lanes, reducing wait times for repositories with distinct, independently-testable components. @@ -74,17 +48,15 @@ In the Settings page, when a repository has more than one queue, a **Merge Queue * Requires configuring a workflow to calculate and upload impacted targets for each PR * The queue will wait for impacted targets before processing PRs -Read more about [Trunk's implementation of Parallel merge queues](../optimizations/parallel-queues/), supported build systems ([Bazel](../../flaky-tests/get-started/frameworks/bazel), [Nx](../optimizations/parallel-queues/nx), or [custom AP](../optimizations/parallel-queues/api)I), and [what impacted targets are](../optimizations/parallel-queues/#what-are-impacted-targets). +Read more about [Trunk's implementation of Parallel merge queues](/merge-queue/optimizations/parallel-queues), supported build systems ([Bazel](/flaky-tests/get-started/frameworks/bazel), [Nx](/merge-queue/optimizations/parallel-queues/nx), or [custom AP](/merge-queue/optimizations/parallel-queues/api)I), and [what impacted targets are](/merge-queue/optimizations/parallel-queues#what-are-impacted-targets). -*** +--- ## Merge Method Choose how your PRs get merged into the target branch. Options are Squash (default), Merge Commit, or Rebase. - -![](/assets/SCR-20260202-obcl.png) - +
### Available Methods @@ -131,19 +103,7 @@ You can change your merge method at any time: The merge method is configured per repository, so different repositories in your organization can use different methods based on their needs. -### Custom merge commit titles - -You can override the merge commit title on a per-PR basis by adding a `merge-commit-title:` directive on its own line anywhere in the PR body: - -``` -merge-commit-title: feat(auth): add OAuth2 login flow [PROJ-123] -``` - -When present, Trunk uses this title for the merge commit instead of the default GitHub-generated title. The commit body follows the usual behavior for the configured merge method. When the directive is not present, the default behavior is preserved. - -See [Submit and cancel pull requests](../using-the-queue/reference#custom-merge-commit-titles) for more details and examples. - -*** +--- ## Testing concurrency @@ -162,7 +122,7 @@ For example, assuming a concurrency of 3: * At 12:10, Charlie submits PR 777 to the Merge Queue, and it starts testing. * At 12:15, Alice submits PR 1001 to the Merge Queue. Tests do not start because the Merge Queue is at its concurrency limit. -*** +--- ## Timeout for tests to complete @@ -176,32 +136,21 @@ For example, assuming a timeout of 4 hours: * At 3:05, PR 456 starts testing using Bob's CI system. * At 7:05, Trunk cancels PR 456 since PR 456 is still testing. -*** +--- ## Required Status Checks > Configure which CI status checks must pass before a PR can merge through the queue. -There are three ways to tell Merge Queue which status checks to wait on while testing a PR: - -1. **GitHub branch protection rules** (default) — Trunk infers required statuses from the protected branch's required status checks. -2. **Trunk UI override** — Configure required statuses directly in the Trunk UI. -3. **`.trunk/trunk.yaml` override** — Declare required statuses in `merge.required_statuses`. 
- -All three work regardless of which testing mode you chose (Draft PR or Push-Triggered). +By default, Trunk infers required status checks from your GitHub branch protection rules. You can override this by configuring required statuses directly in the Trunk UI, giving you independent control over which checks gate the merge queue. - -**These checks are what Merge Queue waits on while a PR is already in the queue and testing. They do not control which PRs are admitted into the queue.** - - -**When to override the default:** +**When to configure in Trunk:** * **Different checks for the queue** - Your branch protection requires checks that shouldn't gate the merge queue (e.g., code coverage reports, deployment previews) * **Stricter queue requirements** - You want the merge queue to require additional checks beyond what branch protection enforces * **Multiple queues** - Each queue can have its own set of required statuses -* **No GitHub branch protection** - You don't use branch protection rules and need to tell Merge Queue what to wait on -### Configure in the Trunk UI +### How to configure 1. Navigate to **Settings** > **Repositories** > your repository > **Merge Queue** 2. Find the **Required Status Checks** section @@ -212,29 +161,15 @@ All three work regardless of which testing mode you chose (Draft PR or Push-Trig When required statuses are configured in Trunk, only those statuses are required for the merge queue. When not configured, Trunk falls back to your GitHub branch protection required checks.
-### Configure in `.trunk/trunk.yaml` - -Alternatively, declare required statuses in your `.trunk/trunk.yaml` file at the root of your repository: - -```yaml -version: 0.1 -merge: - required_statuses: - - Unit Tests - - Integration Tests -``` - -The status check names must exactly match the CI job names that report status to GitHub. - -*** +--- ## Optimistic Merge Queue > Toggle this feature **Enabled** or **Disabled**. Default is **Disabled**. -[**Optimistic Merging**](../optimizations/optimistic-merging) allows multiple PRs to merge together at once when testing completes out of order. When [Testing Concurrency](./advanced-settings#testing-concurrency) allows multiple PRs to test simultaneously, a PR later in the queue may finish before PRs ahead of it. Since that PR's tests include all the changes ahead of it, the system can safely merge all verified PRs together instead of waiting for each one individually, reducing merge time. +[**Optimistic Merging**](/merge-queue/optimizations/optimistic-merging) allows multiple PRs to merge together at once when testing completes out of order. When [Testing Concurrency](#testing-concurrency) allows multiple PRs to test simultaneously, a PR later in the queue may finish before PRs ahead of it. Since that PR's tests include all the changes ahead of it, the system can safely merge all verified PRs together instead of waiting for each one individually, reducing merge time. -*** +--- ## Direct Merge to Main @@ -245,27 +180,27 @@ Merge PRs immediately when they're already based on the tip of main and the queu * **Benefit:** Eliminates 5-30 minutes of wait time for up-to-date PRs * **Best for:** Teams that keep PRs current with main before merging -Toggle this setting in **Settings** > **Repositories** > your repository > **Merge Queue**. Learn more in [Direct Merge to Main](../optimizations/direct-merge-to-main). +Toggle this setting in **Settings** > **Repositories** > your repository > **Merge Queue**. 
Learn more in [Direct Merge to Main](/merge-queue/optimizations/direct-merge-to-main). -*** +--- ## Pending Failure Depth > Pending Failure Depth can be set to any value, options are **0** (default), **1**, **2**, **3**, and **Custom**. -[**Pending Failure Depth**](../optimizations/pending-failure-depth) controls how many levels of successor test runs the system waits on before transitioning a failed group out of the Pending Failure state. When combined with [optimistic merging](../optimizations/optimistic-merging), this allows a passing successor to retroactively clear a failure caused by a transient issue (flake). +[**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth) allows a failed PR to remain in the queue temporarily while a configurable number of PRs behind it complete testing. Since predictive testing means the failed PR's code is retested as part of later PRs, this gives flaky tests multiple chances to pass before the PR is evicted from the queue. -When set to **0** (default), the successor check is skipped and groups transition as soon as predecessor groups finish testing. When set to a value greater than 0, the system additionally waits for that many successor levels to finish testing before transitioning. +When set to **0** (default), failed PRs are immediately evicted from the queue. Any PRs behind the failed PR that were already testing will be restarted, since they were testing against a predicted future state of the branch that is no longer accurate. -*** +--- ## Draft pull request creation > Toggle this feature **Enabled** or **Disabled**. Default is **Enabled**. -[**Draft PR Creation**](../getting-started/configure-branch-protection#draft-pr-mode-recommended---default) determines whether Trunk Merge Queue creates draft PRs or push-triggered branches when testing changes. When enabled (default), the queue creates draft PRs to trigger your existing PR-based CI checks. 
When disabled, the queue creates `trunk-merge/` branches instead, requiring you to configure push-triggered workflows to run your required status checks. +[**Draft PR Creation**](/merge-queue/getting-started/configure-branch-protection#draft-pr-mode-recommended---default) determines whether Trunk Merge Queue creates draft PRs or push-triggered branches when testing changes. When enabled (default), the queue creates draft PRs to trigger your existing PR-based CI checks. When disabled, the queue creates `trunk-merge/` branches instead, requiring you to configure push-triggered workflows to run your required status checks. -*** +--- ## GitHub comments @@ -278,28 +213,9 @@ When enabled, Trunk posts comments on pull requests with merge queue status upda * **Testing and evaluation** - Validate the merge queue works with your CI setup without notifying your development team. Once configured and ready, re-enable comments to roll out to developers. * **Custom tooling** - You're building your own bot or integration that will provide merge queue instructions to developers, making Trunk's default comments redundant. - -![](/assets/merge-github-comment_(1).png) - - -*** +
-## GitHub Statuses - -> Toggle this feature **Enabled** or **Disabled**. Default is **Enabled**. - -When enabled, Trunk posts a GitHub check on PRs that are in the merge queue. The check appears in the PR's Checks section with the name `Trunk Merge Queue ()` (for example, `Trunk Merge Queue (main)` for a queue on `main`) and updates as the PR moves through the queue, from queued to testing to a final outcome. - -Each check includes a **Details** link that goes directly to the PR's page in the Trunk dashboard. This gives developers visibility into their PR's queue position without leaving GitHub. - -**When to enable:** - -* **Team adoption** - Makes the merge queue visible in developers' existing GitHub workflow -* **Status-based automation** - Other tools or workflows can react to the queue check - -See [GitHub status check](../using-the-queue/monitor-queue-status#github-status-check) for details on each status value. - -*** +--- ## GitHub commands @@ -312,11 +228,11 @@ Whether or not GitHub slash commands like `/trunk merge` are enabled for this me * **API-only workflows** - You want all queue submissions to go through the public API (e.g., via a bot or custom automation) rather than individual developer commands. * **Holding pattern** - You're temporarily restricting queue submissions while investigating issues, performing maintenance, or coordinating with your team. (Note: Consider using the Paused or Draining queue state if you want to stop all new PRs from entering the queue.) -*** +--- ## Connect with Slack -[Connect Trunk Merge Queue to Slack](../integration-for-slack) to receive real-time notifications about queue activity. After [installing the Trunk Slack app](../integration-for-slack#installing-the-trunk-slack-app) for your organization, you can route notifications to **multiple Slack channels** per repository, each with its own set of enabled topics. Individual users can also receive **personal DMs** about their PRs. 
+[Connect Trunk Merge Queue to Slack](/merge-queue/integration-for-slack) to receive real-time notifications about queue activity in a designated channel. After connecting, you can choose which events trigger notifications. **Available notifications include:** @@ -326,13 +242,13 @@ Whether or not GitHub slash commands like `/trunk merge` are enabled for this me * Queue configuration changes (pausing, mode changes, concurrency adjustments) * Pull request cancellations -*** +--- ## Batching > Toggle this feature **Enabled** or **Disabled**. Default is **Disabled**. -[**Batching**](../optimizations/batching) tests multiple pull requests as a single unit instead of individually, dramatically reducing CI costs. +[**Batching**](/merge-queue/optimizations/batching) tests multiple pull requests as a single unit instead of individually, dramatically reducing CI costs. ### Bisection Testing Concurrency @@ -345,21 +261,21 @@ Configure how many PRs can be tested simultaneously during batch failure isolati #### How to Configure 1. Navigate to **Settings** > **Repositories** > your repository > **Merge Queue** > **Batching** -2. Make sure **Batching** is enabled +2. Ensure **Batching** is enabled 3. Set **Bisection Testing Concurrency** to your desired value 4. Monitor CI resource usage and adjust as needed -For detailed guidance on using this setting effectively, see [Bisection Testing Concurrency in the Batching](../optimizations/batching#bisection-testing-concurrency) documentation. +For detailed guidance on using this setting effectively, see [Bisection Testing Concurrency in the Batching](/merge-queue/optimizations/batching#bisection-testing-concurrency) documentation. -*** +--- ## Delete Merge Integration - + CAUTION: Any queued merge requests will not be merged and all data will be lost. -**Before deleting:** Make sure all important PRs in the queue are either merged manually or that you're prepared to resubmit them to a new queue. 
- +**Before deleting:** Ensure all important PRs in the queue are either merged manually or that you're prepared to resubmit them to a new queue. + This setting will delete the Merge Queue configuration and any queued merge requests will not be merged and all data will be lost. diff --git a/merge-queue/administration/metrics.mdx b/merge-queue/administration/metrics.mdx index b197319..f6aa1da 100644 --- a/merge-queue/administration/metrics.mdx +++ b/merge-queue/administration/metrics.mdx @@ -1,13 +1,12 @@ --- title: "Metrics and monitoring" +description: "The Metrics and Monitoring dashboard provides deep analytics on your merge queue's performance, helping you identify bottlenecks, measure improvements, and optimize your workflow." --- The Metrics and Monitoring dashboard provides deep analytics on your merge queue's performance, helping you identify bottlenecks, measure improvements, and optimize your workflow. Your merge experience directly impacts the velocity and productivity of your development team. Merge Queue Metrics provides observability for the **health** of your Trunk Merge Queue, so you can discover issues early and make informed optimizations. - -![Health tab of the Trunk web app showing a Conclusion Counts stacked bar chart and a Time in Queue p50 line chart over a 30-day range](/assets/merge-health.png) - +

The Health tab showing metrics in the Trunk Web App.

### Access metrics @@ -21,9 +20,7 @@ CI Time and CI Jobs Triggered charts are only available for **GitHub Actions**. When running in Parallel Mode, you can filter your merge queue health metrics by impacted targets to analyze performance for specific parts of your codebase. - -![Health dashboard filtered by PR Conclusion: Failed and Impacted Targets: //trunk/all-ts:node_modules/@trunkio, showing a single failure bar and a p50 Time in Queue line](/assets/1768426992-impacted-target-filtering.avif) - +
#### Why Filter by Impacted Targets? @@ -33,7 +30,7 @@ In repositories with multiple teams or distinct components (like a TypeScript/Py * **Identify bottlenecks by component** - Determine if certain targets have slower merge times * **Optimize strategically** - Focus queue configuration improvements on your highest-priority code paths * **Demonstrate value** - Show engineering leadership how parallel mode benefits specific teams or projects -* **Check fairness** - Verify that all teams experience similar queue performance +* **Ensure fairness** - Verify that all teams experience similar queue performance #### How to Use the Filter @@ -48,7 +45,7 @@ In repositories with multiple teams or distinct components (like a TypeScript/Py **Impacted targets are set when a PR enters the queue** -Each PR's impacted targets are calculated based on which files changed and which parts of your codebase are affected. For details on how impacted targets are computed, see [Parallel Queues - Impacted Targets](../optimizations/parallel-queues/#posting-impacted-targets-from-your-pull-requests). +Each PR's impacted targets are calculated based on which files changed and which parts of your codebase are affected. For details on how impacted targets are computed, see [Parallel Queues - Impacted Targets](/merge-queue/optimizations/parallel-queues#posting-impacted-targets-from-your-pull-requests). **PRs can affect multiple targets** @@ -79,7 +76,7 @@ The date ranges selector at the top left of the dashboard allows you to filter t The metrics displayed only include data that have **completed within the time range**, jobs started but not completed during the selected time **will not be displayed**. -When working across multiple time zones, enable **Time in UTC** so everyone sees the same data. +When working across multiple time zones, enable **Time in UTC** to ensure everyone sees the same data. 
### Conclusion count @@ -90,25 +87,7 @@ Conclusion counts are an important signal to potential bottlenecks or underlying Conclusions are tagged with a reason to give further insights into how merges pass or fail in the queue. You can show or hide conclusions of a particular reason by using the **+ Add** button. -| Category | Reason | Description | -| --- | --- | --- | -| ✅ Pass | Merged by Trunk | Passed all tests in Merge Queue and merged by Trunk | -| ✅ Pass | Merged manually | User manually merged the PR in Git | -| ❌ Failure | Test run timeout | User-defined timeout for tests exceeded | -| ❌ Failure | Failed Tests | Required test failed while testing the PR in the merge queue | -| ❌ Failure | Merge conflict | A (git) merge conflict encountered | -| ❌ Failure | Config parsing failure | Malformed `trunk.yaml` that couldn't be parsed | -| ❌ Failure | Config bad version | Invalid version field in `trunk.yaml` | -| ❌ Failure | Config bad required statuses | Failed to parse required statuses in `trunk.yaml` | -| ❌ Failure | No required statuses | No source for required tests was found in `trunk.yaml` or branch protection settings | -| ❌ Failure | GitHub API Failed | GitHub returned an error to us that could not be resolved while processing the PR | -| ❌ Failure | PR updated at merge time | PR updated as Trunk was attempting to merge it | -| 🚫 Cancel | Canceled by user | PR explicitly canceled by user | -| 🚫 Cancel | PR closed | PR closed (not merged) | -| 🚫 Cancel | PR pushed to | New commits pushed to the PR branch while in the merge queue | -| 🚫 Cancel | PR draft | PR was converted to a draft, which cannot be merged | -| 🚫 Cancel | PR base branch changed | Base branch of PR in the merge queue changed | -| 🚫 Cancel | Admin requested | Trunk employee canceled PR during a support session (extreme cases) | +
CategoryReasonDescription
✅ PassMerged by TrunkPassed all tests in Merge Queue and merged by Trunk
✅ PassMerged manuallyUser manually merged the PR in Git
❌ FailureTest run timeoutUser-defined timeout for tests exceeded
❌ FailureFailed TestsRequired test failed while testing the PR in the merge queue
❌ FailureMerge conflictA (git) merge conflict encountered
❌ FailureConfig parsing failureMalformed trunk.yaml that couldn't be parsed
❌ FailureConfig bad versionInvalid version field in trunk.yaml
❌ FailureConfig bad required statusesFailed to parse required statuses in trunk.yaml
❌ FailureNo required statusesNo source for required tests was found in trunk.yaml or branch protection settings
❌ FailureGitHub API FailedGitHub returned an error to us that could not be resolved while processing the PR
❌ FailurePR updated at merge timePR updated as Trunk was attempting to merge it
🚫 CancelCanceled by userPR explicitly canceled by user
🚫 CancelPR closedPR closed (not merged)
🚫 CancelPR pushed toNew commits pushed to the PR branch while in the merge queue
🚫 CancelPR draftPR was converted to a draft, which cannot be merged
🚫 CancelPR base branch changedBase branch of PR in the merge queue changed
🚫 CancelAdmin requestedTrunk employee canceled PR during a support session (extreme cases)
🚫 CancelA PR in the stack had its base branch changedA member of the PR stack had its base branch changed while in the queue (stacked PRs only)
🚫 CancelA PR in a PR stack was closedA member of the PR stack was closed while in the queue (stacked PRs only)
🚫 CancelPR was merged as part of a different stackThe PR was already merged through a different stack (stacked PRs only)
🚫 CancelPart of this PR's stack was pushed toNew commits were pushed to a PR in the stack while in the queue (stacked PRs only)
### Time in queue @@ -128,55 +107,7 @@ The time in queue can be displayed as different statistical measures. You can sh | P95 | The value below 95% of the time in queue falls. | | P99 | The value below 99% of the time in queue falls. | -### Drill down into metrics - -From the **Conclusion count** and **Time in queue** charts, you can drill into any point or window on the graph to see the exact pull requests that made up those numbers. - -#### Why Drill Down? - -Aggregated charts tell you _that_ something happened — drilling down tells you _which PRs_ caused it. This makes it easy to: - -* **Track down outliers** — if the P99 on Time in queue spikes, drill into that bucket to find the specific PR that dragged the tail out. -* **Investigate failure spikes** — click a bar on Conclusion count where failures jumped and see exactly which PRs failed and why. -* **Audit a time window** — pull the full list of PRs merged, failed, or canceled during an incident window or release cut. -* **Answer one-off questions** — "which PRs merged between 2pm and 4pm yesterday?" without writing a query against the Prometheus endpoint. - -#### Select Data Points - -You have two ways to select: - -* **Click a single data point** to see the PRs in that time bucket. -* **Click and drag across the chart** to select a range of data points spanning multiple time buckets. The selected range stays highlighted and the rest of the chart dims, giving you a focused view of just that window. The same range syncs across both charts so you can correlate Conclusion count and Time in queue data for the period you picked. - -Once a selection is made, a **View PRs** button appears. Click it to open the list of PRs that make up the selection. - - -![PR Outcomes and Time in Queue charts with a selected Apr 20–21 range broken out into 998 merged, 30 cancelled, and 30 failed, and a selection bar showing the View PRs button](/assets/drill-down-overview.png) - - -To pick a different window, drag a new selection. 
To clear the selection, change the time range, time bucket, or **Time in UTC** setting at the top of the dashboard. - -#### Review the PR List - -The PR list page shows every PR included in your selection, along with: - -* **Conclusion** — whether the PR merged, failed, or was cancelled. -* **Reason** — the specific cause behind the conclusion (for example, Merged by Trunk, Required status failed, PR closed). See the [Conclusion count](#conclusion-count) table for the full list. -* **Time in queue** — how long the PR spent in the merge queue from entry to exit. - -Both columns are sortable, so you can quickly surface the longest-running PRs in a window or group all failures of the same type together. - - -![PRs in Range table listing individual PRs with Conclusion (Merged or Failed), Reason, and Time in Queue columns, sorted by Time in Queue descending](/assets/pr-drill-down-list.png) - - -The PR list page shows the selected date range as a subtitle and a **Back to Health** link to return to the charts. If the selection contains more than 2,500 PRs, the list shows the first 2,500 with a notice indicating the total. Narrow the time bucket on the chart to drill into a smaller window. - - -Drill down and range selection are currently available on the Conclusion count and Time in queue charts. Additional Health charts will support the same interactions as they land in the UI. - - -*** +--- ### Prometheus metrics endpoint @@ -192,14 +123,14 @@ The Prometheus metrics endpoint is available to all Merge Queue users. GET https://api.trunk.io/v1/getMergeQueueMetrics ``` -Authenticate with your [Trunk API token](../../setup-and-administration/apis/#authentication) using the `x-api-token` header. +Authenticate with your [Trunk API token](/setup-and-administration/apis#authentication) using the `x-api-token` header. **Query parameters:** -| Parameter | Required | Description | -| --- | --- | --- | -| `repo` | No | Repository in `owner/name` format (e.g., `my-org/my-repo`). 
If omitted, returns metrics for all repositories in the organization. Must be provided together with `repoHost`. | -| `repoHost` | Conditional | Repository host (e.g., `github.com`). Required if `repo` is specified. | +| Parameter | Required | Description | +| ---------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `repo` | No | Repository in `owner/name` format (e.g., `my-org/my-repo`). If omitted, returns metrics for all repositories in the organization. Must be provided together with `repoHost`. | +| `repoHost` | Conditional | Repository host (e.g., `github.com`). Required if `repo` is specified. | The response uses content type `text/plain; version=0.0.4; charset=utf-8` (standard Prometheus format). @@ -207,37 +138,33 @@ The response uses content type `text/plain; version=0.0.4; charset=utf-8` (stand All metrics include these labels: -| Label | Description | Example values | -| --- | --- | --- | -| `repo` | Repository name | `my-org/my-repo` | -| `branch` | Base branch name | `main`, `develop` | -| `queue_type` | Queue type | `main` or `bisection` | +| Label | Description | Example values | +| ------------ | ---------------- | --------------------- | +| `repo` | Repository name | `my-org/my-repo` | +| `branch` | Base branch name | `main`, `develop` | +| `queue_type` | Queue type | `main` or `bisection` | -##### Point-in-time gauges +**Point-in-time gauges** These metrics reflect the current state of your merge queue. 
-| Metric | Type | Description | -| --- | --- | --- | -| `mq_depth_current` | Gauge | Number of PRs currently in the queue (excludes PRs that are waiting to be mergeable before being admitted to the queue) | -| `mq_awaiting_mergeability` | Gauge | Number of PRs waiting for prerequisites like required reviews or status checks | -| `mq_testing_slots_active` | Gauge | Number of PRs currently in TESTING state (active CI slots in use) | +| Metric | Type | Description | +| -------------------------- | ----- | ----------------------------------------------------------------------------------------------------------------------- | +| `mq_depth_current` | Gauge | Number of PRs currently in the queue (excludes PRs that are waiting to be mergeable before being admitted to the queue) | +| `mq_awaiting_mergeability` | Gauge | Number of PRs waiting for prerequisites like required reviews or status checks | +| `mq_testing_slots_active` | Gauge | Number of PRs currently in TESTING state (active CI slots in use) | -##### Rolling 1-hour window metrics +**Rolling 1-hour window metrics** These metrics summarize activity over a sliding 1-hour window. They update continuously as the window advances. 
-| Metric | Type | Extra labels | Description | -| --- | --- | --- | --- | -| `mq_pr_conclusions_1h_total` | Gauge | `conclusion` (merged, failed, cancelled) | PRs that exited the queue in the last hour | -| `mq_pr_restarts_1h_total` | Gauge | — | PR restarts (TESTING to PENDING transitions) in the last hour | -| `mq_pr_wait_duration_1h_seconds` | Histogram | `le` (bucket boundary) | Distribution of time PRs spent waiting before testing starts | -| `mq_pr_test_duration_1h_seconds` | Histogram | `le` (bucket boundary) | Distribution of time PRs spent in the testing phase | -| `mq_pr_time_in_queue_1h_seconds` | Histogram | `le` (bucket boundary) | Distribution of total time PRs spent in the queue, from entry to exit (includes waiting, testing, and any other phases, such as [pending failure](../optimizations/pending-failure-depth)). | - -Each histogram emits `_bucket{le="..."}`, `_sum`, and `_count` series. Bucket boundaries (in seconds): 60, 300, 600, 900, 1800, 3600, 5400, 7200, +Inf. - -For clarity, PRs in the "Waiting to Enter Queue" state (submitted to the queue but still waiting on prerequisites such as GitHub mergeability before they can be admitted to the queue) are not considered to be "in the queue" yet. So any time spent in this state is not counted in the Wait Duration or Time in Queue metrics. 
+| Metric | Type | Extra labels | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ---------------------------------------- | ------------------------------------------------------------- | +| `mq_pr_conclusions_1h_total` | Gauge | `conclusion` (merged, failed, cancelled) | PRs that exited the queue in the last hour | +| `mq_pr_restarts_1h_total` | Gauge | — | PR restarts (TESTING to PENDING transitions) in the last hour | +| `mq_pr_wait_duration_1h_seconds` | Histogram | `le` (bucket boundary) | Distribution of time PRs spent waiting before testing starts | +| `mq_pr_test_duration_1h_seconds` | Histogram | `le` (bucket boundary) | Distribution of time PRs spent in the testing phase | +| Each histogram emits `_bucket{le="..."}`, `_sum`, and `_count` series. Bucket boundaries (in seconds): 60, 300, 600, 900, 1800, 3600, 5400, 7200, +Inf. | | | | Rolling window metrics use **gauge semantics**, not true Prometheus counters. They represent a snapshot of the last hour, not cumulative totals. PromQL functions like `rate()` and `increase()` are **not meaningful** on these metrics. Use the values directly instead. diff --git a/merge-queue/administration/terraform.mdx b/merge-queue/administration/terraform.mdx deleted file mode 100644 index 71fcf9a..0000000 --- a/merge-queue/administration/terraform.mdx +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: "Terraform Provider" -description: "Manage Trunk Merge Queue configuration as code using the trunk-io/trunk Terraform provider." ---- -The [trunk-io/trunk](https://registry.terraform.io/providers/trunk-io/trunk/latest) Terraform provider lets you manage merge queue configuration as infrastructure as code. Define your queue settings in Terraform, track changes in version control, and apply them consistently across repositories. 
- -The provider currently supports the `trunk_merge_queue` resource for creating, updating, importing, and deleting merge queues. - -**Current version:** `0.1.2` - -## Prerequisites - -* [Terraform](https://developer.hashicorp.com/terraform/install) >= 1.0 -* An org-level API token from your Trunk organization. See [Organization slug and token](../../setup-and-administration/managing-your-organization#organization-slug-and-token) for how to generate one. -* A repository connected to Trunk - -## Authentication - -Set your org-level API token using the `TRUNK_API_KEY` environment variable: - -```bash -export TRUNK_API_KEY="your-org-api-token" -``` - -Alternatively, you can pass it directly in the provider block: - -```hcl -provider "trunk" { - api_key = var.trunk_api_key -} -``` - - -Never commit your API key to version control. Use environment variables or a secrets manager to supply the `TRUNK_API_KEY` value. - - -*** - -## Quick Start - -```hcl -terraform { - required_version = ">= 1.0" - - required_providers { - trunk = { - source = "trunk-io/trunk" - version = "0.1.2" - } - } -} - -provider "trunk" {} - -resource "trunk_merge_queue" "main" { - repo = { - host = "github.com" - owner = "my-org" - name = "my-repo" - } - target_branch = "main" - concurrency = 5 -} -``` - -Run `terraform plan` to preview changes and `terraform apply` to apply them. If a merge queue already exists for the specified repository and branch, the provider will import it automatically rather than creating a duplicate. - -*** - -## Importing Existing Queues - -Merge queues created through the UI or API can be imported into Terraform. This lets you start managing an existing queue as code without recreating it. - -```bash -terraform import trunk_merge_queue.main github.com/my-org/my-repo/main -``` - -The import ID format is `{host}/{owner}/{name}/{target_branch}`. - -After importing, run `terraform plan` to compare the Terraform configuration against the current queue settings. 
Resolve any differences before running `terraform apply`. - -*** - -## Resource Reference: `trunk_merge_queue` - -### Required Attributes - -| Attribute | Type | Description | -| --- | --- | --- | -| `repo.host` | string | Repository host (e.g., `github.com`). Changing this forces a new resource. | -| `repo.owner` | string | Repository owner or organization. Changing this forces a new resource. | -| `repo.name` | string | Repository name. Changing this forces a new resource. | -| `target_branch` | string | Branch the merge queue targets (e.g., `main`). Changing this forces a new resource. | - - -The `repo` and `target_branch` attributes are immutable. Changing any of them will destroy the existing queue and create a new one. - - -### Optional Attributes With API Defaults - -These attributes are computed by the API if not specified. You only need to set them if you want to override the defaults. - -| Attribute | Type | Default | Description | -| --- | --- | --- | --- | -| `mode` | string | `"single"` | Queue mode: `"single"` or `"parallel"`. See [Merge Queue mode](./advanced-settings#merge-queue-mode). | -| `concurrency` | integer | API default | Number of PRs that can test simultaneously (minimum 1). See [Testing concurrency](./advanced-settings#testing-concurrency). | -| `state` | string | `"RUNNING"` | Queue state: `"RUNNING"`, `"PAUSED"`, or `"DRAINING"`. See [Merge Queue state](./advanced-settings#merge-queue-state). | - -### Other Optional Attributes - -| Attribute | Type | Description | -| --- | --- | --- | -| `testing_timeout_minutes` | integer | Maximum minutes to wait for tests before auto-cancellation. See [Timeout for tests to complete](./advanced-settings#timeout-for-tests-to-complete). | -| `pending_failure_depth` | integer | Number of successor test runs to wait on before transitioning a failed group. See [Pending failure depth](../optimizations/pending-failure-depth). 
| -| `can_optimistically_merge` | Boolean | Enable [optimistic merging](../optimizations/optimistic-merging). | -| `batch` | Boolean | Enable [batching](../optimizations/batching). | -| `batching_max_wait_time_minutes` | integer | Maximum minutes to wait for a batch to fill. | -| `batching_min_size` | integer | Minimum number of PRs in a batch before testing begins. | -| `merge_method` | string | How PRs are merged: `"MERGE_COMMIT"`, `"SQUASH"`, or `"REBASE"`. See [Merge Method](./advanced-settings#merge-method). | -| `comments_enabled` | Boolean | Whether Trunk posts status comments on PRs. See [GitHub comments](./advanced-settings#github-comments). | -| `commands_enabled` | Boolean | Whether `/trunk` slash commands are enabled. See [GitHub commands](./advanced-settings#github-commands). | -| `create_prs_for_testing_branches` | Boolean | Create draft PRs for testing branches. See [Draft pull request creation](./advanced-settings#draft-pull-request-creation). | -| `status_check_enabled` | Boolean | Whether Trunk posts a status check on PRs. | -| `direct_merge_mode` | string | `"OFF"` or `"ALWAYS"`. See [Direct merge to main](../optimizations/direct-merge-to-main). | -| `optimization_mode` | string | `"OFF"` or `"BISECTION_SKIP_REDUNDANT_TESTS"`. | -| `bisection_concurrency` | integer | Concurrency for bisection testing during batch failure isolation. See [Bisection Testing Concurrency](./advanced-settings#bisection-testing-concurrency). | -| `required_statuses` | list(string) | CI status checks that must pass. Set to `null` to use branch protection defaults. Set to `[]` to explicitly require no statuses. See [Required Status Checks](./advanced-settings#required-status-checks). | - -*** - -## Managing Drift - -When a merge queue is managed by Terraform, the Trunk UI displays a banner indicating that the queue is under Terraform management. - - -![](/assets/terraform-in-sync.png) - - -Users can still adjust merge queue settings through the UI. 
However, any changes made in the UI will cause **drift** between the live configuration and your Terraform state. The UI highlights when drift exists so your team is aware of the discrepancy. - - -![](/assets/terraform-drift-mergequeue.png) - - -To detect drift, run: - -```bash -terraform plan -``` - -This shows any differences between your Terraform configuration and the current queue state. Run `terraform apply` to reconcile the configuration back to what is defined in Terraform, or update your `.tf` files to match the desired state. - - -If your team adjusts settings through the UI, run `terraform plan` periodically to detect drift. Apply to reconcile, or update your Terraform configuration to match the desired state. - - -*** - -## Deleting a Queue - -A merge queue must be empty before it can be deleted. If the queue still has PRs in it, `terraform destroy` will fail. - -To empty a queue, you can set `state = "DRAINING"` and wait for all in-flight PRs to finish testing and merge. Once the queue is empty, run `terraform destroy` or remove the resource from your configuration and apply. - - -Terraform will fail to delete a queue that still has PRs in it. Ensure the queue is empty before destroying the resource. 
- - -*** - -## Examples - -### High-Throughput Queue With Batching - -```hcl -resource "trunk_merge_queue" "main" { - repo = { - host = "github.com" - owner = "my-org" - name = "my-repo" - } - target_branch = "main" - mode = "parallel" - concurrency = 20 - batch = true - batching_min_size = 4 - batching_max_wait_time_minutes = 5 - can_optimistically_merge = true -} -``` - -### Queue With Explicit Required Statuses - -```hcl -resource "trunk_merge_queue" "main" { - repo = { - host = "github.com" - owner = "my-org" - name = "my-repo" - } - target_branch = "main" - concurrency = 3 - merge_method = "SQUASH" - commands_enabled = true - comments_enabled = true - required_statuses = [ - "ci/build", - "ci/test", - "ci/lint", - ] -} -``` diff --git a/merge-queue/chrome-extension.mdx b/merge-queue/chrome-extension.mdx deleted file mode 100644 index f31da5f..0000000 --- a/merge-queue/chrome-extension.mdx +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: "Chrome Extension" -description: "Submit, cancel, and track Trunk Merge Queue pull requests directly from GitHub with the Trunk Chrome Extension." ---- -The Trunk Chrome Extension overlays merge queue controls and status onto your normal GitHub experience, so you can submit a PR to the queue, cancel it, and watch its testing progress without leaving the pull request page.\ -
- - -![](/assets/image_(51).png) - - - -The extension is a companion to Trunk Merge Queue — you still need a configured queue for your repository. The extension only surfaces controls and status for queues your Trunk organization already owns. - - -**Install the extension** - -1. Open the [Trunk for GitHub](https://chromewebstore.google.com/detail/liggeliamkammmieidmmfmmdnjilabgn?utm_source=item-share-cb) listing in the Chrome Web Store. -2. Click **Add to Chrome** and approve the requested permissions. -3. Pin the Trunk icon to your toolbar so the popup is one click away. -4. Click the Trunk icon and sign in. The extension uses your existing browser session at [app.trunk.io](https://app.trunk.io/) - if you're already logged in, no additional sign-in is needed. - -**Signed-out indicator** - -When you are not signed in to Trunk, the extension toolbar icon shows a lock badge. On any pull request page in a queue-enabled repository, a **Sign in to Trunk** row appears in place of the normal merge queue panel, reminding you to sign in before queue controls become available. - -### Submit a pull request to the queue - -On any pull request in a queue-enabled repository, the extension adds a **Merge Queue** panel replacing GitHub's native merge controls. - -1. Open the pull request on GitHub. -2. In the Trunk panel, click **Add to Merge Queue**. -3. Optionally choose a [priority](./optimizations/priority-merging) before submitting. -4. If batching is enabled for the repository, you can toggle **Skip batching** to enqueue this PR without grouping it into a batch — useful for hotfixes or PRs that need to merge without waiting for a batch window. - -Submission goes through the same backend as the `/trunk merge` comment and the Trunk web app, so behavior is identical. See [Submit and cancel pull requests](./using-the-queue/reference) for the full lifecycle. - -### Remove a pull request from the queue - -If a PR is already in the queue, the panel shows a **Cancel** action. - -1. 
Click **Cancel** in the Trunk panel on the PR page. -2. The PR is removed from the queue immediately, the same as running `/trunk cancel`. - -### Track testing progress - -Once a PR is in the queue, the extension panel updates in real time as it moves through each state: - -* **Queued** - waiting for prerequisites such as branch protection or mergeability -* **Pending** - admitted to the queue, waiting for capacity -* **Testing** - actively running required status checks against a merge candidate -* **Tests Passed** - waiting for upstream PRs before merging -* **Merged**, **Failed** - terminal states - - -### Rolling the Extension out to an entire Org - -Chrome admins can install the Trunk extension for everyone in a Google Workspace organization using the [Chrome Web Store ID](https://chromewebstore.google.com/detail/liggeliamkammmieidmmfmmdnjilabgn) `liggeliamkammmieidmmfmmdnjilabgn`. See Google's [Automatically install apps and extensions](https://support.google.com/chrome/a/answer/6306504?hl=en) guide for the admin console steps. - -### Authentication and security - -The extension does **not** ask you for credentials, API tokens, or a separate password. It authenticates by reusing your existing browser session at [app.trunk.io](https://app.trunk.io/) — the same session you already use for the Trunk web app. - -* **Session-based auth.** When you take an action in the extension, the request is sent to the Trunk API with the cookies your browser already holds for `app.trunk.io`. If you aren't signed in, the extension prompts you to sign in once via the normal Trunk login flow; from then on it piggybacks on that session. -* **No new credentials are stored.** The extension does not generate, store, or transmit a long-lived token. Signing out of [app.trunk.io](https://app.trunk.io/) signs the extension out as well. 
-* **Permissions are unchanged.** The extension can only see queues and act on PRs that your Trunk user already has access to - it cannot escalate permissions. Every action is recorded against your Trunk user, just as it would be from the web app or CLI. -* **Scoped to GitHub PR pages.** The content script runs on `github.com` pull request URLs so it can render the overlay; it does not read or transmit page contents beyond the repository and PR identifiers needed to query the Trunk API. -* **Same transport guarantees as the rest of Trunk.** All extension traffic to Trunk uses TLS, and your data is handled per the [Trunk Security policy](../setup-and-administration/security). - -### Frequently asked questions - - - - -Yes - the extension is an add-on on top of Trunk Merge Queue. Your repository must have the [Trunk GitHub App installed and a queue configured](./getting-started/) before the overlay does anything useful. - - - - - -The overlay only appears on pull requests in repositories that your Trunk organization has configured a queue for. If you're signed in and still don't see it, confirm the repository in **Settings → Repositories** in the Trunk web app. - - - - - -The extension targets Chrome. Chromium-based browsers (Edge, Brave, Arc) generally work via the Chrome Web Store, but only Chrome is officially supported. - - - - - -Both go through the same Trunk Merge Queue backend. The extension is a faster, in-page surface for the same actions and adds live status without polling the PR comments. - - - - -Click the Trunk extension icon and open **Options** (or right-click the icon and choose **Options**) to access the extension settings page. - -| Setting | Description | Default | -|---|---|---| -| **Celebration** | When enabled, a confetti burst plays each time you add a pull request to the merge queue. | Off | - -The celebration effect respects your operating system's reduced motion preference. 
If you have **Reduce motion** enabled in your system accessibility settings, no animation plays regardless of this toggle. - - diff --git a/merge-queue/getting-started/index.mdx b/merge-queue/getting-started.mdx similarity index 68% rename from merge-queue/getting-started/index.mdx rename to merge-queue/getting-started.mdx index 73b4a08..1047bd7 100644 --- a/merge-queue/getting-started/index.mdx +++ b/merge-queue/getting-started.mdx @@ -1,18 +1,18 @@ --- title: "Getting Started" -description: "Set up Trunk Merge Queue for your repository by installing the GitHub App, creating a queue, and configuring branch protection." +description: "This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protec" --- This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protection rules to allow the merge queue to function properly. ### Step 1: Install the GitHub App and create a Queue -**The Trunk GitHub App is required for Merge Queue to function.** It grants Trunk Merge Queue the necessary permissions to create test branches, read CI results, and merge PRs in your repository. View [detailed permissions and what Trunk uses them for](../../setup-and-administration/github-app-permissions). +**The Trunk GitHub App is required for Merge Queue to function.** It grants Trunk Merge Queue the necessary permissions to create test branches, read CI results, and merge PRs in your repository. View [detailed permissions and what Trunk uses them for](/setup-and-administration/github-app-permissions). The Trunk GitHub app can be added and removed from repositories within your org as needed. -1. [Sign in to app.trunk.io](https://app.trunk.io/login) and navigate to the **Merge Queue** tab. 
(First-time users will [create an organization](../../setup-and-administration/connecting-to-trunk) before accessing Merge Queue.) +1. [Sign in to app.trunk.io](https://app.trunk.io/login) and navigate to the **Merge Queue** tab. (First-time users will [create an organization](/setup-and-administration/connecting-to-trunk) before accessing Merge Queue.) 2. Click the **Create New Queue** button. @@ -36,17 +36,15 @@ The GitHub App installation must be initiated from the Trunk web app to properly 4. Select a repository from the dropdown and enter the target branch to merge into. Click **Create Queue.** - -![](/assets/merge-add-repo_(1).png) - +
### Step 2: Configure Branch Protection -The merge queue needs specific GitHub permissions to function. Follow the [Branch Protection & Required Status Checks](./configure-branch-protection) guide to: +The merge queue needs specific GitHub permissions to function. Follow the [Branch Protection & Required Status Checks](/merge-queue/getting-started/configure-branch-protection) guide to: 1. **Configure push restrictions** - Allow the `trunk-io` bot to push to your protected branch 2. **Disable “Require branches to be up to date before merging.” -** This setting is one of the most common sources of confusion. Many teams enable it to keep their branch green, but it conflicts with how merge queues work. If this is on, PRs will often sit in the “Queued” state forever because GitHub blocks Trunk from updating them. -3. **Exclude Trunk's temporary branches** - Make sure `trunk-temp/*` and `trunk-merge/*` branches are not protected. They are created and cleaned up automatically by the queue. +3. **Exclude Trunk's temporary branches** - Ensure `trunk-temp/*` and `trunk-merge/*` branches are not protected. They are created and cleaned up automatically by the queue. **Without proper branch protection configuration, the merge queue will not work.** You may see errors like "Permission denied on `trunk-merge/*` branch" or PRs will remain stuck in "Queued" state. @@ -54,12 +52,12 @@ The merge queue needs specific GitHub permissions to function. Follow the [Branc #### Optional: Enforce Merge Queue-Only Merges -If you want your organization to merge _exclusively_ through the merge queue: +If you want your organization to merge *exclusively* through the merge queue: * Restrict who can push to your protected branch (e.g., main). * Then allow the Trunk GitHub App as the only actor permitted to push to that branch. -This setup makes sure all merges flow through the queue and prevents developers from bypassing it accidentally. 
+This setup ensures all merges flow through the queue and prevents developers from bypassing it accidentally. ### Step 3: Test your setup @@ -70,9 +68,7 @@ Now that branch protection is configured, test that the merge queue works correc * **Checking the box** in the Trunk bot comment on your PR, or * **Commenting** `/trunk merge` on the pull request - -![](/assets/merge-github-comment.png) - +
You can submit a PR to the merge queue at any time, even before CI checks pass or code review is complete. The PR will remain in "**Queued**" state until all required conditions are met, then automatically begin testing. @@ -83,17 +79,17 @@ You can submit a PR to the merge queue at any time, even before CI checks pass o #### Troubleshooting common issues -Visit [Trunk Support](../../setup-and-administration/support) for additional assistance or to contact the support team. +Visit [Trunk Support](/setup-and-administration/support) for additional assistance or to contact the support team. If your test PR doesn't merge automatically: * **Check the status comments for the PR in** the [Trunk Dashboard](https://app.trunk.io/) to see what it's waiting for -* **Stuck in "Queued"**: Usually means branch protection rules haven't passed (missing required status checks or code review) or there are merge conflicts. If the status looks correct but the PR still won't enter the queue, try [removing](../using-the-queue/reference#submitting-and-cancelling-pull-requests) and re-adding by commenting `/trunk merge` again on the PR. +* **Stuck in "Queued"**: Usually means branch protection rules haven't passed (missing required status checks or code review) or there are merge conflicts. If the status looks correct but the PR still won't enter the queue, try [removing](/merge-queue/using-the-queue/reference#submitting-and-cancelling-pull-requests) and re-adding by commenting `/trunk merge` again on the PR. * **Fails when attempting to merge**: Check that squash merges are enabled for your repository in GitHub settings (`Settings > General > Allow squash merging`). Trunk Merge Queue requires squash merges to be enabled. -* **"Permission denied" errors**: Review the [Branch Protection](./configure-branch-protection) guide to make sure `trunk-temp/*` and `trunk-merge/*` branches aren't protected by wildcard rules like `*/*`. 
-* **Status checks not running**: Verify your CI is configured to run on draft PRs (or `trunk-merge/**` branches if using push-triggered mode). See the [Branch Protection](./configure-branch-protection) guide for details. +* **"Permission denied" errors**: Review the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide to ensure `trunk-temp/*` and `trunk-merge/*` branches aren't protected by wildcard rules like `*/*`. +* **Status checks not running**: Verify your CI is configured to run on draft PRs (or `trunk-merge/**` branches if using push-triggered mode). See the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide for details. ### Step 4: Configure advanced features -Once the basic merge queue is working, you can enable optimizations to improve performance, such as [batching](../optimizations/batching) PRs together or [allowing failed pull requests to merge](../using-the-queue/handle-failed-pull-requests) if others are passing. +Once the basic merge queue is working, you can enable optimizations to improve performance, such as [batching](/merge-queue/optimizations/batching) PRs together or [allowing failed pull requests to merge](/merge-queue/using-the-queue/handle-failed-pull-requests) if others are passing. diff --git a/merge-queue/getting-started/configure-branch-protection.mdx b/merge-queue/getting-started/configure-branch-protection.mdx index 8fdb63b..00b8142 100644 --- a/merge-queue/getting-started/configure-branch-protection.mdx +++ b/merge-queue/getting-started/configure-branch-protection.mdx @@ -1,6 +1,6 @@ --- title: "Configure branch protection" -description: "Set up GitHub branch protection so Trunk Merge Queue can admit, test, and merge pull requests through your protected branch." 
+description: "Before configuring branch protection:" --- ### Prerequisites @@ -11,14 +11,9 @@ Before configuring branch protection: * [ ] CI runs on pull requests and reports status checks to GitHub * [ ] You have admin access to repository settings -### How Branch Protection Affects the Queue +### How Trunk Merge Queue works -Trunk Merge Queue respects GitHub's branch protection rules and works with both Classic branch protection rules and Rulesets. Branch protection plays two distinct roles in how the queue operates: - -* **Admission into the queue** — Trunk doesn't admit a submitted PR for testing until GitHub considers it ready to merge. Branch protection (required reviews, required status checks, conversation resolution, etc.) is what determines when GitHub marks a PR as ready to merge, so it directly controls when a PR enters the queue. -* **Required checks during testing (optional)** — By default, Trunk waits on the same required status checks defined in your branch protection rules while testing a PR in the queue. You can override this with the Trunk UI or `.trunk/trunk.yaml` if you want a different set of checks required during queue testing. See [Required Status Checks](../administration/advanced-settings#required-status-checks). - -The configurations on this page (push restrictions for the `trunk-io` bot, and excluding `trunk-temp/**/*` and `trunk-merge/**/*` from protection) ensure branch protection doesn't *block* Trunk from doing its job. They don't change either of the roles above. +Trunk Merge Queue respects GitHub's branch protection rules and works with both Classic branch protection rules and Rulesets. Since Merge Queue ultimately merges pull requests through GitHub, any protection rules on your target branch (like required code reviews or status checks) will still apply. 
### Choose your testing approach @@ -51,7 +46,7 @@ Things to look out for: **Best for:** Teams who need different CI behavior for merge queue testing versus pull request review. -When a pull request enters the queue, Trunk creates a branch under `trunk-merge/` and pushes to it. You configure specific CI jobs to run on these branches. +When a pull request enters the queue, Trunk creates a `trunk-merge/*` branch and pushes to it. You configure specific CI jobs to run on these branches. **Advantages:** @@ -61,186 +56,64 @@ When a pull request enters the queue, Trunk creates a branch under `trunk-merge/ **Requirements:** -* Configure push-triggered workflows in your CI provider for `trunk-merge/**` branches (see [Configure CI status checks](./configure-ci-status-checks#if-using-push-triggered-mode)) +* Configure push-triggered workflows in your CI provider for `trunk-merge/**` branches +* Define required status checks in your `.trunk/trunk.yaml` [configuration file](/merge-queue/getting-started/configure-ci-status-checks#if-using-draft-pr-mode-default) **To enable:** Go to **Settings** > **Repositories** > repository > **Merge Queue** > toggle **off** **Trunk Draft PR Creation**. ### Configure Branch Protection Rules -#### Rulesets vs. Classic branch protection - -GitHub offers two systems for branch protection: [Rulesets](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/about-rulesets) and Classic branch protection rules. Both can coexist on the same branch. - -The Trunk Merge Queue GitHub App is fully supported on both systems. **Rulesets are recommended** for two reasons: - -* **Rulesets allow more granular protections.** You can layer multiple targeted rulesets on the same branch with per-actor bypass control — finer-grained than what Classic rules can express. 
-* **Repository admins can be held to the rule.** With Classic branch protection, admins bypass by default and see a green **Merge** button on every PR — easy to misclick into a merge that skips the queue. Rulesets flip the default: admins are subject to the rule unless they're explicitly on the bypass list, and even when they are, GitHub displays a red warning that they're circumventing branch protection — much harder to misclick through than a green button. - -#### Option A — GitHub Rulesets (recommended) - -Trunk Merge Queue requires **at least two rulesets** on your protected branch: - -* **Ruleset #1** restricts who can update the branch and lists Trunk on its bypass list as **Exempt** so Trunk can push merges through the queue. -* **Ruleset #2** holds your mergeability requirements (required reviews, required status checks, conversation resolution, etc.) and does **not** bypass Trunk. The queue uses these rules to decide when GitHub considers a PR ready, which is what gates [admission into the queue](#how-branch-protection-affects-the-queue). - -Splitting them keeps Trunk's bypass scope minimal: GitHub bypass permissions apply to the whole ruleset, so a single combined ruleset would force Trunk to bypass review and status checks too — the opposite of what you want. - -##### Ruleset #1 — Branch update (Trunk bypasses this) - -This ruleset lets the Trunk GitHub App update your protected branch when merging from the queue, while still preventing direct pushes from anyone else. - -1. In GitHub, go to **Settings → Rules → Rulesets** and create a new ruleset (e.g., name it `main - force push`). -2. Under **Target branches**, target the protected branch only (e.g., `main`). No exclude pattern is needed *for this ruleset* — Trunk's `trunk-temp/**/*` and `trunk-merge/**/*` branches are not in the include list, so they aren't matched. 
Other rulesets (especially at the organization level) may still need explicit excludes; see [Exempt Trunk's temporary branches from other rulesets](#exempt-trunk-temporary-branches) below. -3. Under **Rules → Branch rules**, enable **Restrict updates** ("Only allow users with bypass permission to update matching refs"). You can optionally co-locate **Restrict deletions** and **Restrict creations** in the same ruleset; the bypass list applies to the entire ruleset. -4. Under **Bypass list**, add the Trunk GitHub App (`trunk-io`) and set its bypass mode to **Exempt**. -5. If you also use [Trunk Sudo](../../setup-and-administration/trunk-sudo-app), add **Trunk Sudo** to the bypass list as **Exempt** as well. -6. Save. - - -![GitHub ruleset with Restrict updates and Restrict deletions enabled, Trunk.io and Trunk Sudo on the bypass list as Exempt](/assets/merge-github-ruleset-push.png) - - - -**Bypass mode defaults to Always — change it to Exempt.** When you add an actor to a ruleset's bypass list, GitHub defaults its bypass mode to **Always**, which sounds permissive but does not cover branch updates from a GitHub App. Trunk must be set to **Exempt**. If Trunk isn't Exempt, merges will fail with permission errors on the protected branch. - - -##### Ruleset #2 — Mergeability requirements (Trunk does NOT bypass this) - -This ruleset encodes the rules that determine when a PR is ready to merge. Trunk reads these to decide when to admit a PR into the queue. - -1. Create a second ruleset (e.g., name it `main - PRs`). -2. Target the same protected branch (e.g., `main`) with the same single-include targeting. -3. Under **Rules → Branch rules**, add the rules that gate mergeability — typically **Require a pull request before merging** and **Require status checks to pass**. Add others (signed commits, linear history, etc.) as your team requires. -4. **Do not** add the Trunk GitHub App (`trunk-io`) to the bypass list. 
The queue relies on GitHub reporting the PR as not-yet-ready until these rules pass. -5. Optionally, add **Trunk Sudo** to the bypass list as **Exempt** if you use [Force merge](../using-the-queue/force-merge). See the [Trunk Sudo page](../../setup-and-administration/trunk-sudo-app) for the full guidance. -6. Save. - - -![GitHub ruleset with Require a pull request before merging and Require status checks to pass enabled, Trunk.io not on the bypass list](/assets/merge-github-ruleset-prs.png) - - -See [Required Status Checks](../administration/advanced-settings#required-status-checks) for how the queue uses required status checks while testing PRs already in the queue. - -##### Exempt Trunk's temporary branches from other rulesets - -The two rulesets above target only your protected branch, so they don't match `trunk-temp/**/*` or `trunk-merge/**/*`. But any **other** Branch ruleset — at the **organization** level or elsewhere on this repository — whose targeting is broader (e.g., **All branches**, or a wildcard include like `**/*`) will match Trunk's temporary branches and block the queue. - - -**Symptom:** A PR enters the queue and then fails out shortly after testing starts with a GitHub permission error (e.g., "Permission denied on trunk-merge/\* branch"). You'll see this on the **Trunk Merge Queue** status check on the PR, in Trunk's status comment on the PR, and on the PR's detail page in the [Trunk dashboard](https://app.trunk.io/). This almost always means a Branch ruleset is preventing Trunk from creating, pushing to, or deleting `trunk-temp/**/*` or `trunk-merge/**/*`. - - -**Branch rulesets vs. Push rulesets.** Only **Branch** rulesets need this exemption. Branch vs. Push is a GitHub ruleset type and is unrelated to the [Push-Triggered testing mode](#push-triggered-mode-advanced) above. Push rulesets gate the *content* of pushes (file size limits, secret scanning, restricted file paths, etc.) 
rather than the branch operations the queue performs, so they can target Trunk's temporary branches without breaking the queue. To tell them apart, open a ruleset's edit page: Branch rulesets have a **Branch targeting criteria** section, while Push rulesets have **Push rules** and target repositories rather than branches. Audit only the Branch rulesets. - -**Where to look:** - -1. **Organization-level rulesets** — at the organization's **Settings → Rules → Rulesets** page. These apply across every repository and are the most commonly missed source of conflicts. -2. **Other repository-level Branch rulesets** — any Branch ruleset on this repo other than the two created above. +#### Using Rulesets vs. Classic Rules -**How to exempt Trunk's branches:** +You can use GitHub's Rulesets feature alongside Classic branch protection rules—both systems work together. However, **push permission restrictions must be configured using Classic branch protection rules only** because GitHub's API does not expose push restriction settings from Rulesets. -For each Branch ruleset whose **Branch targeting criteria** could match `trunk-temp/**/*` or `trunk-merge/**/*` (anything broader than a single protected-branch include): +All other branch protection settings (required reviews, status checks, signed commits, etc.) can be configured using either Classic rules or Rulesets. -1. Edit the ruleset. -2. Under **Branch targeting criteria**, click **Add target → Exclude by pattern** and add both: - * `trunk-temp/**/*` - * `trunk-merge/**/*` - - - The trailing `/*` is required. GitHub treats `trunk-temp/**` and `trunk-temp/**/*` differently, and only the latter actually matches (and therefore excludes) the branches Trunk creates. - -3. Save. 
- - - GitHub Branch targeting criteria with All branches included and trunk-temp/**/* and trunk-merge/**/* listed as exclude patterns - - -##### Verify your ruleset configuration - -Before submitting your first PR to the queue, confirm: - -* [ ] Ruleset #1 targets only your protected branch and has the Trunk GitHub App on the bypass list as **Exempt**. -* [ ] Ruleset #2 targets only your protected branch and does **not** bypass Trunk. -* [ ] Every other Branch ruleset visible at the organization level and on this repository either does not match `trunk-temp/**/*`/`trunk-merge/**/*`, or explicitly excludes both patterns. -* [ ] (If using [Trunk Sudo](../../setup-and-administration/trunk-sudo-app)) Trunk Sudo is on Ruleset #1's bypass list as **Exempt**. -* [ ] (If using [Trunk Sudo](../../setup-and-administration/trunk-sudo-app) **and** Force merge) Trunk Sudo is also on Ruleset #2's bypass list as **Exempt**. - -#### Migrating from Classic rules to Rulesets - -If you already use Classic branch protection, GitHub provides an **Import a ruleset** action on the [Rulesets](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/about-rulesets) page that converts an existing Classic rule into a single ruleset. Use it as a starting point, then split the imported ruleset into the two-ruleset structure above: move **Restrict updates** into Ruleset #1 with Trunk on the bypass list as Exempt, and leave the rest in Ruleset #2 with no bypass on Trunk. - - -Don't delete the original Classic rule until both rulesets are saved and verified — otherwise the branch will be temporarily unprotected. - - -#### Option B — Classic branch protection - -Classic branch protection still works with Trunk Merge Queue, but is no longer the recommended path. 
Some Classic rules (required status checks and "Require branches to be up to date") cannot be bypassed by any GitHub App, which limits features like [Force merge](../using-the-queue/force-merge). Use Rulesets when you can. - -**Configure push restrictions (required)** +#### Configure Push Restrictions (Required) Trunk Merge Queue needs permission to push to your protected branch. Configure these settings using Classic branch protection rules: - -![](/assets/merge-github-classic-branch-rules.png) - +
-1. Go to **Settings → Branches** in your repository on GitHub. -2. Edit or create a Classic branch protection rule for your target branch (e.g., `main`). +1. Go to **Settings > Branches** in your repository on GitHub. +2. Edit or create a Classic branch protection rule for your target branch (e.g., `main`) 3. Under "Rules applied to everyone including administrators," select: * **Restrict who can push to matching branches** * **Restrict pushes that create matching branches** -4. Add the `trunk-io` bot to the list of allowed actors. -5. Optionally, add Organization admins and repository admins who need emergency merge access. -6. Save your changes. +4. Add the `trunk-io` bot to the list of allowed actors +5. Optionally, add Organization admins and repository admins who need emergency merge access +6. Save your changes -**Important:** Regular users should use [pull request prioritization](../optimizations/priority-merging) with `--priority=urgent` or `--priority=high` to fast-track pull requests through the queue while maintaining validation. Direct push access is only needed for rare emergencies where the queue itself must be bypassed. +**Important:** Regular users should use [pull request prioritization](/merge-queue/optimizations/priority-merging) with `--priority=urgent` or `--priority=high` to fast-track pull requests through the queue while maintaining validation. Direct push access is only needed for rare emergencies where the queue itself must be bypassed. -**Exclude Trunk's temporary branches (critical)** +#### Exclude Trunk's temporary branches (Critical) Trunk Merge Queue creates temporary branches to test pull requests before merging them: -* `trunk-temp/**/*` — temporary testing branches -* `trunk-merge/**/*` — merge testing branches - - -**Trunk needs unrestricted access** to create, push to, and delete these branches. If your branch protection rules apply to these branches, Merge Queue cannot function. 
- - -To verify and fix: - -1. Go to **Settings → Branches** in your repository. -2. Review all Classic branch protection rules. -3. Check for wildcard patterns like `*/*`, `**/*`, or similar that would match `trunk-temp/**/*` or `trunk-merge/**/*`. -4. If you find matching rules, either: - * Remove the wildcard rules and create more specific rules for your actual branches, or - * Add the `trunk-io` bot to the bypass list for those rules. - -**Example of a problematic rule:** a branch protection rule with pattern `*/*` would protect all branches including `trunk-temp/**/*` and `trunk-merge/**/*`. - -**What happens if these branches are protected:** Merge Queue encounters GitHub permission errors and displays messages like "Permission denied on trunk-merge/\* branch." +* `trunk-temp/*` - Temporary testing branches +* `trunk-merge/*` - Merge testing branches -**Also check rulesets, even if you only use Classic protection.** Organization-level Branch rulesets and other repository-level Branch rulesets apply on top of Classic rules and can match `trunk-temp/**/*`/`trunk-merge/**/*` independently. See [Exempt Trunk's temporary branches from other rulesets](#exempt-trunk-temporary-branches) for how to audit and fix them. +**Trunk needs unrestricted access** to create, push to, and delete these branches. If your branch protection rules apply to these branches, Merge Queue cannot function. - -**Using Force merge or other bypass-dependent features?** Features like [Force merge](../using-the-queue/force-merge) require the separate [Trunk Sudo GitHub App](../../setup-and-administration/trunk-sudo-app), plus additional branch protection configuration to list Trunk Sudo as a bypass actor. That's documented on the Trunk Sudo page. - - -##### Verify your Classic configuration +**To verify and fix:** -Before submitting your first PR to the queue, confirm: +1. Go to **Settings > Branches** in your repository +2. Review all Classic branch protection rules +3. 
Check for wildcard patterns like `*/*`, `**/*`, or similar that would match `trunk-temp/*` or `trunk-merge/*` +4. If you find matching rules, either: + * **Option A:** Remove the wildcard rules and create more specific rules for your actual branches + * **Option B:** Add the `trunk-io` bot to the bypass list for those rules -* [ ] The `trunk-io` GitHub App is in the list of allowed actors for push restrictions on your protected branch. -* [ ] No Classic branch protection rule on this repository uses a wildcard pattern (e.g., `*/*`, `**/*`) that matches `trunk-temp/**/*` or `trunk-merge/**/*` — or, if one does, the `trunk-io` bot is on its bypass list. -* [ ] Every Branch ruleset visible at the organization level and on this repository either does not match `trunk-temp/**/*`/`trunk-merge/**/*`, or explicitly excludes both patterns. (Push rulesets do not need this exemption — see [Exempt Trunk's temporary branches from other rulesets](#exempt-trunk-temporary-branches).) -* [ ] (If using [Trunk Sudo](/setup-and-administration/trunk-sudo-app)) Trunk Sudo is configured per the Trunk Sudo page. +**Example of a problematic rule:** A branch protection rule with pattern `*/*` would protect all branches including `trunk-temp/*` and `trunk-merge/*`. +**What happens if these branches are protected:** Merge Queue will encounter GitHub permission errors and display messages like "Permission denied on trunk-merge/\* branch." ### Next Steps -→ [**Configure CI status checks**](./configure-ci-status-checks) **-** Configure CI status checks for your branch. +→ [**Configure CI status checks**](/merge-queue/getting-started/configure-ci-status-checks) **-** Configure CI status checks for your branch. -_Having trouble?_ See our [Troubleshooting guide](../reference/troubleshooting) for common installation issues. +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. 
diff --git a/merge-queue/getting-started/configure-ci-status-checks.mdx b/merge-queue/getting-started/configure-ci-status-checks.mdx index ed1e58b..f05543e 100644 --- a/merge-queue/getting-started/configure-ci-status-checks.mdx +++ b/merge-queue/getting-started/configure-ci-status-checks.mdx @@ -1,26 +1,31 @@ --- title: "Configure CI status checks" -description: "Make sure your CI runs whenever Trunk Merge Queue tests a pull request." +description: "Your existing pull request-triggered CI workflows will automatically run when Trunk creates draft pull requests to test changes. No additional configuration is required." --- -This page covers how to make sure your CI checks run on the branches Trunk Merge Queue creates while testing a pull request. What you need to do depends on the testing mode you selected in [Configure branch protection](./configure-branch-protection): +### If using Draft PR mode (Default) -* **Draft PR mode (default)** — no additional CI configuration is required. -* **Push-Triggered mode** — you need to add a CI workflow that triggers on pushes to `trunk-merge/**`. +Your existing pull request-triggered CI workflows will automatically run when Trunk creates draft pull requests to test changes. **No additional configuration is required.** -### If using Draft PR mode (default) +Trunk will wait for the same required status checks configured in your branch protection rules (either via Classic rules or Rulesets) before merging. -Your existing pull request-triggered CI workflows will automatically run when Trunk creates draft pull requests to test changes. **No additional configuration is required.** + +You can also configure required status checks directly in the Trunk UI instead of relying on GitHub branch protection. See [Required Status Checks](/merge-queue/administration/advanced-settings#required-status-checks) in the settings documentation. 
+ -See GitHub's documentation for configuring required status checks on your protected branch: +See GitHub's documentation for configuring required status checks: * [Classic branch protection rules](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches#require-status-checks-before-merging) * [Rulesets](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-rulesets/about-rulesets) -**You're done!** Skip to the [Verification](./test-your-setup) section. +**You're done!** Skip to the Verification section below. ### If using Push-Triggered mode -Set up your CI provider to run status checks whenever Trunk pushes to `trunk-merge/**` branches. +You need to complete two additional steps: + +**Step 1: Configure Push-Triggered CI Workflows** + +Set up your CI provider to run status checks whenever Trunk pushes to `trunk-merge/*` branches. **Example for GitHub Actions:** @@ -41,7 +46,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 - + - name: Run tests run: npm test # Your actual test commands @@ -51,27 +56,29 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 - + - name: Run integration tests run: npm run test:integration # Your actual test commands ``` **For other CI providers:** Configure workflows triggered by pushes to branches matching `trunk-merge/**`. -### Required Checks During Queue Testing - -By default, Merge Queue waits on the same required status checks defined in your GitHub branch protection rules while testing a PR. If you want a different set of checks required during queue testing — for example, because you don't use GitHub branch protection, or because the queue should require different checks than PR review — you can override that in the Trunk UI or in `.trunk/trunk.yaml` (`merge.required_statuses`). Both overrides work in either testing mode. 
+**Step 2: Define Required Status Checks in .trunk/trunk.yaml** - -**These checks are what Merge Queue waits on while a PR is already in the queue and testing. They do not control which PRs are admitted into the queue.** - +Create or edit your `trunk.yaml` file in a directory named `.trunk` at the root of your repository (so, `.trunk/trunk.yaml`) to specify which status checks Trunk should wait for before merging: -PR admission is governed separately: Trunk waits until GitHub considers the PR ready to merge (driven by your [branch protection rules](./configure-branch-protection#how-branch-protection-affects-the-queue)) before testing begins. If your queue is running in [parallel mode](../optimizations/parallel-queues/index), Trunk additionally waits for the [impacted targets](../optimizations/parallel-queues/index#what-are-impacted-targets) of that PR to be uploaded. +```yaml +version: 0.1 +merge: + required_statuses: + - Unit Tests + - Integration Tests +``` -See [Required Status Checks](../administration/advanced-settings#required-status-checks) for the full set of options. +**Important:** The status check names in `.trunk/trunk.yaml` must exactly match the job names from your CI workflows. ### Next Steps -→ [**Test your setup**](./test-your-setup) - Verify everything is configured correctly before using Merge Queue in production. +→ [**Test your setup**](/merge-queue/getting-started/test-your-setup) - Verify everything is configured correctly before using Merge Queue in production. -_Having trouble?_ See our [Troubleshooting guide](../reference/troubleshooting) for common installation issues. +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. 
diff --git a/merge-queue/getting-started/install-and-create-your-queue.mdx b/merge-queue/getting-started/install-and-create-your-queue.mdx index bd3278f..4905e21 100644 --- a/merge-queue/getting-started/install-and-create-your-queue.mdx +++ b/merge-queue/getting-started/install-and-create-your-queue.mdx @@ -1,6 +1,6 @@ --- title: "Install and create your queue" -description: "Install the Trunk GitHub App, connect your repository, and create your first merge queue." +description: "This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protection rules." --- This guide walks you through setting up Trunk Merge Queue for your repository. The setup process involves installing the GitHub App, creating a queue, and configuring branch protection rules to allow the merge queue to function properly. @@ -19,11 +19,11 @@ The GitHub App installation must be initiated from the Trunk web app to properly ### Install the Trunk GitHub App -1. [Sign in to app.trunk.io](https://app.trunk.io/login) and navigate to the **Merge Queue** tab. (First-time users will [create an organization](../../setup-and-administration/connecting-to-trunk) before accessing Merge Queue.) +1. [Sign in to app.trunk.io](https://app.trunk.io/login) and navigate to the **Merge Queue** tab. (First-time users will [create an organization](/setup-and-administration/connecting-to-trunk) before accessing Merge Queue.) 2. Click the **Create New Queue** button at the top right corner of the window. -**The Trunk GitHub App is required for Merge Queue to function.** It grants Trunk Merge Queue the necessary permissions to create test branches, read CI results, and merge PRs in your repository. View [detailed permissions and what Trunk uses them for](../../setup-and-administration/github-app-permissions). 
+**The Trunk GitHub App is required for Merge Queue to function.** It grants Trunk Merge Queue the necessary permissions to create test branches, read CI results, and merge PRs in your repository. View [detailed permissions and what Trunk uses them for](/setup-and-administration/github-app-permissions). If the GitHub App is already installed, step 3 will be skipped automatically. @@ -40,23 +40,14 @@ If the GitHub App is already installed, step 3 will be skipped automatically. 4. In the **Merge Queue** tab, click the **Create New Queue** button at the top right corner of the window. 5. Select a repository from the dropdown and enter the target branch to merge into. Click **Create Queue.** - -![](/assets/merge-add-repo_(1).png) - +
### What you just did You've installed the Trunk GitHub App on your organization and created your first merge queue for the specified branch (`main` in the example above). Trunk is now connected to your repository and ready to be configured. Your queue won't start processing pull requests until you complete the branch protection setup in the next step. - -**Need multiple queues?** You can create additional queues for the same repository targeting different branches (e.g., `staging`, `release/v2`). Each queue operates independently with its own settings. See [Multiple queues per repository](../administration/advanced-settings#multiple-queues-per-repository) for details. - - ### Next steps -→ [**Configure branch protection**](./configure-branch-protection) - Set up GitHub rules so Trunk can safely manage your merges - -_Having trouble?_ See our [Troubleshooting guide](../reference/troubleshooting) for common installation issues. - - +→ [**Configure branch protection**](/merge-queue/getting-started/configure-branch-protection) - Set up GitHub rules so Trunk can safely manage your merges +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. diff --git a/merge-queue/getting-started/test-your-setup.mdx b/merge-queue/getting-started/test-your-setup.mdx index 05a3fb7..2a870c5 100644 --- a/merge-queue/getting-started/test-your-setup.mdx +++ b/merge-queue/getting-started/test-your-setup.mdx @@ -1,6 +1,6 @@ --- title: "Test your setup" -description: "Verify your Trunk Merge Queue installation by submitting a test PR and confirming it merges automatically." 
+description: "After completing configuration, verify your setup:" --- ### Prerequisites @@ -30,26 +30,24 @@ After completing configuration, verify your setup: #### Start using Merge Queue -→ [**Submit and cancel pull requests**](../using-the-queue/reference) - Learn how to use the queue day-to-day +→ [**Submit and cancel pull requests**](/merge-queue/using-the-queue/reference) - Learn how to use the queue day-to-day #### Optimize your queue Ready to make it even better? Explore these optimizations -→ [**Predictive Testing**](../optimizations/predictive-testing) - Prevent queue collapse and increase throughput +→ [**Predictive Testing**](/merge-queue/optimizations/predictive-testing) - Prevent queue collapse and increase throughput -→ [**Batching**](../optimizations/batching) - Merge multiple PRs together for faster processing +→ [**Batching**](/merge-queue/optimizations/batching) - Merge multiple PRs together for faster processing -→ [**Priority merging**](../optimizations/priority-merging) - Fast-track urgent PRs +→ [**Priority merging**](/merge-queue/optimizations/priority-merging) - Fast-track urgent PRs -→ [**Anti-flake protection**](../optimizations/anti-flake-protection) - Handle flaky tests automatically +→ [**Anti-flake protection**](/merge-queue/optimizations/anti-flake-protection) - Handle flaky tests automatically #### Configure integrations -→ [**Integration for Slack**](../integration-for-slack) - Get notifications in Slack +→ [**Slack integration**](/merge-queue/integration-for-slack) - Get notifications in Slack -→ [**Metrics and monitoring**](../administration/metrics) - Track your queue's performance +→ [**Metrics and monitoring**](/merge-queue/administration/metrics) - Track your queue's performance - - -_Having trouble?_ See our [Troubleshooting guide](../reference/troubleshooting) for common installation issues. +*Having trouble?* See our [Troubleshooting guide](/merge-queue/reference/troubleshooting) for common installation issues. 
diff --git a/merge-queue/integration-for-slack.mdx b/merge-queue/integration-for-slack.mdx index 68e66cf..6853ae8 100644 --- a/merge-queue/integration-for-slack.mdx +++ b/merge-queue/integration-for-slack.mdx @@ -1,192 +1,160 @@ --- -title: "Integration for Slack" -description: "Send merge queue updates to multiple Slack channels and receive personal DM notifications — all powered by the Trunk Slack app." +title: "Slack Integration" +description: "Push updates about your queue status to Slack to keep your team informed." --- -Trunk Merge Queue integrates with Slack to send real-time notifications about queue activity and pull request state changes. You can route notifications to **multiple Slack channels** per repository, each with its own set of enabled topics, and receive **personal DMs** about your own PRs directly in Slack. +Trunk Merge Queue integrates with Slack to send real-time notifications about queue activity and pull request state changes to a designated channel or directly to you as personal notifications. - -For details on how Trunk collects, manages, and stores your data, see our [Security and Privacy](../setup-and-administration/security) page. - +
+After you have Merge Queue set up and running in your repository, you can configure Slack notifications to receive alerts for various queue events. - -![](/assets/slack-multi-channel.png) - +## Channel Notifications -## Installing the Trunk Slack App +Send merge queue updates to a shared team Slack channel to keep everyone informed about queue activity. -Before you can set up channel notifications or personal DMs, a Slack workspace admin must install the Trunk Slack app for your organization. This is a one-time setup that enables all Slack integration features. +### Enable Merge Queue Notifications -### Steps to Install +After you have Merge set up and running in your repository, you can set up your integration with Slack under **Merge Queue** tab **>** repository **> Settings >** **Connect with Slack**. -1. In the Trunk web app, navigate to **Settings > Organization > Slack**. -2. Click **Add to Slack**. -3. Review and approve the requested permissions on the Slack OAuth screen. -4. You'll be redirected back to Trunk. The page will show your workspace as **Connected** along with the workspace name. +
- -![](/assets/slack-workspace-connect.png) - +1. Navigate to `Settings > Repositories > [your repository] > Merge Queue` +2. Find the **Connect with Slack** setting and click **Connect** to install the Trunk Slack application +3. Authorize the app to post to your desired channel +4. You'll be redirected back to your settings page once authorization is complete - -![](/assets/slack-workspace-oauth.png) - +### Configuring Channel Notification Preferences - -![](/assets/slack-workspace-connected.png) - +After connecting to a Slack channel, you can customize which notifications you want to receive. By default, all Merge Queue notifications are enabled. +
-### Managing the Connection +You can toggle individual notification types on or off. See Available Notification Topics below for descriptions of each notification type. -Once connected, you can **Reconnect** (to reauthorize) or **Disconnect** the workspace from the same settings page. + +**Tip:** Want to receive these notifications as personal DMs instead of in a shared channel? Check out our [Personal Slack Notifications](#personal-slack-notifications) setup guide. + - -**Migrating from legacy Slack integration?** If your organization previously connected Slack through the per-repo "Connect with Slack" flow, you still need to complete this new workspace-level installation to access multi-channel notifications and the new personal DM features. After installing, you can set up channel connections and personal notifications using the new workflows described below. - +## Personal Slack Notifications -## Channel Notifications +Get direct messages in Slack about your PRs in the merge queue, keeping you informed without adding noise to team channels. -Send merge queue updates to one or more shared Slack channels to keep your team informed about queue activity. Each channel can have its own set of enabled notification topics. +Personal Slack notifications allow you to receive personalized Slack DMs when your PRs are queued, start testing, pass tests, get merged, or encounter issues. This keeps you up-to-date on the progress of your code through the merge queue without needing to check the web UI or monitor shared channels. -### Connecting Slack Channels +### Setting up Personal Notifications + +To receive personal Slack notifications, you'll need to connect both your GitHub and Slack accounts to Trunk and configure your notification preferences. -**Prerequisite:** The Trunk Slack app must be [installed for your organization](./integration-for-slack#installing-the-trunk-slack-app) before you can connect channels. 
+**Note:** If your organization isn't already using merge queue Slack notifications to a shared channel, a Slack workspace admin may need to approve the Trunk Slack app before you can connect your personal account. See [Enable Merge Queue Notifications](#enable-merge-queue-notifications) for details on setting up the initial Slack integration. -1. Navigate to **Settings > Repositories > \[your repository] > Merge Queue**. -2. Under **Slack Notifications**, click **Add Channel**. -3. In the **Add Slack Channel** modal, select a channel from the dropdown. -4. Toggle the notification topics you want enabled for that channel. -5. Click **Connect**. - -You can connect **multiple channels**, each with a different set of enabled topics. For example, you might send all notifications to a `#merge-notifications` channel while only sending failure alerts to a `#merge-queue-failures` channel. +#### **Steps to Enable Personal Notifications** - -![](/assets/slack-multiple-channels.png) - +1. **Navigate to User Settings** + * Go to **User Settings > Notifications** in the Trunk web app + * These settings are specific to you, so you can customize them however you prefer +2. **Connect GitHub Account** + * Click **Connect GitHub** to begin the OAuth flow + * This verifies that you own your GitHub account and allows Trunk to link your PRs to your user profile +3. **Connect Slack Account** -The channel list displays each connected channel along with a summary of how many notification topics are enabled (e.g., "6/9 enabled"). To remove an individual channel, click the trash icon next to it. To remove all channel connections for the repository, click **Disconnect**. + * Click **Connect to Slack** to authorize the Trunk Slack app to send you direct messages -### Managing Channel Notification Preferences - -Each connected channel has its own independent set of notification topics. You can expand any channel in the list to view and toggle its topics on or off. 
Changes take effect immediately. - -See [Available Notification Topics](./integration-for-slack#available-notification-topics) below for descriptions of each notification type. + > **Important:** If your organization hasn't already installed the Trunk Slack app for channel notifications, a Slack workspace admin may need to approve the app before you can receive personal notifications. +4. **Configure Notification Preferences** + * Enable the specific notifications you want to receive via Slack DM + * See Available Personal Notification Topics below for descriptions of each notification type +5. **Manage Connections** + * You can disconnect your GitHub or Slack accounts at any time by clicking the respective **Disconnect** buttons in User Settings -**Tip:** Want to receive notifications about your own PRs as personal DMs instead of in a shared channel? Check out the [Personal Slack Notifications](./integration-for-slack#personal-slack-notifications) setup guide. +**Tip:** Want to receive these same notifications in a shared Slack channel instead? Check out our team [Channel Slack Notifications](#channel-notifications) setup guide. -## Personal Slack Notifications +
-Get direct messages in Slack about your PRs as they move through the merge queue — queued, testing, merged, failed, and more — without adding noise to team channels. +## Slack App Home Dashboard -### Setting up Personal Notifications +The Trunk Slack app's **Home** tab provides a personal merge queue dashboard directly in Slack. Open the Trunk app in Slack and click the **Home** tab to see an overview of your merge queue activity. - -**Prerequisite:** The Trunk Slack app must be [installed for your organization](./integration-for-slack#installing-the-trunk-slack-app) before personal notifications can be configured. If the app hasn't been installed yet, the Home tab will display a warning directing a Slack admin to complete the installation. - +### What you'll see -Personal notification setup is done from the **Trunk Slack app's Home tab** in Slack: +The Home tab displays the following sections: -1. Open the **Trunk** app in Slack. If you don't see it in your sidebar, add it via **Apps > Manage > Browse Apps** and search for "Trunk." -2. Go to the **Home** tab. -3. Click **Link Account** to connect your Trunk account to Slack. -4. Connect your **GitHub account** from the Home tab. This is required for PR tracking and most notifications. -5. Configure your notification preferences using the toggles on the Home tab. +* **Account connection status** — Shows whether your Trunk and GitHub accounts are linked. If GitHub is not connected, a warning explains that most notifications require it, with a button to start the GitHub OAuth flow. +* **Not Ready PRs** — PRs you've submitted to the queue that are waiting for prerequisites (e.g., passing required checks, no merge conflicts) before entering active testing. Grouped by queue. +* **PRs in Queue** — Your PRs that are currently pending, testing, or have passed tests, with status indicators. Grouped by queue. +* **Recently Merged PRs** — Up to 5 of your most recently merged PRs with merge dates. Grouped by queue. 
+* **Failed PRs** — Your PRs that failed in the queue. Grouped by queue. +* **Notification Preferences** — Toggle buttons for each notification topic. You can enable or disable individual notifications directly from Slack without visiting the web UI. - -![](/assets/slack-home-connect.png) - +Each PR entry shows the PR title, number, a link to the Trunk dashboard, and a link to the GitHub PR. All data is scoped to your PRs via your linked GitHub account. -### Using the Trunk Web UI +### Linking your account -You can also start setup from the Trunk web app, which will redirect you to Slack to complete the process: +1. Open the Trunk app in Slack and go to the **Home** tab +2. Click **Link Account** to connect your Trunk account +3. Click **Connect GitHub** to link your GitHub account (required for PR tracking and most notifications) -1. Navigate to **Settings > Account > Notifications** in Trunk. -2. Under **Connect your Slack workspace**, verify your workspace is connected. If not, click **Go to Slack settings** to install the app first. -3. Click **Open in Slack** to jump to the Trunk app's Home tab, where you'll link your account and configure notifications. +### Managing notification preferences - -![](/assets/slack-dm-start-connection.png) - +You can toggle notification topics on or off directly from the Home tab — no need to visit the Trunk web UI. Changes take effect immediately. The available topics are the same as those listed in [Available Notification Topics](#available-notification-topics). - -**Tip:** Want to send notifications to a shared team channel instead? Check out the [Channel Notifications](./integration-for-slack#channel-notifications) setup guide. - +Use the **Refresh** button at the top of the Home tab to update the view with the latest queue data. -## Slack App Home Dashboard +## Frequently Asked Questions -The Trunk Slack app's **Home** tab provides a personal merge queue dashboard directly in Slack. 
Open the Trunk app in Slack and click the **Home** tab to see an overview of your merge queue activity across all repositories. +
-### What You'll See +Do I need both GitHub and Slack connected to receive personal notifications? -The Home tab displays the following sections: +Yes, both connections are required. The GitHub connection links your PRs to your Trunk user account, and the Slack connection enables direct messaging. -* **Refresh** — A button at the top of the Home tab to update the view with the latest queue data, along with a "Last refreshed" timestamp. -* **Account connection status** — Shows your connected identity (e.g., "Connected as **Your Name**"), an **Unlink Account** button, and your GitHub account connection status. You can connect your GitHub account directly from the Home tab if it isn't linked yet. -* **Not Ready** — PRs you've submitted that are waiting for prerequisites (e.g., GitHub mergeability) before entering the queue. -* **PRs in Queue** — Your PRs that are currently in the queue, with real-time status indicators (e.g., "Testing"). -* **Recently Merged PRs** — Your most recently merged PRs, with merge dates. -* **Failed PRs** — Your PRs that failed in the queue. -* **Notification Preferences** — Toggle buttons for all notification topics. You can enable or disable individual notifications directly from Slack without visiting the web UI. +
-All PR sections are grouped by repository and branch. Each PR entry shows the title, PR number, and a link to the GitHub PR. Data is shown across **all merge queues** you submit to, scoped to your PRs via your linked GitHub account. +
-### Linking Your Account +What's the difference between personal notifications and channel notifications? -To use the Home tab, you need to link your Trunk and GitHub accounts. Follow the steps in [Setting up Personal Notifications](./integration-for-slack#setting-up-personal-notifications) — the same account linking process powers both the dashboard and personal DMs. +[Personal notifications](#personal-slack-notifications) are sent directly to you via Slack DM and only include updates about your own PRs. -### Managing Notification Preferences +[Channel notifications](#channel-notifications) are sent to a shared team channel and include updates about all PRs in the merge queue. -You can toggle notification topics on or off directly from the Home tab — no need to visit the Trunk web UI. Changes take effect immediately. The available topics are the same as those listed in [Available Notification Topics](./integration-for-slack#available-notification-topics). +You can use both simultaneously to stay informed personally while keeping your team updated. Learn more about setting up channel notifications. -## Frequently Asked Questions +
+ +
- - -Yes, both connections are required. Link your Trunk account from the Slack app's **Home** tab to establish the Slack connection, then connect your GitHub account from the same tab to link your PRs to your Trunk profile. - +Can I customize which notifications I receive? - -[Personal notifications](./integration-for-slack#personal-slack-notifications) are sent directly to you via Slack DM and only include updates about your own PRs. They are set up from the Trunk Slack app's Home tab. +Yes, in **Settings** > **Account** > **Notifications**, you can toggle individual notification topics on or off based on your preferences. For example, you might only want to be notified when your PR fails or gets merged, rather than at every stage. -[Channel notifications](./integration-for-slack#channel-notifications) are sent to one or more shared team channels and include updates about all PRs in the merge queue. You can connect multiple channels per repository, each with different notification topics. +
-You can use both simultaneously to stay informed personally while keeping your team updated. - +
- -Yes. For personal notifications, toggle topics on or off from the Trunk Slack app's **Home** tab. For channel notifications, configure topics per channel under **Settings > Repositories > \[your repository] > Merge Queue** in the Trunk web app. - +What happens if I disconnect my GitHub or Slack account? - -You can unlink your account from the Trunk Slack app's Home tab using the **Unlink Account** button. Disconnecting stops personal Slack notifications. You can reconnect at any time by returning to the Home tab and clicking **Link Account**. - +Disconnecting either account will stop personal Slack notifications. You can reconnect at any time through **Settings** > **Account** > **Notifications**. - -The Trunk app must first be [installed at the organization level](./integration-for-slack#installing-the-trunk-slack-app) by a Slack workspace admin. After that, individual users can add it to their sidebar: in Slack, go to **Apps > Manage > Browse Apps**, search for "Trunk," and click **Add**. - - +
## Available Notification Topics -Both channel and personal Slack notifications support the same notification topics. You can customize which events trigger notifications for each channel or for your personal DMs. +Both channel and personal Slack notifications use the same notification topics. You can customize which events trigger notifications based on your preferences. - -![](/assets/slack-notification-topics.png) - +
| Notification | Description | | --------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Merge is updated | The merge queue's configuration was changed. This covers anything that changes how the queue acts, including: pausing or draining the queue, changing its mode, changing testing concurrency, and so on. | -| Pull request is submitted for merging | A pull request has been [submitted to the queue](/merge-queue/getting-started#submit-pull-requests) | +| Pull request is submitted for merging | A pull request has been [submitted to the queue](/merge-queue/using-the-queue/reference#submitting-and-cancelling-pull-requests) | | Pull request is admitted to the queue and is waiting to be tested | A pull request has been admitted to the queue and will begin testing as soon as it can | | Pull request is testing | Trunk merge has begun testing a pull request | -| Pull request has passed tests | Testing has passed on a pull request. The PR will be merged when it reaches the top of the queue | +| Pull request has passed tests | Testing has passed on a pull request. 
The PR will be merged when it reaches the top of the queue | | Pull request is merged | A pull request submitted to the queue has successfully been merged into its target branch | | Pull request fails | Testing failed on a pull request and it was removed from the queue or Trunk failed to merge the PR into its target branch | | Pull request is canceled | A pull request has been canceled, either manually or due to it [reaching a configured testing timeout](/merge-queue/administration/advanced-settings#timeout-for-tests-to-complete) | -| Pull request failed and is waiting for PRs in front of it to finish testing |

A pull request failed testing, but the pull request is currently waiting before being kicked. This can happen for one of two reasons:
1. The pull request is not at the head of the queue, so it is waiting to determine if it is the source of the failure or if a PR it depends on is the cause
2. Pending Failure Depth is enabled and the PR is waiting for other PRs below it to finish testing

| +| Pull request failed and is waiting for PRs in front of it to finish testing |

A pull request failed testing, but the pull request is currently waiting before being kicked. This can happen for one of two reasons:
1. The pull request is not at the head of the queue, so it is waiting to determine if it is the source of the failure or if a PR it depends on is the cause
2. Pending Failure Depth is enabled and the PR is waiting for other PRs below it to finish testing

| diff --git a/merge-queue/merge-queue.mdx b/merge-queue/merge-queue.mdx index 3e74a06..f700e5a 100644 --- a/merge-queue/merge-queue.mdx +++ b/merge-queue/merge-queue.mdx @@ -4,7 +4,7 @@ description: "Merge queue that guarantees branch stability and accelerates devel --- If you've hit the limits of GitHub's serial merge queue - main turning red, CI costs spiraling, chaos at scale - Trunk Merge Queue is the enterprise upgrade built for reliability at any scale. Handle your noisiest pipelines, cut CI costs up to 90%, and fire and forget. -*** +--- ### Benefits of using Trunk Merge Queue @@ -19,12 +19,12 @@ Trunk Merge Queue solves three critical problems that break traditional workflow **Key capabilities:** * Anti-flake protection with optimistic merging -* Pending failure depth holds failed PRs while successors test, enabling automatic flake recovery +* Pending failure depth prevents cascade failures * Automatic quarantine of flaky tests -→ Learn about [anti-flake protection](./optimizations/anti-flake-protection) +→ Learn about [anti-flake protection](/merge-queue/optimizations/anti-flake-protection) -*** +--- #### #2: Stop CI costs from spiraling @@ -39,9 +39,9 @@ Trunk Merge Queue solves three critical problems that break traditional workflow * Auto-Bisection * Configurable batch size & wait time -→ See how [batching](./optimizations/batching) works +→ See how [batching](/merge-queue/optimizations/batching) works -*** +--- #### #3: Stop waiting in a serial queue @@ -56,9 +56,9 @@ Trunk Merge Queue solves three critical problems that break traditional workflow * Impacted targets analysis * Priority merging -→ Explore [parallel queues](./optimizations/parallel-queues/) +→ Explore [parallel queues](/merge-queue/optimizations/parallel-queues) -*** +--- ### Try Trunk Merge Queue @@ -68,6 +68,6 @@ Trunk Merge Queue solves three critical problems that break traditional workflow 2. Create your first queue (2 minutes) 3. 
Submit a test PR -**Total setup time: < 10 minutes** +**Total setup time: < 10 minutes** -→ [Get started](./getting-started/) +→ [Get started](/merge-queue/getting-started) diff --git a/merge-queue/migrating-from-github-merge-queue.mdx b/merge-queue/migrating-from-github-merge-queue.mdx index da4abd2..0c717ef 100644 --- a/merge-queue/migrating-from-github-merge-queue.mdx +++ b/merge-queue/migrating-from-github-merge-queue.mdx @@ -1,59 +1,34 @@ --- title: "Migrate from GitHub Merge Queue" -description: "Switch from GitHub's native merge queue to Trunk Merge Queue with minimal disruption to your workflow." +description: "For teams switching from GitHub Merge Queues to Trunk Merge Queue, the process is straight forward." --- For teams switching from [GitHub Merge Queues](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/merging-a-pull-request-with-a-merge-queue) to Trunk Merge Queue, the process is straight forward. - + Looking for a more detailed comparison between Trunk and GitHub's Merge Queues? [Learn more](https://trunk.io/trunk-vs-github-merge-queue) - + -*** +--- ### Turn off GitHub Merge Queue -To start, you will need to disable the existing merge queue for the target repository. This can be done by navigating to the repository and opening **Settings > Branches >** branch rule **>** toggle **off Require merge queue.** Be sure to click **Save changes** to confirm the settings. +To start, you will need to disable the existing merge queue for the target repository. This can be done by navigating to the repository and opening **Settings > Branches >** branch rule **>** toggle **off Require merge queue.** Be sure to click **Save changes** to confirm the settings. Note that only users with admin permissions can manage merge queues for pull requests targeting selected branches of a repository. 
More information on [manage merge queues](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/managing-a-branch-protection-rule#creating-a-branch-protection-rule) can be found in the GitHub documentation. -*** +--- ### Enable Trunk Merge Queue -Follow the [Getting Started](./getting-started/) to setup your repo with Trunk Merge Queue and configure the [settings](./administration/advanced-settings) for your repository. - -*** - -### Running both merge queues simultaneously - -Many teams prefer a gradual migration approach where Trunk Merge Queue runs alongside GitHub Merge Queue before fully switching over. This is a common path for teams migrating from GitHub's merge queue to Trunk and works well for several reasons: - -#### No Disruption to Existing Workflows - -Enabling Trunk Merge Queue does not stop or prevent your current merging flow. GitHub's merge queue will continue to function normally and merge PRs as it always has. Your team can continue using their familiar workflow while you evaluate Trunk Merge Queue. - -#### Disable Comments During Evaluation - -To prevent confusion for developers who aren't yet aware of the migration, you can disable the comments Trunk leaves on PRs. This way, developers won't see unfamiliar comments about Trunk Merge Queue while you're still evaluating. +Follow the [Getting Started](/merge-queue/getting-started) to setup your repo with Trunk Merge Queue and configure the [settings](/merge-queue/administration/advanced-settings) for your repository. -This setting is found under **Merge Queue** tab **>** repository **> Settings >** toggle **off GitHub Comments.** - -#### Trunk Handles External Merges Gracefully - -Trunk Merge Queue understands when a PR is merged outside of its queue (for example, through GitHub's merge queue): - -- **If the PR is also in Trunk's queue**: Trunk will automatically mark it as merged on its side. 
-- **If the PR is not in Trunk's queue**: Trunk will restart any PRs currently in its queue so they can test on top of the new commit. - -This ensures that Trunk always tests against the latest state of your target branch, regardless of how PRs are merged. - -*** +--- -### Pre-migration +### Pre migration -Before migrating fully, it may be useful to evaluate the workflows quietly and confirm settings before converting your repository to an entirely new workflow. +Before migrating fully, it may be useful to evaluate the workflows "quiety" and confirm settings before converting your repository to an entirely new workflow. Here are some useful steps to get you familiar with the Trunk Merge Queue workflow without disrupting engineers. @@ -65,7 +40,7 @@ This setting is found under **Merge Queue** tab **>** repository **> Settings >* #### Make the switch -Once you have [configured settings](./administration/advanced-settings) and tested out the workflow quietly, turn off other merge tools (like [GitHub merge queue](./migrating-from-github-merge-queue#turn-off-github-merge-queue)), re-enable GitHub comments in the Trunk web app under the **Merge Queue** tab **>** repository **> Settings >** toggle **on GitHub Comments** +Once you have [configured settings](/merge-queue/administration/advanced-settings) and tested out the workflow quietly, turn off other merge tools (like [GitHub merge queue](#turn-off-github-merge-queue)), re-enable GitHub comments in the Trunk web app under the **Merge Queue** tab **>** repository **> Settings >** toggle **on GitHub Comments** It is important that a repository is configured to use ONLY Trunk Merge Queue and no other merge queue tools for best results. @@ -73,9 +48,9 @@ It is important that a repository is configured to use ONLY Trunk Merge Queue an #### Share the news -Now that you have migrated to Trunk Merge Queue, be sure to share the workflow with your team, [using-the-queue](./using-the-queue/ "mention")as a great place to start. 
+Now that you have migrated to Trunk Merge Queue, be sure to share the workflow with your team, [using-the-queue](/merge-queue/using-the-queue "mention")as a great place to start. -*** +--- ### Getting help diff --git a/merge-queue/optimizations.mdx b/merge-queue/optimizations.mdx new file mode 100644 index 0000000..1a97472 --- /dev/null +++ b/merge-queue/optimizations.mdx @@ -0,0 +1,33 @@ +--- +title: "Optimizations" +description: "The core concept of any merge queue is Predictive Testing: testing your pull request against the head of the main branch, including all pull requests ahead of it in the queue." +--- +The core concept of any merge queue is [**Predictive Testing**](/merge-queue/optimizations/predictive-testing): testing your pull request against the head of the `main` branch, including all pull requests ahead of it in the queue. + +While this is the foundation, achieving the scale necessary to merge thousands of PRs per day requires more advanced strategies. Trunk Merge Queue introduces a suite of powerful concepts designed to maximize throughput and maintain velocity, even in complex, high-traffic repositories. In fact, hitting a high scale is nearly impossible without leveraging features like optimistic merging, pending failure depth, and batching. + +This section explains each of these key concepts: + +#### Throughput and speed + +* [**Batching**](/merge-queue/optimizations/batching): Groups multiple compatible pull requests together into a single test run. This significantly increases merge throughput and can dramatically reduce CI costs by validating an entire batch with a single test run instead of one for each individual pull request. It is an essential feature for achieving high throughput. +* [**Parallel Queues**](/merge-queue/optimizations/parallel-queues): Allows for the creation of multiple independent queues that test and merge PRs in parallel. 
This feature is necessary for large monorepos and transforms the queue from a simple "line" into a more complex and efficient "graph". +* [**Testing Concurrency**](/merge-queue/administration/advanced-settings#testing-concurrency): A setting that defines the maximum number of pull requests that can be tested simultaneously. Fine-tuning this number is a powerful way to maximize merge velocity. It ensures a continuous flow of validated pull requests by keeping your CI runners fully utilized. + +#### Resilience and flake handling + +* [**Optimistic Merging**](/merge-queue/optimizations/optimistic-merging): Increases merge speed by leveraging test results from pull requests that are later in the queue. When a pull request (e.g., pull request 'c') passes testing, its success also verifies the changes from the pull requests ahead of it ('a' and 'b'). This allows the entire group of pull requests to be safely merged at once. +* [**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth): Allows the queue to continue testing subsequent pull requests even if an earlier one fails. Because predictive testing re-tests the failed PR's code along with the subsequent PRs, this feature gives the failed PR additional chances to pass. This prevents a single flaky test from halting all forward progress and makes the queue more resilient to intermittent failures. +* [**Anti-Flake Protection**](/merge-queue/optimizations/anti-flake-protection): Combining Optimistic Merging and Pending Failure Depth makes the queue more resilient to flaky tests. This inherent outcome allows the successful test of a later pull request to retroactively validate an earlier one that failed due to a transient issue. + + +**Note on flaky tests** + +While Anti-Flake Protection provides resilience to flaky tests through queue mechanics, they still delay merges. 
Trunk Flaky Tests addresses the root cause by automatically [detecting](/flaky-tests/detection) and [quarantining](/flaky-tests/quarantining) flaky tests at runtime while maintaining test visibility. For maximum throughput, [integrate Flaky Tests](/flaky-tests/get-started) to work alongside Anti-Flake Protection. + + +* [**Flaky Tests Quarantining**](/flaky-tests/quarantining) (via [Flaky Tests](/flaky-tests/overview)): Automatically detects and quarantines flaky tests to prevent their failures from blocking the merge queue. Quarantined tests continue running and uploading results for visibility, allowing your team to identify and fix them while eliminating false-negative blockages. This foundation of clean test signals is essential for achieving maximum queue throughput. + +#### Prioritization + +* [**Priority Merging**](/merge-queue/optimizations/priority-merging): Provides the ability to prioritize certain pull requests, allowing urgent changes or hotfixes to bypass the standard queue order and be tested and merged more quickly. diff --git a/merge-queue/optimizations/anti-flake-protection.mdx b/merge-queue/optimizations/anti-flake-protection.mdx index a40b723..ae92f4d 100644 --- a/merge-queue/optimizations/anti-flake-protection.mdx +++ b/merge-queue/optimizations/anti-flake-protection.mdx @@ -1,35 +1,32 @@ --- title: "Anti-flake protection" -description: "Combine optimistic merging and pending failure depth to prevent flaky test failures from blocking the merge queue." +description: "Some CI jobs fail for reasons unrelated to a PR's code change, such as due to flaky tests or a CI runner disconnecting. These failures are usually cleared when the CI job is rerun." --- ### What it is -Some CI jobs fail for reasons unrelated to a PR's code change, such as due to [flaky tests](https://trunk.io/blog/the-ultimate-guide-to-flaky-tests) or a CI runner disconnecting. These failures are usually cleared when the CI job is rerun. 
If a second PR that depends on the first **does** pass, it is very likely that the first PR was good and experienced a transient failure. +Some CI jobs fail for reasons unrelated to a PR's code change, such as due to [flaky tests](https://trunk.io/blog/the-ultimate-guide-to-flaky-tests) or a CI runner disconnecting. These failures are usually cleared when the CI job is rerun. If a second PR that depends on the first **does** pass, it is very likely that the first PR was good and simply experienced a transient failure. -Trunk Merge Queue can use the combination of [**Optimistic Merging** ](./optimistic-merging)and [**Pending Failure Depth**](./pending-failure-depth) to merge pull requests that would otherwise be rejected from the queue. +Trunk Merge Queue can use the combination of [**Optimistic Merging** ](/merge-queue/optimizations/optimistic-merging)and [**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth) to merge pull requests that would otherwise be rejected from the queue. - -If you have a lot of flaky tests in your projects, you should track and fix them with [Trunk Flaky Tests](../../flaky-tests/overview). Anti-flake protection helps reduce the impact of flaky tests but doesn't help you detect, track, and eliminate them. - + +If you have a lot of flaky tests in your projects, you should track and fix them with [Trunk Flaky Tests](/flaky-tests/overview). Anti-flake protection helps reduce the impact of flaky tests but doesn't help you detect, track, and eliminate them. + In the video below, you can see an example of this anti-flake protection: - - - + +Anti-flake protection with optimistic merging + pending failure depth + - - - -
what's happening?queue
A, B, C begin predictive testingmain <- A <- B+a <- C+ba
B fails testingmain <- A <- B+a <- C+ba
predictive failure depth keeps B from being evicted while C testsmain <- A <- B+a (hold) <- C+ba
C passesmain <- A <- B+a <- C+ba
optimistic merging allows A, B, C to mergemerge A B C
+
what's happening?queue
A, B, C begin predictive testingmain <- A <- B+a <- C+ba
B fails testingmain <- A <- B+a <- C+ba
predictive failure depth keeps B from being evicted while C testsmain <- A <- B+a (hold) <- C+ba
C passesmain <- A <- B+a <- C+ba
optimistic merging allows A, B, C to mergemerge A B C
-Optimistic Merging only works when the [Pending Failure Depth](./anti-flake-protection#pending-failure-depth) is set to **a value greater than zero**. When zero or disabled, Merge will not hold any failed tests in the queue. +Optimistic Merging only works when the [Pending Failure Depth](#pending-failure-depth) is set to **a value greater than zero**. When zero or disabled, Merge will not hold any failed tests in the queue. ### Why use it -* **Eliminate false negatives** - Flaky tests frequently cause PR failures unrelated to actual code changes. Anti-flake protection helps get these under control, so developers don't waste time investigating non-issues. +* **Eliminate false negatives** - Flaky tests cause 20-40% of PR failures in typical pipelines. Anti-flake protection helps get these under control, so developers don't waste time investigating non-issues. * **Maintain developer confidence** - When the queue rejects PRs for real reasons (not flaky tests), developers trust the system. Reduces "it's probably just flaky" dismissiveness of real failures. * **Reduce manual retries** - Developers don't need to manually resubmit PRs or click "retry" when tests flake. Trunk handles it automatically, saving time and frustration. * **Keep queue moving** - Flaky tests don't stall the queue. PRs that would have been blocked by transient failures merge successfully, increasing overall throughput. 
@@ -37,7 +34,7 @@ Optimistic Merging only works when the [Pending Failure Depth](./anti-flake-prot ### How to enable -Anti Flake Protection is active when [**Optimistic Merge Queue**](./optimistic-merging) is **On** and [**Pending Failure Depth**](./pending-failure-depth) is **set to a value greater than zero** +Anti Flake Protection is active when [**Optimistic Merge Queue**](/merge-queue/optimizations/optimistic-merging) is **On** and [**Pending Failure Depth**](/merge-queue/optimizations/pending-failure-depth) is **set to a value greater than zero** Enable Optimistic merging in **Settings** > **Repositories** > your repository > **Merge Queue** > toggle **On** **Optimistic Merge Queue**. @@ -64,7 +61,7 @@ Configure Pending Failure Depth in **Settings** > **Repositories** > your reposi Don't enable anti-flake protection if: -* **Your tests are not flaky (< 2% flake rate)** - No benefit, only cost +* **Your tests are not flaky (< 2% flake rate)** - No benefit, only cost * **CI resources are extremely limited** - Retries double test costs for flaky PRs * **You're actively fixing flaky tests** - Better to fix than to mask * **Flaky tests indicate real issues** - Sometimes "flaky" failures reveal race conditions or timing issues in your code @@ -89,7 +86,7 @@ Do enable anti-flake protection when: 1. **Enable anti-flake protection** - Unblock your team immediately 2. **Identify flaky tests** - Use CI analytics to find which tests flake most 3. **Fix the root causes** - Make tests deterministic, add retries at test level, improve infrastructure -4. **Reduce flake rate over time** - Goal should be < 2% flake rate +4. **Reduce flake rate over time** - Goal should be < 2% flake rate 5. 
**Consider disabling** - Once tests are stable, anti-flake protection becomes unnecessary **Red flags indicating systemic issues:** @@ -111,4 +108,4 @@ Do enable anti-flake protection when: ### Next Steps -If you have a lot of flaky tests in your projects, you should track and fix them with [Trunk Flaky Tests](../../flaky-tests/overview). Anti-flake protection helps reduce the impact of flaky tests but doesn't help you detect, track, and eliminate them. +If you have a lot of flaky tests in your projects, you should track and fix them with [Trunk Flaky Tests](/flaky-tests/overview). Anti-flake protection helps reduce the impact of flaky tests but doesn't help you detect, track, and eliminate them. diff --git a/merge-queue/optimizations/batching.mdx b/merge-queue/optimizations/batching.mdx index 362d09a..7c336bf 100644 --- a/merge-queue/optimizations/batching.mdx +++ b/merge-queue/optimizations/batching.mdx @@ -1,21 +1,21 @@ --- title: "Batching" -description: "Test multiple PRs together as a single unit to increase merge throughput and reduce CI costs." +description: "Batching allows Trunk Merge Queue to test multiple pull requests together as a single unit, rather than testing them one at a time." --- -## What it is +### What it is -Batching allows Trunk Merge Queue to test multiple pull requests together as a single unit, rather than testing them one at a time. +Batching allows Trunk Merge Queue to test multiple pull requests together as a single unit, rather than testing them one at a time. When batching is enabled, Trunk intelligently groups compatible PRs and runs your test suite once for the entire batch. If the batch passes, all PRs in the batch merge together, dramatically reducing total test time. -## Why use it +### Why use it * **Reduce total test time by 60-80%** - Instead of running your full test suite 10 times for 10 PRs, you run it 2-3 times for the same PRs grouped into batches. More PRs merged with less CI time. 
* **Increase merge throughput** - Process 3-5x more PRs per hour compared to testing individually. A queue that handled 20 PRs/hour can now handle 60-100 PRs/hour with batching. * **Lower CI costs** - Fewer test runs means lower CI/CD infrastructure costs. Teams report 50-70% reduction in CI minutes consumed by merge queue testing. * **Faster time-to-production** - PRs spend less time waiting in queue. What used to take hours can now take minutes, getting features and fixes to production faster. -## How to enable +### How to enable Batching is **disabled by default** and must be explicitly enabled. @@ -34,7 +34,7 @@ With Batching enabled, you can configure two options: A good place to start is with the defaults, Maximum wait time set to 5 (minutes) and Target batch size set to 4 (PRs). -## Excluding PRs from Batching +### Excluding PRs from Batching Sometimes you need a specific PR to test in isolation, even when batching is enabled for your queue. You can prevent individual PRs from batching without changing your overall batching configuration. 
@@ -42,7 +42,7 @@ Sometimes you need a specific PR to test in isolation, even when batching is ena * **High-risk changes** — Infrastructure updates, database migrations, or changes that could affect other PRs in unpredictable ways * **Debugging batch failures** — Isolate a suspected problematic PR to confirm it tests correctly on its own -* **Critical hotfixes** — Make sure a time-sensitive fix isn't delayed or affected by other PRs in a batch +* **Critical hotfixes** — Ensure a time-sensitive fix isn't delayed or affected by other PRs in a batch * **Flaky PR isolation** — Test a PR with known flaky behavior separately to avoid impacting other PRs #### How to exclude a PR from batching @@ -57,7 +57,7 @@ Add the `--no-batch` flag when submitting your PR: **Option 2: Using the API** -Set `noBatch: true` when calling the [`/submitPullRequest`](../reference/merge#post-submitpullrequest) endpoint: +Set `noBatch: true` when calling the [`/submitPullRequest`](/merge-queue/reference/merge#post-submitpullrequest) endpoint: ```bash curl -X POST https://api.trunk.io/v1/submitPullRequest \ @@ -90,13 +90,11 @@ When a PR is submitted with no-batch: Excluding a PR from batching only affects that specific PR. Your queue's batching settings and other PRs remain unaffected.
-## Bisection Testing Concurrency +### Bisection Testing Concurrency When a batch fails, Trunk automatically splits it apart (bisects) to identify which PR caused the failure. You can configure a separate, higher concurrency limit specifically for these bisection tests to isolate failures faster without impacting your main queue. - -![](/assets/1768426960-batching-settings.avif) - +
#### Why Separate Bisection Concurrency? @@ -120,13 +118,17 @@ When you set a higher bisection concurrency: 2. **Bisection concurrency** controls how many PRs test simultaneously during failure isolation 3. Both run independently - bisection tests don't count against your main queue limit - +
+ +Example scenario: + * Main queue concurrency: 5 * Bisection concurrency: 15 * Batch `ABCD` fails and needs to be split The bisection process can spin up 15 test runners to quickly isolate which PR failed, while your main queue continues processing 5 PRs normally. Developers get faster feedback about failures without slowing down successful merges. - + +
#### Configuring Bisection Concurrency @@ -140,21 +142,31 @@ Navigate to **Settings** > **Repositories** > your repository > **Merge Queue** #### Recommended Settings + + * Main queue concurrency: 5 * Bisection concurrency: 10 * Good for: Teams managing CI costs carefully + + + * Main queue concurrency: 10 * Bisection concurrency: 25 * Good for: Teams with moderate CI capacity + + + * Main queue concurrency: 25 * Bisection concurrency: 50 * Good for: Teams prioritizing fast feedback over CI costs + + #### When to Use Higher Bisection Concurrency @@ -193,9 +205,7 @@ Start with bisection concurrency 2x your main queue concurrency, monitor the imp ❌ **Don't set lower than main queue** - This defeats the purpose and slows down bisection - - -## Test Caching During Bisection +### Test Caching During Bisection When a batch fails and Trunk splits it apart to identify the failing PR, the merge queue intelligently reuses test results it has already collected during the bisection process. This avoids redundant CI runs and speeds up failure isolation. @@ -203,7 +213,10 @@ When a batch fails and Trunk splits it apart to identify the failing PR, the mer During bisection, Trunk maintains a cache of test results as it progressively splits the failed batch. If the queue knows with certainty that a particular combination of PRs will fail (because it already tested that exact combination earlier in the bisection process), it skips running the test again and reuses the previous result. - +
+ +Example bisection with test caching + 1. Batch `ABCD` fails testing (main ← ABCD) 2. Trunk splits the batch: `AB` and `CD` 3. Tests `AB` (passes) and `CD` (fails) @@ -212,7 +225,8 @@ During bisection, Trunk maintains a cache of test results as it progressively sp 6. If `main ← ABCD` failed and `main ← AB` passed, Trunk knows `CD` contains the failure 7. When testing `main ← AB ← C`, if this combination was already tested earlier, reuse that result 8. Skip redundant CI runs and identify the failing PR faster - + +
#### Benefits @@ -289,7 +303,7 @@ Together, these features create a highly efficient batch failure recovery system **Note:** Test caching for batch failure isolation is automatically enabled for all repositories using batching mode. No configuration is required.
-## Fine tuning batch sizes +### Fine tuning batch sizes **Signs your batch size is too large:** @@ -309,22 +323,22 @@ Together, these features create a highly efficient batch failure recovery system * Test stability (more flaky tests = smaller batches) * PR submission rate (more PRs = larger batches) -## Tradeoffs and considerations +### Tradeoffs and considerations -The downsides here are very limited. Since batching combines multiple pull requests into one, you essentially give up the proof that every pull request in complete isolation can safely be merged into your protected branch. +The downsides here are very limited. Since batching combines multiple pull requests into one, you essentially give up the proof that every pull request in complete isolation can safely be merged into your protected branch. -In the unlikely case that you have to revert a change from your protected branch or do a rollback, you will need to retest that revert or submit it to the queue to make sure nothing has broken. In practice, this re-testing is required in almost any case, regardless of how it was originally merged, and the downsides are fairly limited. +In the unlikely case that you have to revert a change from your protected branch or do a rollback, you will need to retest that revert or submit it to the queue to ensure nothing has broken. In practice, this re-testing is required in almost any case, regardless of how it was originally merged, and the downsides are fairly limited. #### Common misconceptions -* **Misconception:** "Batching merges multiple PRs into a single commit" +* **Misconception:** "Batching merges multiple PRs into a single commit" * **Reality:** No! Each PR is still merged as a separate commit. Batching only affects testing, not merging. 
-* **Misconception:** "If a batch fails, all PRs in the batch fail" +* **Misconception:** "If a batch fails, all PRs in the batch fail" * **Reality:** Trunk automatically splits the batch and retests to identify only the failing PR(s). Passing PRs still merge. -* **Misconception:** "Batching always makes the queue faster" +* **Misconception:** "Batching always makes the queue faster" * **Reality:** Batching is most effective with stable tests and high PR volume. For low-traffic repos or flaky tests, the overhead may outweigh benefits. -## Related features +### Related features Batching works exceptionally well with these optimizations: @@ -332,45 +346,45 @@ Batching works exceptionally well with these optimizations: **Optimistic merging** - While a batch is testing, the next batch can begin forming and testing optimistically. Combining batching with optimistic merging provides maximum throughput. Configure both for best results. -**Pending failure depth** - When a batch fails, [pending failure depth](./pending-failure-depth) controls how many successor test runs the system waits on before transitioning the failed batch. Combined with optimistic merging, this can prevent premature bisection of a batch that only failed due to a transient issue. +**Pending failure depth** - When a batch fails, [pending failure depth](/merge-queue/optimizations/pending-failure-depth) controls how many successor test runs the system waits on before transitioning the failed batch. Combined with optimistic merging, this can prevent premature bisection of a batch that only failed due to a transient issue. **Anti-flake protection** - Essential companion to batching. Reduces false batch failures caused by flaky tests, making batching more reliable and efficient. 
-## Batching + Optimistic Merging and Pending Failure Depth +### Batching + Optimistic Merging and Pending Failure Depth -Enabling batching along with Pending Failure Depth and Optimistic Merging can help you realize the major cost savings of batching while still reaping the [anti-flake](./anti-flake-protection) protection of optimistic merging and pending failure depth. +Enabling batching along with Pending Failure Depth and Optimistic Merging can help you realize the major cost savings of batching while still reaping the [anti-flake](/merge-queue/optimizations/anti-flake-protection) protection of optimistic merging and pending failure depth. - - - + +example of testing pull requests in batches of 3 + -
eventqueue
Enqueue A, B, C, D, E, F, Gmain <- ABC <- DEF +abc
Batch ABC failsmain <- ABC
pending failure depth keeps ABC from being evicted while DEFmain <- ABC (hold) <- DEF+abc
DEF passesmain <- ABC <- DEF+abc
optimistic merging allows ABC and DEF to mergemerge ABC, DEF
+
eventqueue
Enqueue A, B, C, D, E, F, Gmain <- ABC <- DEF +abc
Batch ABC failsmain <- ABC
pending failure depth keeps ABC from being evicted while DEFmain <- ABC (hold) <- DEF+abc
DEF passesmain <- ABC <- DEF+abc
optimistic merging allows ABC and DEF to mergemerge ABC, DEF
Combined, Pending Failure Depth, Optimistic Merging, and Batching can greatly improve your CI performance because now Merge can optimistically merge whole batches of PRs with far less wasted testing. -## Next steps +### Next steps **Start with batching:** 1. Enable batching with conservative settings (batch size: 3-5) 2. Monitor for a few days and observe behavior 3. Gradually increase batch size as you gain confidence -4. Check [Metrics and monitoring](../administration/metrics) to measure impact +4. Check [Metrics and monitoring](/merge-queue/administration/metrics) to measure impact **Optimize further:** -* [Optimistic merging](./optimistic-merging) - Combine with batching for maximum throughput -* [Anti-flake protection](./anti-flake-protection) - Reduce false batch failures -* [Pending failure depth](./pending-failure-depth) - Tune behavior during batch failures +* [Optimistic merging](/merge-queue/optimizations/optimistic-merging) - Combine with batching for maximum throughput +* [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) - Reduce false batch failures +* [Pending failure depth](/merge-queue/optimizations/pending-failure-depth) - Tune behavior during batch failures **Monitor performance:** -* [Metrics and monitoring](../administration/metrics) - Track throughput improvements and CI cost savings -* Watch batch failure rate (should be \<10%) +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track throughput improvements and CI cost savings +* Watch batch failure rate (should be <10%) * Measure time-to-merge improvements **Troubleshoot issues:** -* If batches fail frequently → Lower batch size or enable [Anti-flake protection](./anti-flake-protection) +* If batches fail frequently → Lower batch size or enable [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) * If not seeing improvements → Check PR volume and test stability -* For detailed help → [Troubleshooting](../reference/troubleshooting) 
+* For detailed help → [Troubleshooting](/merge-queue/reference/troubleshooting) diff --git a/merge-queue/optimizations/direct-merge-to-main.mdx b/merge-queue/optimizations/direct-merge-to-main.mdx index b7380b0..202d326 100644 --- a/merge-queue/optimizations/direct-merge-to-main.mdx +++ b/merge-queue/optimizations/direct-merge-to-main.mdx @@ -1,6 +1,6 @@ --- title: "Direct merge to main" -description: "Skip redundant retesting and merge PRs directly when they are already tested against the current tip of main." +description: "Direct Merge to Main is an optimization that allows PRs to merge immediately without waiting in the queue when retesting would provide no value." --- ### Overview @@ -41,7 +41,10 @@ Direct Merge to Main only activates when **ALL** of these conditions are met: If any of these conditions are not met, the PR enters the queue normally and tests predictively as usual. - +
+ +Example Scenarios + **Scenario 1: Perfect candidate for Direct Merge** * Developer updates their PR to tip of main using "Update branch" on GitHub @@ -69,7 +72,8 @@ If any of these conditions are not met, the PR enters the queue normally and tes * Queue is empty * But CI checks are still running or failed * **Result:** PR cannot enter queue until checks pass - + +
### When to Enable @@ -84,7 +88,7 @@ If any of these conditions are not met, the PR enters the queue normally and tes * You rarely keep PRs up-to-date with main (feature won't trigger often) * You want every PR to test in the queue regardless (for additional validation) -* Your tests are very fast (< 1 minute) and the optimization is negligible +* Your tests are very fast (< 1 minute) and the optimization is negligible ### Configuration @@ -95,9 +99,7 @@ If any of these conditions are not met, the PR enters the queue normally and tes 3. Enable the setting 4. Changes take effect immediately - -![](/assets/1768426934-direct-merge-mode-toggle.avif) - +
#### Verify It's Working @@ -111,7 +113,7 @@ When a PR is directly merged, you'll see different timeline messages and notific > "This PR was merged directly to main because it was already up-to-date and the queue was empty." -**In** [**Slack notifications**](../integration-for-slack) **(if configured):** +**In** [**Slack notifications**](/merge-queue/integration-for-slack) **(if configured):** > "✅ PR #123 merged directly (was up-to-date, queue empty)" @@ -121,25 +123,25 @@ These messages confirm that the optimization triggered and your PR skipped the q Direct Merge to Main complements other optimizations: -[**Predictive Testing**](./predictive-testing) +[**Predictive Testing**](/merge-queue/optimizations/predictive-testing) * When direct merge doesn't trigger, predictive testing takes over * PRs not at tip of main test against predicted future state -* Both features work together: direct merge handles the tip, predictive testing handles the rest +* Both features work together seamlessly -[**Optimistic Merging**](./optimistic-merging) +[**Optimistic Merging**](/merge-queue/optimizations/optimistic-merging) * Optimistic merging handles PRs deeper in queue * Direct merge handles the special case at the front * Both reduce unnecessary waiting -[**Batching**](./batching) +[**Batching**](/merge-queue/optimizations/batching) * If queue has batching enabled and isn't empty, direct merge won't trigger * Batching takes priority when multiple PRs are present * Direct merge is for the empty queue case -[**Parallel Queues**](./parallel-queues/) +[**Parallel Queues**](/merge-queue/optimizations/parallel-queues) * Works in both Single and Parallel mode * In parallel mode, checks if PR's specific lane is empty @@ -147,8 +149,10 @@ Direct Merge to Main complements other optimizations: ### Troubleshooting - - +
+ +Why didn't my PR merge directly? + Check these conditions: 1. Was your PR based on the tip of main? (Check GitHub branch status) @@ -157,22 +161,34 @@ Check these conditions: 4. Is Direct Merge to Main enabled? (Check Merge Queue settings) If all conditions were met but direct merge didn't happen, contact support with the PR number. - - +
+ +
+ +Does this bypass security checks? + No. Direct merge only skips the queue testing step. Your PR must still: * Pass all required status checks on GitHub * Meet all branch protection requirements * Have the necessary approvals * Be based on the latest main branch - - +
+ +
+ +Will this slow down other PRs? + No. Direct merge only happens when the queue is empty, so there are no other PRs to slow down. When other PRs are present, direct merge doesn't trigger and the queue operates normally. - - +
+ +
+ +What if tests are flaky? + Direct merge relies on the tests that ran on your PR branch (before entering the queue). If those tests are flaky and gave a false positive, the issue existed before direct merge. Focus on fixing flaky tests rather than disabling the optimization. - - + +
diff --git a/merge-queue/optimizations/index.mdx b/merge-queue/optimizations/index.mdx deleted file mode 100644 index aeed473..0000000 --- a/merge-queue/optimizations/index.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "Optimizations" -description: "Advanced features that increase merge throughput, handle flaky tests, and prioritize critical PRs in Trunk Merge Queue." ---- -The core concept of any merge queue is [**Predictive Testing**](./predictive-testing): testing your pull request against the head of the `main` branch, including all pull requests ahead of it in the queue. - -While this is the foundation, achieving the scale necessary to merge thousands of PRs per day requires more advanced strategies. Trunk Merge Queue introduces a set of features designed to maximize throughput and maintain velocity, even in complex, high-traffic repositories. In fact, hitting a high scale is nearly impossible without features like optimistic merging, pending failure depth, and batching. - -This section explains each of these key concepts: - -#### Throughput and speed - -* [**Batching**](./batching): Groups multiple compatible pull requests together into a single test run. This significantly increases merge throughput and can dramatically reduce CI costs by validating an entire batch with a single test run instead of one for each individual pull request. It is an essential feature for achieving high throughput. -* [**Parallel Queues**](./parallel-queues/): Allows for the creation of multiple independent queues that test and merge PRs in parallel. This feature is necessary for large monorepos and transforms the queue from a simple "line" into a more complex and efficient "graph". -* [**Testing Concurrency**](../administration/advanced-settings#testing-concurrency): A setting that defines the maximum number of pull requests that can be tested simultaneously. Fine-tuning this number maximizes merge velocity. 
It keeps a continuous flow of validated pull requests moving by keeping your CI runners fully utilized. - -#### Resilience and flake handling - -* [**Optimistic Merging**](./optimistic-merging): Increases merge speed by using test results from pull requests that are later in the queue. When a pull request (e.g., pull request 'c') passes testing, its success also verifies the changes from the pull requests ahead of it ('a' and 'b'). This allows the entire group of pull requests to be safely merged at once. -* [**Pending Failure Depth**](./pending-failure-depth): When a group fails testing, it enters a Pending Failure state and waits for successor test runs to complete before transitioning. When combined with Optimistic Merging, a passing successor can retroactively clear the failure, enabling automated recovery from transient (flaky) failures without evicting the group from the queue. -* [**Anti-Flake Protection**](./anti-flake-protection): Combining Optimistic Merging and Pending Failure Depth makes the queue more resilient to flaky tests. This inherent outcome allows the successful test of a later pull request to retroactively validate an earlier one that failed due to a transient issue. - - -**Note on flaky tests** - -While Anti-Flake Protection provides resilience to flaky tests through queue mechanics, they still delay merges. Trunk Flaky Tests addresses the root cause by automatically [detecting](../../flaky-tests/detection/index) and [quarantining](../../flaky-tests/quarantining/) flaky tests at runtime while maintaining test visibility. For maximum throughput, [integrate Flaky Tests](../../flaky-tests/get-started/) to work alongside Anti-Flake Protection. - - -* [**Flaky Tests Quarantining**](../../flaky-tests/quarantining/index) (via [Flaky Tests](../../flaky-tests/overview)): Automatically detects and quarantines flaky tests to prevent their failures from blocking the merge queue. 
Quarantined tests continue running and uploading results for visibility, allowing your team to identify and fix them while eliminating false-negative blockages. This foundation of clean test signals is essential for achieving maximum queue throughput. - -#### Prioritization - -* [**Priority Merging**](./priority-merging): Provides the ability to prioritize certain pull requests, allowing urgent changes or hotfixes to bypass the standard queue order and be tested and merged more quickly. diff --git a/merge-queue/optimizations/optimistic-merging.mdx b/merge-queue/optimizations/optimistic-merging.mdx index 4d4f292..c7beaf3 100644 --- a/merge-queue/optimizations/optimistic-merging.mdx +++ b/merge-queue/optimizations/optimistic-merging.mdx @@ -1,20 +1,18 @@ --- title: "Optimistic merging" -description: "Merge PRs faster by using passing test results from later PRs in the queue to validate earlier ones." +description: "Optimistic merging allows pull requests that fail tests to still get merged if pull requests behind them in the queue pass their tests." --- ### What it is -Optimistic merging allows pull requests that fail tests to still get merged if pull requests behind them in the queue pass _their_ tests. The assumption is that the queue has proof that while one specific PR might fail tests, it passes them when combined with a pull request that is going to merge soon behind it. +Optimistic merging allows pull requests that fail tests to still get merged if pull requests behind them in the queue pass *their* tests. The assumption is that the queue has proof that while one specific PR might fail tests, it passes them when combined with a pull request that is going to merge soon behind it. -The foundation of our merge queue starts with [predictive testing](/broken/pages/BAKgbuxqWos5o4kna99T). When a predictive test is being run, concurrent tests sometimes finish before the work ahead of it. 
This creates a situation where the system knows that all code ahead of it collectively `passes` tests, and it is safe to merge all those changes into your protected branch (`main)`.\ +The foundation of our merge queue starts with [predictive testing](/merge-queue/optimizations/predictive-testing). When a predictive test is being run, concurrent tests sometimes finish before the work ahead of it. This creates a situation where the system knows that all code ahead of it collectively `passes` tests, and it is safe to merge all those changes into your protected branch (`main)`.\ \ -With optimistic merging enabled, the queue uses results from pull requests later in the queue to merge faster. In the illustration below you can see that pull request 'c' includes the verified testing results of pull requests 'b' and 'a'. As soon as 'c' passes testing, we can safely merge 'a', 'b', and 'c' and know they will all work correctly together. - - - - - +With optimistic merging enabled, we can leverage results from pull requests later in the queue to merge faster. In the illustration below you can see that pull request 'c' includes the verified testing results of pull requests 'b' and 'a'. As soon as 'c' passes testing, we can safely merge 'a', 'b', and 'c' and know they will all work correctly together. + +Optimistic merging to merge faster + ### Why use it @@ -45,7 +43,7 @@ After enabling, watch your queue: The downsides here are very limited. You essentially give up the proof that every pull request in complete isolation can safely be merged into your protected branch. -In the unlikely case that you have to revert a change from your protected branch, you will need to retest that revert or submit it to the queue to make sure nothing has broken. In practice, this re-testing is required in almost any case, regardless of how it was originally merged, and the downsides are fairly limited. 
+In the unlikely case that you have to revert a change from your protected branch, you will need to retest that revert or submit it to the queue to ensure nothing has broken. In practice, this re-testing is required in almost any case, regardless of how it was originally merged, and the downsides are fairly limited. #### What you gain @@ -77,16 +75,16 @@ Don't enable optimistic merging if: **Combine with other optimizations:** Optimistic merging works best alongside: -* [Batching](./batching) - Test batches optimistically -* [Predictive testing](./predictive-testing) - Required foundation for optimistic merging -* [Anti-flake protection](/broken/pages/eP3tevVEeuSPwyO2f6yo) - Reduces unnecessary retests +* [Batching](/merge-queue/optimizations/batching) - Test batches optimistically +* [Predictive testing](/merge-queue/optimizations/predictive-testing) - Required foundation for optimistic merging +* [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) - Reduces unnecessary retests #### Common misconceptions * **Misconception:** "Optimistic merging is risky - it might merge broken code" - * **Reality:** No! Trunk still requires all tests to pass. Optimistic merging only affects _when_ testing starts, not _whether_ testing happens. Safety is never compromised. + * **Reality:** No! Trunk still requires all tests to pass. Optimistic merging only affects *when* testing starts, not *whether* testing happens. Safety is never compromised. * **Misconception:** "Optimistic merging causes lots of wasted retests" - * **Reality:** Retests are rare (< 5% of PRs in typical queues). The throughput gains far outweigh the occasional retest cost. + * **Reality:** Retests are rare (< 5% of PRs in typical queues). The throughput gains far outweigh the occasional retest cost. * **Misconception:** "I should enable every optimization immediately" * **Reality:** Start with just predictive testing. Add batching once stable. Add optimistic merging last. 
Build confidence in each layer. @@ -94,24 +92,24 @@ Don't enable optimistic merging if: **Before enabling optimistic merging:** -1. Make sure basic queue is working well -2. Verify test stability (< 5% flake rate recommended) -3. Enable [Anti-flake protection](./anti-flake-protection) first +1. Ensure basic queue is working well +2. Verify test stability (< 5% flake rate recommended) +3. Enable [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) first 4. Check that you have consistent PR volume **After enabling:** -* [Metrics and monitoring](../administration/metrics) - Track throughput improvements -* Watch for retest rate (should be < 5%) +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track throughput improvements +* Watch for retest rate (should be < 5%) * Measure time-to-merge improvements **Optimize further:** -* [Batching](./batching) - Combine with optimistic merging for maximum effect -* [Pending failure depth](./pending-failure-depth) - Fine-tune simultaneous testing behavior +* [Batching](/merge-queue/optimizations/batching) - Combine with optimistic merging for maximum effect +* [Pending failure depth](/merge-queue/optimizations/pending-failure-depth) - Fine-tune simultaneous testing behavior **Troubleshooting:** * If seeing frequent retests → Check test stability or disable temporarily * If not seeing improvements → Check PR volume and queue activity -* For detailed help → [Troubleshooting](../reference/troubleshooting) +* For detailed help → [Troubleshooting](/merge-queue/reference/troubleshooting) diff --git a/merge-queue/optimizations/parallel-queues/index.mdx b/merge-queue/optimizations/parallel-queues.mdx similarity index 51% rename from merge-queue/optimizations/parallel-queues/index.mdx rename to merge-queue/optimizations/parallel-queues.mdx index 9e0175c..eb0b469 100644 --- a/merge-queue/optimizations/parallel-queues/index.mdx +++ b/merge-queue/optimizations/parallel-queues.mdx @@ -2,14 +2,13 @@ title: 
"Parallel queues" description: "Create dynamic parallel queues to reduce queue time" --- +Normally, a merge queue behaves by enqueueing all submitted pull requests into a single line. Under this mode of operation, every pull request is [predictively tested ](/merge-queue/optimizations/predictive-testing)against the pull requests ahead of it. While this guarantees the correctness of the protected branch at all times, under a high submission load, the wait time for an item in the queue can be negatively impacted.\ +\ +A regular merge queue operates like a grocery store with only a single checkout lane. When a lot of folks are trying to checkout at the same time - the line will grow (sometimes intolerably). With a dynamic parallel queue, trunk merge creates additional checkout lanes in real-time while still guaranteeing that the protected branch doesn't break.​  -Normally, a merge queue behaves by enqueueing all submitted pull requests into a single line. Under this mode of operation, every pull request is [predictively tested ](/broken/pages/BAKgbuxqWos5o4kna99T)against the pull requests ahead of it. While this guarantees the correctness of the protected branch at all times, under a high submission load, the wait time for an item in the queue can be negatively impacted. - -A regular merge queue operates like a grocery store with only a single checkout lane. When a lot of folks are trying to checkout at the same time - the line will grow (sometimes intolerably). 
With a dynamic parallel queue, trunk merge creates additional checkout lanes in real-time while still guaranteeing that the protected branch doesn't break.​ - - - - + +track impacted code of each pull request to create dynamic queues + For example, the following four pull requests: @@ -18,18 +17,16 @@ For example, the following four pull requests: * PR C with impacted target list `[ frontend, backend]` * PR D with impacted target list `[ docs]` -Without parallelization, the PRs **A**, **B**, **C**, and **D** would all be tested in a single predictive path **A** \<- **B** \<- **C** \<- **D**. Using the impacted target information we can instead build three dynamically provisioned queues and the predictive testing can yield higher throughput - which means your pull request spends less time in the queue stuck testing with unrelated code changes. +Without parallelization, the PRs **A**, **B**, **C**, and **D** would all be tested in a single predictive path **A** <- **B** <- **C** <- **D**. Using the impacted target information we can instead build three dynamically provisioned queues and the predictive testing can yield higher throughput - which means your pull request spends less time in the queue stuck testing with unrelated code changes. - -Three Dynamic Parallel Queues - +Three Dynamic Parallel Queues #### **How does it work?** -To run in parallel mode, each pull request needs to be inspected for its impacted targets. This is a fancy way of saying that each pull request needs to report what parts of the codebase are changing. - -In the example above, the pull requests **A**, **B**, and **D** can be tested in isolation since they affect distinct targets - `backend`, `frontend` and `docs`. The **C** pull request affects both `frontend` and `backend` and would be tested predictively with the changes in both **A** and **B**. - +To run in parallel mode, each pull request needs to be inspected for its impacted targets. 
This is a fancy way of saying that each pull request needs to report what parts of the codebase are changing.\ +\ +In the example above, the pull requests **A**, **B**, and **D** can be tested in isolation since they affect distinct targets - `backend`, `frontend` and `docs`. The **C** pull request affects both `frontend` and `backend` and would be tested predictively with the changes in both **A** and **B**.\ +\ To understand the interactions or dependent changes between pull requests, Trunk Merge Queue provides an API for posting the list of **impacted targets** that result from code changes in every PR. When Trunk Merge Queue is running in parallel mode, pull requests will not be processed until the list of impacted targets are uploaded. #### **What are Impacted Targets?** @@ -40,37 +37,27 @@ Impacted targets are metadata that describe the logical changes of a pull reques We ship several pre-built solutions for popular build systems to automatically calculate and post the impacted targets of a pull request. If you are using another build system, we would be happy to work with you to add support for your specific build system. - - - - - +
Bazelbazel-dark.pngbazel
NxNX.pngnx
OtherGroup 1277.pngapi
**Enable Parallel Modes**\ Merge can be swapped between `Single` and `Parallel` mode at any time. If there are no PRs in the merge queue when switching, the switch will be immediate. If there are PRs in the queue, then Merge will go into the `Switching Modes` state, where it'll wait for all currently testing PRs to merge before switching modes. During this time, PRs will not be able to enter the queue. Switching modes can be done from the `Merge Queue Mode` section of the `Settings > Repositories > repo name > Merge` panel - -![](/assets/enable-parallel-mode) - +

enabling parallel mode

**Find your Trunk API Token** + +Explore the interactive walkthrough in a new tab. + + #### Store your Organization Token as a GitHub Secret + +Explore the interactive walkthrough in a new tab. + + ### Monitoring Parallel Queue Performance Once you've enabled parallel mode and configured impacted targets, you can analyze how well the parallel workflow performs for different parts of your codebase. @@ -83,5 +70,5 @@ The Health dashboard allows you to filter all metrics by impacted targets, so yo * Demonstrate the value of parallel mode to engineering leadership -See [Filter Metrics by Impacted Targets ](../../administration/metrics#filter-metrics-by-impacted-targets)for detailed guidance on using this feature. +See [Filter Metrics by Impacted Targets ](/merge-queue/administration/metrics#filter-metrics-by-impacted-targets)for detailed guidance on using this feature. diff --git a/merge-queue/optimizations/parallel-queues/api.mdx b/merge-queue/optimizations/parallel-queues/api.mdx index 9a3ca8a..3ce7292 100644 --- a/merge-queue/optimizations/parallel-queues/api.mdx +++ b/merge-queue/optimizations/parallel-queues/api.mdx @@ -1,5 +1,5 @@ --- -title: "API" +title: "Custom Build Systems" description: "Upload custom list of impacted targets" --- Impacted Targets should be computed for every PR. The list of impacted targets should be computed by comparing two different SHAs: the **head of the target branch**, and the **merge commit of the pr**. @@ -34,7 +34,7 @@ BODY: { `impactedTargets` allows specifying either an array of strings representing the impacted targets from the PR or the string "ALL" (note that this is explicitly not in an array and is just the string "ALL"). Specifying "ALL" is the equivalent of saying that everything that comes into the graph after this PR should be based on this one, which is useful when your PR contains changes that affect the whole repo (such as editing `trunk.yaml` or a GitHub workflow). 
**Handling Forked Pull Requests**\ -The HTTP POST must contain the `x-api-token` to prove that it is a valid request from a workflow your org controls. _Workflows that come from forked PRs most likely will not have access to the Trunk org token_ required for the HTTP POST above. In this case, you should provide the **run ID** of the workflow as the `x-forked-workflow-run-id` header in place of the `x-api-token`. This ID can be obtained from [the GitHub context](https://docs.github.com/en/actions/learn-github-actions/contexts#github-context) as `${{ github.run_id }}`. Trunk Merge Queue will verify that the ID belongs to a currently running workflow originating from a forked PR with a SHA that matches the one provided in the request and allow it through. +The HTTP POST must contain the `x-api-token` to prove that it is a valid request from a workflow your org controls. *Workflows that come from forked PRs most likely will not have access to the Trunk org token* required for the HTTP POST above. In this case, you should provide the **run ID** of the workflow as the `x-forked-workflow-run-id` header in place of the `x-api-token`. This ID can be obtained from [the GitHub context](https://docs.github.com/en/actions/learn-github-actions/contexts#github-context) as `${{ github.run_id }}`. Trunk Merge Queue will verify that the ID belongs to a currently running workflow originating from a forked PR with a SHA that matches the one provided in the request and allow it through. We do not recommend using an event trigger like `pull_request_target.` This would allow workflows from forked PRs to get secrets, which is a security risk and would open your repo to attackers making forks, adding malicious code, and then running it against your repo to exfiltrate information. (see[ Keeping your GitHub Actions and workflows secure](https://securitylab.github.com/research/github-actions-preventing-pwn-requests/)). 
diff --git a/merge-queue/optimizations/parallel-queues/bazel.mdx b/merge-queue/optimizations/parallel-queues/bazel.mdx index f16ffa0..eb90182 100644 --- a/merge-queue/optimizations/parallel-queues/bazel.mdx +++ b/merge-queue/optimizations/parallel-queues/bazel.mdx @@ -2,7 +2,7 @@ title: "Bazel" description: "Instructions for enabled dynamic parallel queues powered by your bazel graph" --- -Leveraging [parallel mode](../../merge-queue#single-mode-vs.-parallel-mode) for Trunk Merge Queue is easy for Bazel-enabled repos because Bazel already knows the structure of your code and can automatically generate a dependency graph. Merge can use this information in parallel mode to create dynamic parallel queues enabling your pull requests to run through your Merge Queue faster.\ +Leveraging [parallel mode](/merge-queue#single-mode-vs.-parallel-mode) for Trunk Merge Queue is easy for Bazel-enabled repos because Bazel already knows the structure of your code and can automatically generate a dependency graph. Merge can use this information in parallel mode to create dynamic parallel queues enabling your pull requests to run through your Merge Queue faster.\ \ **How do we create parallel queues?**\ By understanding which Bazel targets a pull request affects, we can build a real-time graph and detect intersection points and where distinct non-overlapping graphs exist. This information is essentially a list of unique target names, which can then be used in real time to understand along which targets pull requests might overlap. 
diff --git a/merge-queue/optimizations/parallel-queues/nx.mdx b/merge-queue/optimizations/parallel-queues/nx.mdx index d95ace0..a018c7b 100644 --- a/merge-queue/optimizations/parallel-queues/nx.mdx +++ b/merge-queue/optimizations/parallel-queues/nx.mdx @@ -2,7 +2,7 @@ title: "Nx" description: "Instructions for enabled dynamic parallel queues powered by your Nx graph" --- -Leveraging [parallel mode](../../merge-queue#single-mode-vs.-parallel-mode) for Trunk Merge Queue is easy for Nx-enabled repos because Nx already knows the structure of your code and can automatically generate a dependency graph. Merge can use this information in parallel mode to create dynamic parallel queues enabling your pull requests to run through your Merge Queue faster.\ +Leveraging [parallel mode](/merge-queue#single-mode-vs.-parallel-mode) for Trunk Merge Queue is easy for Nx-enabled repos because Nx already knows the structure of your code and can automatically generate a dependency graph. Merge can use this information in parallel mode to create dynamic parallel queues enabling your pull requests to run through your Merge Queue faster.\ \ **How do we create parallel queues?**\ By understanding which Nx targets a pull request affects, we can build a real-time graph and detect intersection points and where distinct non-overlapping graphs exist. This information is essentially a list of unique target names, which can then be used in real time to understand along which targets pull requests might overlap. diff --git a/merge-queue/optimizations/pending-failure-depth.mdx b/merge-queue/optimizations/pending-failure-depth.mdx index 870d296..fe47638 100644 --- a/merge-queue/optimizations/pending-failure-depth.mdx +++ b/merge-queue/optimizations/pending-failure-depth.mdx @@ -1,90 +1,139 @@ --- title: "Pending failure depth" -description: "Keep failed PRs in the queue while successor PRs test, giving transient failures a chance to pass." 
+description: "Pending failure depth allows pull requests to wait until other pull requests behind them in the queue complete testing before getting removed from the queue." --- ### What it is -When a group's test run fails in the merge queue, it doesn't immediately get evicted. Instead, it enters a **Pending Failure** state — a holding state where the system hasn't yet decided whether to mark the group as failed or, if [batching](./batching) is enabled, to bisect the batch to isolate the culprit. +Pending failure depth allows pull requests to wait until other pull requests behind them in the queue complete testing before getting removed from the queue. + +By default, a PR that fails testing will be evicted from the queue. The **Pending Failure Depth** feature allows a failed PR to remain in the queue for pull requests behind it so that testing can be finished before this eviction occurs. The number of PRs that the queue will wait for is the *Pending Failure Depth.* This depth is configurable and reflects the number of pull requests behind this one that should complete testing before eviction is assessed. + +### Why use it + +* **Prevent queue stalls** - When a PR fails, the queue doesn't grind to a halt. Other PRs continue testing, assuming the failure was isolated. Keeps merge velocity high even during issues. +* **Faster failure recovery** - If PR #3 fails but PR #4 fixes the issue, both can be processed quickly because they tested in parallel. Without pending failure depth, you'd wait for #3 to fail, then wait for #4 to test sequentially. +* **Optimize for your team size** - Small teams benefit from lower values (fewer wasted tests), large teams benefit from higher values (maintain throughput despite occasional failures). +* **Balance risk vs. throughput** - Tune the setting to match your team's tolerance for wasted CI resources vs. need for high queue velocity. 
+ +### How to enable -Throughout this page, "group" means either a batch of PRs (when [batching](./batching) is enabled) or an individual PR (when it's not). +Pending failure depth is **set to zero by default** and should be enabled after you're confident in your basic queue setup. -#### Waiting for Predecessors +Configure Pending Failure Depth in **Settings** > **Repositories** > your repository > **Merge Queue** > select a value from the **Pending Failure Depth** dropdown. -A group in Pending Failure always waits for predecessor groups (the PRs ahead of it in the queue) to finish testing. This is how the system determines root cause: +### Configuration options -* If a predecessor also failed, the current group's failure may have been caused by the predecessor. The current group will be retested once the bad predecessor is removed. -* If all predecessors passed, the failure is attributable to the current group itself. + +Just getting started with tuning Pending Failure Depth? Try a value of 2, and work from there with your team to find the right balance. + -This predecessor-waiting happens regardless of the Pending Failure Depth setting. +**Start with a small value** and observe: -#### Waiting for Successors (Controlled by Pending Failure Depth) +* If your queue frequently stalls when PRs fail → Increase value +* If you see lots of wasted test runs (many PRs test then all fail) → Decrease value +* If your CI infrastructure is constrained → Use lower value (3-4) +* If you have abundant CI capacity → Use higher value (7-10) -**Pending Failure Depth** is a configuration value (integer, default 0) that controls how many levels of **successor** test runs (PRs behind the failed group in the queue) the system also waits on before transitioning the group out of the Pending Failure state. +#### Verify it's working -* **When set to 0 (default):** The successor check is skipped. The group transitions as soon as the predecessor condition is met. 
-* **When set to a value greater than 0:** The system additionally waits for successor groups within that many hops to finish testing before transitioning. +When a PR fails, watch for: -#### Why Wait for Successors? +* ✅ Multiple other PRs continue testing (up to your configured depth) +* ✅ Queue doesn't stop entirely +* ✅ Failed PR is removed, but others keep going -The value of waiting for successors depends on whether [optimistic merging](./optimistic-merging) is enabled: +### Tradeoffs and considerations -* **With optimistic merging (primary use case):** If the failure was caused by a flake rather than a real code problem, a successor further down the queue may pass its tests. Because that successor's test run includes the failed group's changes, a passing result is proof that those changes work. Optimistic merging uses this to retroactively clear the failed group and merge it. The Pending Failure Depth window gives those successors time to finish testing before the system prematurely fails or bisects the group. This is the automated [anti-flake protection](./anti-flake-protection) path. -* **Without optimistic merging:** The hold window gives you time to manually inspect the failure and restart the test run if it looks transient, before the system auto-transitions the group to Failed (or bisection, if [batching](./batching) is enabled). This is the only benefit without optimistic merging. +#### What you gain - -Pending Failure Depth only helps with transient (flaky) failures. For legitimate failures that propagate to successors, those successors will also fail, and the hold window expires without clearing the failure. 
- +* **Queue never fully stops** - Failures don't block all subsequent PRs +* **Faster recovery** - Independent PRs can merge while others fail +* **Tunable throughput** - Adjust for your team's needs +* **Better CI utilization** - Tests keep running instead of stopping -#### Example: Anti-Flake Protection in Action +#### What you give up or risk -This example shows how Pending Failure Depth works together with optimistic merging to automatically recover from a flaky failure: +* **Wasted CI resources** - PRs may test against a state that includes failing PRs, then need to retest +* **Cascading failures** - If one PR breaks something, multiple subsequent PRs might fail before the issue is caught +* **Complexity** - More PRs testing simultaneously = harder to understand queue state -
What's Happening?Queue
A, B, C begin predictive testingmain <- A <- B+a <- C+ba
B fails testing (a flake)main <- A <- B+a <- C+ba
Pending Failure Depth keeps B in the queue while C finishes testingmain <- A <- B+a (hold) <- C+ba
C passes — proving B's failure was a flakemain <- A <- B+a <- C+ba
Optimistic merging clears B and merges A, B, Cmerge A B C
+#### When to decrease pending failure depth -Without Pending Failure Depth, **B** would have been immediately evicted or bisected when its tests failed — even though the failure was transient and **C**'s passing result proves the changes work. +Lower the value (3-4) if: -### Why use it +* **Your tests are flaky (>5% flake rate)** - Flaky tests cause false failures, leading to wasted retests +* **CI resources are expensive/limited** - Lower parallelism reduces waste +* **PRs frequently conflict** - Related changes often fail together, so testing them in parallel wastes resources +* **You're seeing excessive retests** - Many PRs testing, failing, retesting pattern -* **Automated flake recovery with optimistic merging** - When combined with [optimistic merging](./optimistic-merging), a passing successor automatically clears a flaky failure without any manual intervention. This is the [anti-flake protection](./anti-flake-protection) mechanism. -* **Manual inspection window without optimistic merging** - Even without optimistic merging, the hold gives you a grace period to inspect the failure and manually restart the test run if it looks transient, before the system auto-transitions the group to Failed (or bisection, if [batching](./batching) is enabled). -* **Reduce developer disruption** - PRs that failed due to flakes are not unnecessarily evicted, so authors don't need to re-enqueue or investigate non-issues. -* **Prevent premature bisection of batches** - When [batching](./batching) is enabled, the hold prevents the system from immediately bisecting a batch that may have only failed due to a transient issue. +#### When to increase pending failure depth -### How to enable +Raise the value (7-10) if: - -Pending Failure Depth is **set to 0 by default** (successor-waiting disabled). We recommend enabling it after you have [optimistic merging](./optimistic-merging) configured and your basic queue setup is working. 
- +* **Your queue stalls frequently when PRs fail** - Low depth is blocking throughput +* **PRs are mostly independent** - Failures are isolated, not cascading +* **You have abundant CI capacity** - Waste isn't a concern +* **Large team, high PR volume** - Need parallelism to maintain velocity -Configure Pending Failure Depth in **Settings** > **Repositories** > your repository > **Merge Queue** > select a value from the **Pending Failure Depth** dropdown. +#### Understanding the cost -You can also configure it via Terraform using the `pending_failure_depth` attribute. +**Example cost calculation:** -### Recommendations +Scenario: Pending failure depth = 5, PR #101 fails testing -* **Not using optimistic merging?** We don't recommend enabling Pending Failure Depth out of the box. Without optimistic merging, the only benefit is a manual inspection window, which most teams don't need. -* **Using optimistic merging?** Start with a depth of 1. This gives one successor a chance to pass and clear a flaky failure automatically. -* **Optimistic merging not kicking in as often as expected?** If you're seeing PRs get evicted for flakes that a successor _would_ have cleared — but the hold expired before the successor finished testing — increase the depth to give more successors time to complete. +* PRs #102, #103, #104, #105, #106 all test against a state including #101 +* All 5 fail because #101 broke something +* All 5 retest after #101 is removed +* **Wasted**: 5 test runs -### Tradeoffs and considerations +**But consider:** -#### What you gain +* Without pending failure depth, PRs would test sequentially (much slower) +* In most cases, failures ARE independent, so PRs merge successfully +* Occasional waste is preferable to frequent queue stalls -* **Grace period for flake recovery** - Failed groups are held while successors finish testing, giving optimistic merging a chance to clear transient failures. 
-* **Fewer unnecessary evictions** - PRs that would have been evicted due to flakes can instead be automatically cleared and merged. -* **Avoids premature batch bisection** - When [batching](./batching) is enabled, the hold prevents the system from immediately bisecting a batch that failed due to a transient issue. +**Typical waste rate:** 5-10% of test runs are wasted retests in well-configured queues -#### What you give up or risk +#### Common misconceptions -* **Delayed failure feedback** - Legitimate failures take longer to surface because the system waits for successors to finish testing before transitioning the group. The higher the depth, the longer the wait. -* **No automatic benefit for real failures** - If the failure is legitimate (not a flake), successors that include the same broken code will also fail. The hold window expires without clearing the failure — the group transitions to Failed (or bisection) just as it would have, only later. -* **Limited value without optimistic merging** - Without optimistic merging enabled, there is no automated mechanism to clear the failure during the hold. The only benefit is the manual inspection window. +* **Misconception:** "Higher pending failure depth always means faster queue" + * **Reality:** Too high = wasted CI resources and cascading failures. Too low = queue stalls. The sweet spot depends on your team size and test stability. +* **Misconception:** "Pending failure depth should be set to 1 to avoid waste" + * **Reality:** Value of 1 means queue stops on every failure (defeats the purpose of predictive testing). Start at 2 and adjust. +* **Misconception:** "This setting isn't important" + * **Reality:** Poorly tuned pending failure depth can either waste significant CI resources or cause frequent queue stalls. It's worth monitoring and adjusting. 
### Next Steps -* [Anti-flake protection](./anti-flake-protection) - Understand the combined mechanism of optimistic merging + Pending Failure Depth -* [Optimistic merging](./optimistic-merging) - The companion feature that enables automated flake clearing -* [Batching](./batching) - How Pending Failure Depth interacts with batch groups and bisection -* [Predictive testing](./predictive-testing) - The foundation that makes successor test runs include predecessor changes +**Initial setup:** + +1. Start with a small value (2) +2. Monitor queue behavior +3. Check metrics for wasted test runs +4. Adjust based on observations + +**Optimize the value:** + +* Queue stalls frequently? → Increase depth +* Excessive retests (>15%)? → Decrease depth +* Make small adjustments and observe impact + +**Monitor performance:** + +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track retest rate and queue throughput +* Watch for patterns: Do failures cascade? Are they independent? +* Adjust pending failure depth based on data + +**Combine with other optimizations:** + +* [Anti-flake protection](/merge-queue/optimizations/anti-flake-protection) - Reduce false failures first +* [Batching](/merge-queue/optimizations/batching) - Understand how pending failure depth affects batch splitting +* [Predictive testing](/merge-queue/optimizations/predictive-testing) - Read the full explanation of how these work together + +**Troubleshooting:** + +* Too many wasted tests → Lower pending failure depth +* Queue stops on every failure → Increase pending failure depth +* Unclear which value to use → Start at 2, monitor for a week diff --git a/merge-queue/optimizations/predictive-testing.mdx b/merge-queue/optimizations/predictive-testing.mdx index 30eb09f..faac193 100644 --- a/merge-queue/optimizations/predictive-testing.mdx +++ b/merge-queue/optimizations/predictive-testing.mdx @@ -1,6 +1,6 @@ --- title: "Predictive testing" -description: "Test PRs against the projected future 
state of your main branch to catch conflicts before they reach production." +description: "Trunk Merge Queue tests pull requests against the projected future state of your main branch, not just the current state." --- ### What it is @@ -10,29 +10,25 @@ This means when multiple PRs are in the queue, each PR is tested as if all the P ### Why use it -Normally, pull requests are tested against a snapshot of the head of `main` when the pull request is posted to your source control provider. This can mean that by the time the pull request is actually merged - the results of the automated testing are **stale**. - +Normally, pull requests are tested against a snapshot of the head of `main` when the pull request is posted to your source control provider. This can mean that by the time the pull request is actually merged - the results of the automated testing are **stale**.\ +\ When you merge a pull request with stale results, you are effectively merging in **un-tested code**. The changes to the protected branch since the test was run create a blind spot in your testing regimen. With predictive testing, you no longer have a blind spot because the merge queue ensures that the pull request is tested against the state of `main` that will exist when your pull request is merged. - - ### What's Happening? The "Happy Path" This example shows how pull requests (PRs) are tested in a queue. PR `B` is tested with the changes from `A`, and `C` is tested with the changes from both `A` and `B`. - - - + +Test your pull request with the changes ahead of it in the queue + - - -
What's Happening?Queue
A begins testingmain <- A
B begins predictive testing by including the changes in Amain <- A <- B+a <- C+ba
C begins predictive testing by including the changes in both A and Bmain <- A <- B+a <- C+ba
as testing completes - pull requests can merge safelymerge A, B, C
+
What's Happening?Queue
A begins testingmain <- A
B begins predictive testing by including the changes in Amain <- A <- B+a <- C+ba
C begins predictive testing by including the changes in both A and Bmain <- A <- B+a <- C+ba
as testing completes - pull requests can merge safelymerge A, B, C
### The "Unhappy Path": How the Queue Handles Test Failures -Predictive testing is effective, but it creates a new challenge: **failure cascades**. +Predictive testing is powerful, but it creates a new challenge: **failure cascades**. -In the "Happy Path" example, if PR `A` introduces a failing test, the predictive tests for `B` and `C` are _also_ guaranteed to fail, because they both include the broken code from `A`. +In the "Happy Path" example, if PR `A` introduces a failing test, the predictive tests for `B` and `C` are *also* guaranteed to fail, because they both include the broken code from `A`. A simple queue would kick `B` and `C` as soon as their tests failed. This would disrupt their authors, who did nothing wrong, and force them to restart their PRs multiple times, wasting valuable CI time . @@ -40,21 +36,17 @@ This is solved by **Pending Failure**. #### How Pending Failure Works -The main purpose of "pending failure" is to **minimize disruptions to the queue** by intelligently finding the _**true**_ source of a failure. +The main purpose of "pending failure" is to **minimize disruptions to the queue** by intelligently finding the ***true*** source of a failure. Instead of immediately kicking a PR just because its test run failed, the queue follows this logic: 1. **A Test Fails**: Let's say PR `C`'s test run fails. -2. **Enter** `Pending Failure` **State**: `C` is _not_ kicked. It enters a `Pending Failure` state and _waits_ for the PRs it depends on (`A` and `B`) to finish testing. +2. **Enter** `Pending Failure` **State**: `C` is *not* kicked. It enters a `Pending Failure` state and *waits* for the PRs it depends on (`A` and `B`) to finish testing. 3. **Identify the Root Cause:** The queue's goal is to determine: "Did this PR fail because of its own code, or did it fail because of a change in a PR ahead of it?". * `C` (failed) waits for `B`. * `B` (also fails) waits for `A`. 
- * When `A` (at the top of the queue) fails, the queue knows it _must_ be the PR that introduced the failure, as it only depends on `main`. + * When `A` (at the top of the queue) fails, the queue knows it *must* be the PR that introduced the failure, as it only depends on `main`. 4. **Minimize Disruption:** The queue only kicks the first faulty PR (`A`). -5. **Automatic Recovery:** PRs `B` and `C` (which are likely healthy) stay in the queue. They are automatically re-scheduled for testing with a new predicted state that _excludes_ the bad PR (e.g., `B` now tests against `main`, and `C` tests against `main + B`). +5. **Automatic Recovery:** PRs `B` and `C` (which are likely healthy) stay in the queue. They are automatically re-scheduled for testing with a new predicted state that *excludes* the bad PR (e.g., `B` now tests against `main`, and `C` tests against `main + B`). **Pending Failure** is the essential recovery mechanism that makes **Predictive Testing** practical. It ensures the queue is resilient and that engineers are not disrupted by test failures they didn't cause. - - -The predecessor-waiting described above is built into the Pending Failure state and always happens. The [Pending Failure Depth](./pending-failure-depth) configuration adds an additional hold: it also waits for _successor_ test runs to complete, which enables [optimistic merging](./optimistic-merging) to automatically clear failures caused by flakes. - diff --git a/merge-queue/optimizations/priority-merging.mdx b/merge-queue/optimizations/priority-merging.mdx index e516027..f22d04b 100644 --- a/merge-queue/optimizations/priority-merging.mdx +++ b/merge-queue/optimizations/priority-merging.mdx @@ -1,12 +1,12 @@ --- title: "Priority merging" -description: "Fast-track critical PRs like hotfixes and security patches to the front of the merge queue." +description: "Priority merging allows you to fast-track critical pull requests to the front of the merge queue." 
--- ### What it is Priority merging allows you to fast-track critical pull requests to the front of the merge queue. -By assigning a priority level to a PR, you can make sure urgent changes (like hotfixes, security patches, or critical bug fixes) merge ahead of regular feature work. PRs with higher priority are tested and merged before lower-priority PRs, regardless of when they were submitted. +By assigning a priority level to a PR, you can ensure urgent changes (like hotfixes, security patches, or critical bug fixes) merge ahead of regular feature work. PRs with higher priority are tested and merged before lower-priority PRs, regardless of when they were submitted. ### Why use it @@ -49,13 +49,7 @@ trunk merge -p #### Valid priority levels -| label | number | note | -|---|---|---| -| urgent | 0 | Production outages, security vulnerabilities.

urgent items will interrupt running jobs and begin testing immediately | -| high | 10 | Urgent bug fixes, important hotfixes | -| medium | 100 | Regular feature work (default) | -| low | 200 | Non-urgent refactors, documentation | -| | 255 | lowest possible priority | +
labelnumbernote
urgent0Production outages, security vulnerabilities.

urgent items will interrupt running jobs and begin testing immediately
high10Urgent bug fixes, important hotfixes
medium100Regular feature work (default)
low200Non-urgent refactors, documentation
255lowest possible priority
### How priority affects PR order @@ -66,28 +60,22 @@ When prioritizing a PR, Merge will explicitly **not interrupt** any currently te **There is an exception to this rule.** Sometimes, when there is a PR urgent enough to get in that it is worth the cost of restarting a currently testing PR, you can move the new PR to the front using the `"urgent"` priority. This is the only time Merge will reschedule a PR that is already in testing. -Another exception: Admins can still merge PRs in absolutely necessary cases outside of the merge queue. Merge Queue handles this gracefully and will react properly to restart the rest of the queue. +Another exception: Admins can still merge PRs in absolutely necessary cases outside of the merge queue. Merge Queue handles this gracefully and will react properly to restart the rest of the queue. #### Example: Say you have a queue that is configured to test two PRs at once. The queue currently looks like this: - -Queue with two testing PRs and one pending - +Queue with two testing PRs and one pending If you submit a PR D with a `"high"` priority it will be put in front of C (since it is a higher priority than C and C is not testing). 
D will begin as soon as either A or B finishes, like this: - -Queue with two testing PRs and a new higher priority pending PR - +Queue with two testing PRs and a new higher priority pending PR Instead, if you submit PR D with an `"urgent"` priority, then D would be tested immediately, A would be restarted, and B would be bumped back to pending, like this: - -Queue with an urgent PR moved to the front and a normal PR restarting - +Queue with an urgent PR moved to the front and a normal PR restarting ### Visual indicators in the dashboard @@ -100,7 +88,7 @@ When a PR is queued with a non-default priority, the merge queue graph view disp | Medium | No badge | Default priority — no visual indicator | | Low | No badge | Lower-than-default priority — no visual indicator | -Priority badges appear on individual PR nodes in the [graph view](../using-the-queue/monitor-queue-status#graph-view). Batch nodes (multiple PRs tested together) do not display priority badges. +Priority badges appear on individual PR nodes in the [graph view](/merge-queue/using-the-queue/monitor-queue-status#graph-view). Batch nodes (multiple PRs tested together) do not display priority badges. ### Tradeoffs and considerations @@ -169,11 +157,10 @@ Don't use high priority for: **For true emergencies:** -* If priority isn't fast enough → [Emergency pull requests](../using-the-queue/emergency-pull-requests) -* If branch protection is blocking an admin merge → [Force merge](../using-the-queue/force-merge) +* If priority isn't fast enough → Emergency pull requests * If you need to pause the queue → **Settings** > **Repositories** > your repository > **Merge Queue** > **Merge Queue State** and select **Paused** from the dropdown. 
**Monitor impact:** -* [Metrics and monitoring](../administration/metrics) - Track priority PR usage and merge times -* Watch for priority overuse (should be \<5% of PRs) +* [Metrics and monitoring](/merge-queue/administration/metrics) - Track priority PR usage and merge times +* Watch for priority overuse (should be <5% of PRs) diff --git a/merge-queue/reference/index.mdx b/merge-queue/reference.mdx similarity index 72% rename from merge-queue/reference/index.mdx rename to merge-queue/reference.mdx index 0aeb121..cb895d0 100644 --- a/merge-queue/reference/index.mdx +++ b/merge-queue/reference.mdx @@ -6,16 +6,16 @@ Quick-lookup documentation for command-line tools, APIs, and troubleshooting. Th ### Command-line and API -[**CLI reference**](./merge-queue-cli-reference)\ +[**CLI reference**](/merge-queue/reference/merge-queue-cli-reference)\ Complete command reference for the Trunk CLI tool. -[**API reference**](./merge)\ +[**API reference**](/merge-queue/reference/merge)\ Programmatic access to queue operations via REST API. ### Help and troubleshooting -[**FAQ**](./common-problems)\ +[**FAQ**](/merge-queue/reference/common-problems)\ Common questions about merge queue behavior and features. -[**Troubleshooting**](./troubleshooting)\ +[**Troubleshooting**](/merge-queue/reference/troubleshooting)\ Diagnose and resolve common issues with queue setup and operation. diff --git a/merge-queue/reference/common-problems.mdx b/merge-queue/reference/common-problems.mdx index 115bcec..cebdd0f 100644 --- a/merge-queue/reference/common-problems.mdx +++ b/merge-queue/reference/common-problems.mdx @@ -2,29 +2,40 @@ title: "FAQ" description: "Troubleshooting and FAQ" --- - #### Entering the Queue - - +
+ +Can I add a pull request to the queue before all required checks pass? + Yes. A pull request can be submitted to the queue at any time, even if it's not yet ready to merge. The pull request will enter the queue in a "Queued" state and wait for all branch protection rules (like passing status checks and required reviews) to be met. Once the PR is ready, Trunk Merge Queue will automatically move it into the testing phase. - - +
+ +
+ +Why isn’t my pull request entering the queue? + First, check the Trunk web app to see what Trunk is waiting on before putting your PR into the merge queue. Next, if something on that page doesn't look right, for example, it says that GitHub is still checking the mergeability of the PR, comment `/trunk merge` again in the PR. - - -Most likely, you did not set up the required status checks to trigger for `trunk-merge/` branches. It is also possible that your CI provider just randomly never started testing on the Trunk Merge Queue branch, even after setting the required status checks to trigger. To assist with this, you can [configure a testing timeout](../administration/advanced-settings#timeout-for-tests-to-complete). - - +
+ +
+ +Why aren't my required checks triggering, even though my pull request is being tested in queue? + +Most likely, you did not set up the required status checks to trigger for `trunk-merge/` branches. It is also possible that your CI provider just randomly never started testing on the Trunk Merge Queue branch, even after setting the required status checks to trigger. To assist with this, you can [configure a testing timeout](/merge-queue/administration/advanced-settings#timeout-for-tests-to-complete). + +
#### Merge Behavior - - +
+ +Can I choose the merge strategy for my pull requests? + Yes! Trunk Merge Queue supports three merge methods: * **Squash** (default) - Combines all commits into a single commit @@ -36,98 +47,125 @@ The merge method is configured at the repository level in Settings > Repositorie **Note:** The merge method applies to the entire repository, not on a per-PR basis. See [Merge Method documentation](/merge-queue/administration/advanced-settings#merge-method) for detailed information on each option and how to configure your preference. - - +
+ +
 + +How does Trunk handle commit messages? + Commit messages depend on your configured merge method: * **Squash** (default): The commit message is automatically generated from the pull request's title and description, following GitHub's default behavior * **Merge Commit**: Preserves all individual commit messages from the PR and creates an additional merge commit message * **Rebase**: Preserves all individual commit messages from the PR as they are replayed onto the target branch -You can override the merge commit title for any PR by adding `merge-commit-title: Your Custom Title` on its own line in the PR body. See [Custom merge commit titles](../using-the-queue/reference#custom-merge-commit-titles) for details. +You can configure your preferred merge method in [Advanced Settings](/merge-queue/administration/advanced-settings#merge-method). -You can configure your preferred merge method in [Advanced Settings](../administration/advanced-settings#merge-method). - 
+ +
+ +Do Optimistic Merging or Batching ever merge multiple pull requests into a single commit? - No. Pull requests are always merged individually, and each PR will result in a separate commit in your `main` branch's history, regardless of your configuration. Features like Optimistic Merging and Batching are validation and testing strategies, not merging strategies. -* [Optimistic Merging](../optimizations/optimistic-merging) uses the successful test of a pull request later in the queue to validate all the PRs ahead of it in the queue, allowing the entire sequence to be merged without waiting for the earlier PRs to finish testing. -* [Batching](../optimizations/batching) allows the queue to _test_ multiple PRs in a single CI job to save time and resources. After the test passes, the PRs in the batch are still merged one by one. - - +* [Optimistic Merging](/merge-queue/optimizations/optimistic-merging) uses the successful test of a pull request later in the queue to validate all the PRs ahead of it in the queue, allowing the entire sequence to be merged without waiting for the earlier PRs to finish testing. +* [Batching](/merge-queue/optimizations/batching) allows the queue to *test* multiple PRs in a single CI job to save time and resources. After the test passes, the PRs in the batch are still merged one by one. + +
#### Queue Configuration - - -Yes! You can create multiple queues within a single repository, with each queue targeting a different branch (e.g., `main`, `staging`, `release/v2`). Each queue operates independently with its own settings, required statuses, and merge behavior. A branch can only be associated with one queue. +
+ +Can I create multiple merge queues for a single repository? + +Currently, Trunk Merge Queue supports one merge queue per repository. If this is critical for your use case, [talk to us](/setup-and-administration/support) and we'll consider adding support for your use case. -To create an additional queue, click **New Queue** from the Merge Queue dashboard and select the same repository with a different target branch. See [Multiple queues per repository](../administration/advanced-settings#multiple-queues-per-repository) for details. +For validating significant changes to your CI process or queue configuration without impacting your primary workflow, the recommended approach is to use a fork of your repository. You can set up and test a separate merge queue on the fork to ensure your changes work as expected before applying them to your primary repository. -For validating significant changes to your CI process or queue configuration without impacting your primary workflow, you can use a fork of your repository. Set up and test a separate merge queue on the fork to make sure your changes work as expected before applying them to your primary repository. - +
+ +
+ +What are trunk-temp/* branches, and should CI run on them? - No, you should configure your CI to completely ignore `trunk-temp/*` branches. Running workflows on them will only create unnecessary or canceled builds. The `trunk-temp/*` branch is a temporary, intermediate branch that the merge queue uses to assemble the necessary commits for a test run. Once the build is prepared, this branch is immediately renamed to a `trunk-merge/*` branch. - - + +
#### Priority & Overrides - - -**Recommended approach:** Use [PR Prioritization](../optimizations/priority-merging) to fast-track your PR through the queue while still validating it: +
+ +How can I merge a pull request immediately? + +**Recommended approach:** Use [PR Prioritization](/merge-queue/optimizations/priority-merging) to fast-track your PR through the queue while still validating it: ``` /trunk merge --priority=urgent ``` The `urgent` priority is the only level that will interrupt currently testing PRs. Your PR will immediately begin testing, and other PRs will restart after yours completes. - - -**Recommended approach:** Use [PR Prioritization](../optimizations/priority-merging) to fast-track your PR through the queue while still validating it: +
+ +
+ +How do I merge an emergency pull request right now? + +**Recommended approach:** Use [PR Prioritization](/merge-queue/optimizations/priority-merging) to fast-track your PR through the queue while still validating it: ``` /trunk merge --priority=urgent ``` The `urgent` priority is the only level that will interrupt currently testing PRs. Your PR will immediately begin testing, and other PRs will restart after yours completes. - - + +
### Failures, Errors & Debugging - - +
+ +How am I notified if my pull request fails in the queue? + When a pull request is removed from the queue due to a failure, the Trunk bot updates its comment on the original PR. This update includes direct links to the specific workflows that failed, allowing you to quickly investigate and resolve the issue. Example below. - -![Example of a Trunk bot PR comment, detailing a failed status check that caused the PR to be removed from the merge queue.](/assets/Screenshot_2025-10-09_at_3.28.03_PM.png) - - +
Example of a Trunk bot PR comment, detailing a failed status check that caused the PR to be removed from the merge queue.
+ +
+ +
- -Most likely, you have a [branch protection rule](../getting-started/configure-branch-protection) that affects merge branches. +Why does my pull request consistently fail during testing due to "GitHub errors"? + +Most likely, you have a [branch protection rule](/merge-queue/getting-started/configure-branch-protection) that affects merge branches. For example, the wild card rule `*/*` applies to `trunk-merge/...`. The Trunk GitHub app does not have admin privileges, so it fails to do some actions on protected branches. To resolve this, you must remove this rule or reach out to Trunk on our community Slack if that is not possible. - - +
+ +
+ +Why does my pull request keep failing to merge in the queue? + The two most likely problems are that you are restricting **who can merge** or that you have **disabled squash merges** into your repo. Trunk Merge Queue needs to use squash merges. To fix this, turn on `'allow squash merges'` for this repo in your GitHub setup. - - +
+ +
+ +Why do Dependabot and Renovate pull requests keep getting kicked from the queue? + By default, both [dependabot](https://docs.github.com/en/code-security/dependabot/working-with-dependabot/managing-pull-requests-for-dependency-updates#changing-the-rebase-strategy-for-dependabot-pull-requests) and [renovate](https://docs.renovatebot.com/updating-rebasing/#updating-and-rebasing-branches) both will rebase their PRs whenever other PRs merge into their base branch. If that rebase happens when those PRs are in the queue, they will get kicked since they were updated. There are two ways to mitigate this: 1. Both dependabot and renovate can be configured to not automatically rebase, while renovate can specifically be configured to only rebase if there's a merge conflict ([dependabot](https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#rebase-strategy), [renovate](https://docs.renovatebot.com/configuration-options/#rebasewhen)) 2. Their PRs can be manually merged, and anything currently in the merge queue will restart with those merged changes - - +
diff --git a/merge-queue/reference/how-does-it-work.mdx b/merge-queue/reference/how-does-it-work.mdx deleted file mode 100644 index cfda430..0000000 --- a/merge-queue/reference/how-does-it-work.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: "How does it work?" -description: "Merge Queue creates predictive branches to verify that the future state of your protected branch will remain green when the contents of the queue merge into it." -hidden: true ---- -### **How does a merge queue work?** - -1. Instead of merging your pull requests directly through GitHub, engineers submit their pull requests to the Merge Queue service. A pull request can be submitted to the queue before passing CI or code review is complete. Once the prerequisites for the queue have been met, the pull request will formally enter the queue. -2. Trunk Merge Queue will test your pull request against the changes ahead of it in the queue so that the changes are tested against the predicted view of the branch, assuming everything ahead of it merges successfully. This process is called [**predictive testing** ](/broken/pages/BAKgbuxqWos5o4kna99T)and is illustrated in the video below. -3. When all the required tests are passed, Trunk will **squash merge** your pull request into the protected branch automatically. -4. If your pull requests fail tests, they will either be retested or removed from the queue for further inspection by the author. - - - - - - -**What happens under the hood when I submit a pull request to the queue?** - -1. Trunk Merge Queue will wait until all the normal gating tests for a pull request are passing. By default, this is the list of required status checks and code review requirements you have configured for pull requests to pass before they can be merged onto `main` -2. Once the pre-requisites are met, Trunk Merge Queue will create a temporary branch with the naming convention `trunk-merge/***`. 
This branch will be based on the head of `main` and will include the changes in your pull request and the changes from pull requests ahead of your pull request in the queue. This is the predictive branch described above and is used to guarantee the correctness of your system. -3. The same tests that are required for your pull request will now be run on this predictive branch. When those tests pass - your original pull request will be merged into `main`. - -### Key concepts - - - - - - - - - - - - - - - - - - - - -### **Requirements** - -Trunk Merge Queue works with any CI provider as long as you use GitHub for your repo hosting. - -### **Next steps** - - - - - - - diff --git a/merge-queue/reference/merge-queue-cli-reference.mdx b/merge-queue/reference/merge-queue-cli-reference.mdx index d0be255..f37d3c9 100644 --- a/merge-queue/reference/merge-queue-cli-reference.mdx +++ b/merge-queue/reference/merge-queue-cli-reference.mdx @@ -1,23 +1,39 @@ --- title: "CLI reference" -description: "Trunk CLI commands for submitting, canceling, pausing, and resuming the merge queue." +description: "The Trunk CLI allows you to insert and remove PRs into the Merge Queue. You can also pause and resume the queue from the CLI." --- The Trunk CLI allows you to insert and remove PRs into the Merge Queue. You can also pause and resume the queue from the CLI. ### Installation - -```bash macOS/Linux + + + + +```bash curl -LO https://trunk.io/releases/trunk chmod +x trunk ``` -```bash Homebrew + + + + + +```bash brew install trunk-io ``` -```bash Windows + + + + + +```bash curl https://get.trunk.io -fsSL | bash ``` - + + + + ### Prerequisites @@ -45,7 +61,7 @@ trunk merge [flags] **Flags** -`-p, --priority <0-255>` - Priority determines the order PRs are tested and merged. When a PR is submitted with a priority, it will begin testing before any lower priority PR that isn't currently being tested. Levels: +`-p, --priority <0-255>` - Priority determines the order PRs are tested and merged. 
When a PR is submitted with a priority, it will begin testing before any lower priority PR that isn't currently being tested. Levels: * `0` (Urgent) **Interrupts currently testing PRs** to begin testing immediately. Use sparingly as restarting tests is costly. This is the only priority that interrupts running tests * `10` (High) Tests before default priority PRs @@ -53,27 +69,38 @@ trunk merge [flags] * `200` (Low) Tests after default priority PRs * `255` ( Lowest) Lowest possible priority -If multiple PRs have the same priority, they are processed in the order they were submitted. See [Priority Levels](/broken/pages/KBGLXshIgaxHjvuU5rpy). +If multiple PRs have the same priority, they are processed in the order they were submitted. See [Priority Levels](/merge-queue/optimizations/priority-merging). **Examples** + + ```bash trunk merge 1234 ``` + + + ```bash trunk merge 1234 -p 10 ``` + + + ```bash trunk merge 1234 -p 200 ``` + + + Other PRs testing in the queue will be restarted behind PR 1234 @@ -81,7 +108,9 @@ Other PRs testing in the queue will be restarted behind PR 1234 ```bash trunk merge 1234 -p 0 ``` + + #### Check Queue Status diff --git a/merge-queue/reference/merge.mdx b/merge-queue/reference/merge.mdx index f7d7732..a6d927f 100644 --- a/merge-queue/reference/merge.mdx +++ b/merge-queue/reference/merge.mdx @@ -6,23 +6,23 @@ The Trunk Merge Queue API lets you manage pull requests, configure queues, and m The API is an HTTP REST API hosted at `https://api.trunk.io/v1`. It returns JSON from all requests and uses standard HTTP response codes. -All requests must be [authenticated](../../setup-and-administration/apis/#authentication) by providing the `x-api-token` header. +All requests must be [authenticated](/setup-and-administration/apis#authentication) by providing the `x-api-token` header. 
## Endpoint summary -| Endpoint | Method | Description | -| --- | --- | --- | -| [`/submitPullRequest`](/merge-queue/reference/merge/submit-a-pull-request-to-a-merge-queue) | POST | Submit a PR to the merge queue for testing and merging | -| [`/cancelPullRequest`](/merge-queue/reference/merge/cancel-a-pull-request-in-a-merge-queue) | POST | Remove a PR from the merge queue | -| [`/restartTestsOnPullRequest`](/merge-queue/reference/merge/restart-tests-on-a-pull-request-in-a-merge-queue) | POST | Re-run tests on a PR currently in the queue | -| [`/getSubmittedPullRequest`](/merge-queue/reference/merge/get-a-submitted-pull-request-from-a-merge-queue) | POST | Check the status of a submitted PR | -| [`/setImpactedTargets`](/merge-queue/reference/merge/set-impacted-targets-for-a-pull-request) | POST | Set impacted targets for a PR (used with [parallel queues](../optimizations/parallel-queues/)) | -| [`/getMergeQueueTestingDetails`](/merge-queue/reference/merge/get-details-about-testing-that-merge-queue-is-performing) | POST | Get details about in-progress merge queue testing | -| [`/createQueue`](/merge-queue/reference/merge/create-a-new-merge-queue) | POST | Create a new merge queue for a branch | -| [`/deleteQueue`](/merge-queue/reference/merge/delete-the-specified-merge-queue-the-queue-must-be-empty-in-order-to-be-deleted) | POST | Delete an empty merge queue | -| [`/getQueue`](/merge-queue/reference/merge/get-the-merge-queue) | POST | Get queue state, configuration, and enqueued PRs | -| [`/updateQueue`](/merge-queue/reference/merge/update-the-merge-queue) | POST | Update queue configuration (mode, concurrency, batching, etc.) 
| -| [`/getMergeQueueMetrics`](/merge-queue/reference/merge/get-prometheus-format-metrics-for-a-merge-queue) | GET | Get Prometheus-format metrics for monitoring | +| Endpoint | Method | Description | +| ------------------------------------------------------------------- | ------ | ---------------------------------------------------------------------------------------------------------------------------- | +| [`/submitPullRequest`](#post-submitpullrequest) | POST | Submit a PR to the merge queue for testing and merging | +| [`/cancelPullRequest`](#post-cancelpullrequest) | POST | Remove a PR from the merge queue | +| [`/restartTestsOnPullRequest`](#post-restarttestsonpullrequest) | POST | Re-run tests on a PR currently in the queue | +| [`/getSubmittedPullRequest`](#post-getsubmittedpullrequest) | POST | Check the status of a submitted PR | +| [`/setImpactedTargets`](#post-setimpactedtargets) | POST | Set impacted targets for a PR (used with [parallel queues](/merge-queue/optimizations/parallel-queues)) | +| [`/getMergeQueueTestingDetails`](#post-getmergequeuetestingdetails) | POST | Get details about in-progress merge queue testing | +| [`/createQueue`](#post-createqueue) | POST | Create a new merge queue for a branch | +| [`/deleteQueue`](#post-deletequeue) | POST | Delete an empty merge queue | +| [`/getQueue`](#post-getqueue) | POST | Get queue state, configuration, and enqueued PRs | +| [`/updateQueue`](#post-updatequeue) | POST | Update queue configuration (mode, concurrency, batching, etc.) | +| [`/getMergeQueueMetrics`](#prometheus-metrics) | GET | Get Prometheus-format metrics for monitoring | ## Common use cases @@ -36,8 +36,6 @@ All requests must be [authenticated](../../setup-and-administration/apis/#authen ## Request format -Request bodies must not exceed 20 MiB. 
- Most endpoints accept a JSON request body with these common fields: ```json @@ -54,13 +52,13 @@ Most endpoints accept a JSON request body with these common fields: } ``` -| Field | Type | Required | Description | -| --- | --- | --- | --- | -| `repo.host` | string | Yes | Repository host (e.g., `github.com`) | -| `repo.owner` | string | Yes | Repository owner or organization | -| `repo.name` | string | Yes | Repository name | -| `targetBranch` | string | Yes | The branch the merge queue targets | -| `pr.number` | integer | Varies | The pull request number (required for PR endpoints) | +| Field | Type | Required | Description | +| -------------- | ------- | -------- | --------------------------------------------------- | +| `repo.host` | string | Yes | Repository host (e.g., `github.com`) | +| `repo.owner` | string | Yes | Repository owner or organization | +| `repo.name` | string | Yes | Repository name | +| `targetBranch` | string | Yes | The branch the merge queue targets | +| `pr.number` | integer | Varies | The pull request number (required for PR endpoints) | ## Examples @@ -126,58 +124,115 @@ The response includes the queue state (`RUNNING`, `PAUSED`, `DRAINING`, or `SWIT ## Pull request endpoints - - - - - - - - +## POST /cancelPullRequest - -Large monorepos can produce target lists that exceed the 20 MiB request body limit. If you hit this limit, send `"ALL"` as the `impactedTargets` value to mark the PR as impacting every target. - +> Cancel a pull request in a merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/cancelPullRequest":{"post":{"summary":"Cancel a pull request in a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /getSubmittedPullRequest + +> Get a submitted pull request from a merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/getSubmittedPullRequest":{"post":{"summary":"Get a submitted pull request from a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"id":{"type":"string"},"state":{"type":"string","enum":["NOT_READY","PENDING","TESTING","TESTS_PASSED","MERGED","FAILED","CANCELLED","PENDING_FAILURE"]},"readiness":{"type":"object","properties":{"hasImpactedTargets":{"type":"boolean"},"requiresImpactedTargets":{"type":"boolean"},"doesBaseBranchMatch":{"type":"boolean"},"gitHubMergeability":{"type":"string","enum":["UNSPECIFIED","IN_PROGRESS","MERGEABLE","NOT_MERGEABLE"]}},"required":["requiresImpactedTargets","doesBaseBranchMatch","gitHubMergeability"]},"stateChangedAt":{"type":"string"},"priorityValue":{"type":"number"},"priorityName":{"type":"string"},"usedDefaultPriorityName":{"type":"string"},"skipTheLine":{"type":"boolean"},"forceEnqueued":{"type":"boolean"},"isCurrentlySubmittedToQueue":{"type":"boolean"},"prNumber":{"type":"number"},"prTitle":{"type":"string"},"prSha":{"type":"string"},"prBaseBranch":{"type":"string"},"prAuthor":{"type":"string"}},"required":["stateChangedAt","priorityValue","priorityName","skipTheLine","forceEnqueued","isCurrentlySubmittedToQueue","prNumber","prTitle","prSha","prBaseBranch","prAutho
r"]}}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /restartTestsOnPullRequest + +> Restart tests on a pull request in a merge queue. + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/restartTestsOnPullRequest":{"post":{"summary":"Restart tests on a pull request in a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /setImpactedTargets + +> Set impacted targets for a pull request. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/setImpactedTargets":{"post":{"summary":"Set impacted targets for a pull request.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":1,"maximum":4294967295},"sha":{"type":"string"}},"required":["number","sha"]},"targetBranch":{"type":"string"},"impactedTargets":{"anyOf":[{"type":"array","items":{"type":"string"}},{"type":"string","const":"ALL"}]}},"required":["repo","pr","targetBranch","impactedTargets"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /submitPullRequest + +> Submit a pull request to a merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/submitPullRequest":{"post":{"summary":"Submit a pull request to a merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"pr":{"type":"object","properties":{"number":{"type":"integer","minimum":0,"maximum":4294967295}},"required":["number"]},"targetBranch":{"type":"string"},"priority":{"anyOf":[{"type":"integer","minimum":0,"maximum":4294967295},{"type":"string"},{"type":"null"}]},"noBatch":{"type":"boolean"}},"required":["repo","pr","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /getMergeQueueTestingDetails + +> Get details about testing that Merge Queue is performing + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/getMergeQueueTestingDetails":{"post":{"summary":"Get details about testing that Merge Queue is 
performing","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"testRunId":{"type":"string"},"targetBranch":{"type":"string"}},"required":["repo","testRunId","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"requiredStatuses":{"type":"array","items":{"type":"string"}},"requiredStatusesSource":{"type":"string","enum":["TRUNK_CONFIG","REPO_PROVIDER_BRANCH_PROTECTION"]},"testBranch":{"type":"string"},"testBranchSha":{"type":"string"},"createdAt":{"type":"string"},"status":{"type":"string","enum":["IN_PROGRESS","FAILED","CANCELLED","SUCCEEDED"]},"checkSuites":{"type":"array","items":{"type":"object","properties":{"checkRuns":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string"},"url":{"type":"string"},"status":{"type":"string","enum":["QUEUED","IN_PROGRESS","COMPLETED"]},"conclusion":{"type":"string","enum":["ACTION_REQUIRED","CANCELLED","FAILURE","NEUTRAL","SUCCESS","SKIPPED","STALE","TIMED_OUT"]}},"required":["name","url"]}}},"required":["checkRuns"]}},"statusChecks":{"type":"array","items":{"type":"object","properties":{"context":{"type":"string"},"url":{"type":"string"},"state":{"type":"string","enum":["ERROR","FAILURE","PENDING","SUCCESS"]}},"required":["context"]}},"testedPullRequests":{"type":"array","items":{"type":"object","properties":{"prNumber":{"type":"number"},"prUrl":{"type":"string"},"title":{"type":"string"}},"required":["prNumber","prUrl","title"]}},"impactedTargets":{"type":"array","items":{"type":"string"}},"dependentPrs":{"type":"array","items":{"type":"number"}}},"required":["requiredStatuses","testBranch","testBranchSha","checkSuites","statusChecks","testedPullRequests"]}}}},"400":{"description":"Bad 
Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` ## Metrics endpoints ### Prometheus metrics - - `GET /v1/getMergeQueueMetrics` Returns merge queue metrics in Prometheus text exposition format. Authenticate with the `x-api-token` header. -| Parameter | Required | Description | -| --- | --- | --- | -| `repo` | No | Repository in `owner/name` format. If omitted, returns metrics for all repositories in the organization. Must be provided together with `repoHost`. | -| `repoHost` | Conditional | Repository host (e.g., `github.com`). Required if `repo` is specified. | +| Parameter | Required | Description | +| ---------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| `repo` | No | Repository in `owner/name` format. If omitted, returns metrics for all repositories in the organization. Must be provided together with `repoHost`. | +| `repoHost` | Conditional | Repository host (e.g., `github.com`). Required if `repo` is specified. | Response content type: `text/plain; version=0.0.4; charset=utf-8` -See [Prometheus metrics endpoint](../administration/metrics#prometheus-metrics-endpoint) for the full list of available metrics, scrape configuration, and example queries. +See [Prometheus metrics endpoint](/merge-queue/administration/metrics#prometheus-metrics-endpoint) for the full list of available metrics, scrape configuration, and example queries. ## Queue endpoints -Use these endpoints to create, configure, and manage merge queues. Each queue targets a specific branch in your repository. 
For more on running multiple queues, see [parallel queues](../optimizations/parallel-queues/). +Use these endpoints to create, configure, and manage merge queues. Each queue targets a specific branch in your repository. For more on running multiple queues, see [parallel queues](/merge-queue/optimizations/parallel-queues). + +## POST /createQueue + +> Create a new merge queue. + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/createQueue":{"post":{"summary":"Create a new merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string"},"mode":{"type":"string","enum":["single","parallel"]},"concurrency":{"type":"integer","minimum":1,"maximum":4294967295}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /deleteQueue - - - - - - +> Delete the specified merge queue. The queue must be empty in order to be deleted. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/deleteQueue":{"post":{"summary":"Delete the specified merge queue. The queue must be empty in order to be deleted.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string"}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` The queue must be empty before it can be deleted. Cancel or merge all enqueued PRs first. +## POST /getQueue + +> Get the merge queue. 
+ +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/getQueue":{"post":{"summary":"Get the merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string"}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"type":"object","properties":{"state":{"type":"string","enum":["RUNNING","PAUSED","DRAINING","SWITCHING_MODES"]},"branch":{"type":"string"},"concurrency":{"type":"number"},"testingTimeoutMins":{"type":"number"},"mode":{"type":"string","enum":["SINGLE","PARALLEL"]},"canOptimisticallyMerge":{"type":"boolean"},"pendingFailureDepth":{"type":"number"},"isBatching":{"type":"boolean"},"batchingMaxWaitTimeMins":{"type":"number"},"batchingMinSize":{"type":"number"},"createPrsForTestingBranches":{"type":"boolean"},"enqueuedPullRequests":{"type":"array","items":{"type":"object","properties":{"id":{"type":"string"},"state":{"type":"string","enum":["NOT_READY","PENDING","TESTING","TESTS_PASSED","MERGED","FAILED","CANCELLED","PENDING_FAILURE"]},"stateChangedAt":{"type":"string"},"priorityValue":{"type":"number"},"priorityName":{"type":"string"},"usedDefaultPriorityName":{"type":"string"},"skipTheLine":{"type":"boolean"},"prNumber":{"type":"number"},"prTitle":{"type":"string"},"prSha":{"type":"string"},"prBaseBranch":{"type":"string"},"prAuthor":{"type":"string"}},"required":["stateChangedAt","priorityValue","priorityName","skipTheLine","prNumber","prTitle","prSha","prBaseBranch","prAuthor"]}}},"required":["branch","concurrency","testingTimeoutMins","canOptimisticallyMerge","pendingFa
ilureDepth","isBatching","batchingMaxWaitTimeMins","batchingMinSize","createPrsForTestingBranches","enqueuedPullRequests"]}}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` + +## POST /updateQueue + +> Update the merge queue. + +```json +{"openapi":"3.1.0","info":{"title":"Trunk APIs","version":"1.0.0"},"servers":[{"url":"https://api.trunk.io/v1"}],"security":[{"ApiKeyAuth":[]}],"components":{"securitySchemes":{"ApiKeyAuth":{"type":"apiKey","in":"header","name":"x-api-token"}}},"paths":{"/updateQueue":{"post":{"summary":"Update the merge queue.","requestBody":{"content":{"application/json":{"schema":{"type":"object","properties":{"repo":{"type":"object","properties":{"host":{"type":"string"},"owner":{"type":"string"},"name":{"type":"string"}},"required":["host","owner","name"]},"targetBranch":{"type":"string","description":"The branch that the merge queue is targeting."},"state":{"type":"string","enum":["RUNNING","PAUSED","DRAINING"],"description":"The desired state of the merge queue. 
Valid values: RUNNING, PAUSED, DRAINING."},"concurrency":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The number of PRs or batches of PRs the queue can test at once."},"bisectionConcurrency":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The number of tests the merge queue can run when bisecting a batch to figure out what PR in the batch failed."},"testingTimeoutMinutes":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The maximum number of minutes the merge queue will wait for tests to complete before timing out."},"pendingFailureDepth":{"type":"integer","minimum":1,"maximum":4294967295,"description":"When enabled, PRs that fail tests will wait for the specified number of PRs below them to finish testing before getting kicked from the queue. This works best with optimistic merging enabled."},"canOptimisticallyMerge":{"type":"boolean","description":"When enabled, a PR that passes tests will also cause any PR ahead of it in the queue to also get marked as passing, since tests have passed with those commits."},"batch":{"type":"boolean","description":"Enable or disable batching. When enabled, the merge queue will group PRs into batches for testing."},"batchingMaxWaitTimeMinutes":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The maximum number of minutes the merge queue will wait to collect PRs into a batch before starting tests."},"batchingMinSize":{"type":"integer","minimum":1,"maximum":4294967295,"description":"The minimum number of PRs required to form a batch."},"mode":{"type":"string","enum":["single","parallel"],"description":"The queue mode. 'single' processes PRs one at a time. 
'parallel' processes multiple PRs concurrently."},"commentsEnabled":{"type":"boolean","description":"Whether or not Merge Queue will post GitHub comments on PRs."},"commandsEnabled":{"type":"boolean","description":"Whether or not users are allowed to submit PRs to the merge queue by commenting `/trunk merge`."},"createPrsForTestingBranches":{"type":"boolean","description":"Whether or not the merge queue will create PRs for its testing branches, allowing CI to run on them."},"directMergeMode":{"type":"string","enum":["OFF","ALWAYS"],"description":"Allow PRs to merge directly into the target branch if they're up to date with the target branch when submitting them to the queue instead of running tests on them in the merge queue."},"optimizationMode":{"type":"string","enum":["OFF","BISECTION_SKIP_REDUNDANT_TESTS"],"description":"The optimization strategy for the merge queue. 'OFF' disables optimizations. 'BISECTION_SKIP_REDUNDANT_TESTS' uses bisection and skips redundant tests."},"mergeMethod":{"type":"string","enum":["MERGE_COMMIT","SQUASH","REBASE"],"description":"The Git merge method used when merging PRs into the target branch. Valid values: MERGE_COMMIT, SQUASH, REBASE."},"statusCheckEnabled":{"type":"boolean","description":"Post a GitHub status check on PRs with the status of the PR in the merge queue."},"requiredStatuses":{"type":"array","items":{"type":"string"},"description":"Allows setting the statuses that must pass when the merge queue performs tests in order for a PR to merge. Setting the statuses here will override GitHub branch protection settings or your `.trunk/trunk.yaml`."},"deleteRequiredStatuses":{"type":"boolean","description":"Removes a manually specified set of required statuses. 
After this, the statuses that must pass when the merge queue performs testing will be pulled from either GitHub branch protection settings or your `.trunk/trunk.yaml`."}},"required":["repo","targetBranch"]}}}},"responses":{"200":{"description":"OK","content":{"application/json":{}}},"400":{"description":"Bad Request","content":{"application/plain-text":{"schema":{"type":"string"}}}},"401":{"description":"Unauthorized","content":{"application/plain-text":{"schema":{"type":"string"}}}},"404":{"description":"Not Found","content":{"application/plain-text":{"schema":{"type":"string"}}}},"500":{"description":"Internal Server Error","content":{"application/plain-text":{"schema":{"type":"string"}}}}}}}}} +``` ## Related resources -* [CLI reference](./merge-queue-cli-reference) — Command-line interface for merge queue operations -* [Metrics and monitoring](../administration/metrics) — Dashboard analytics and Prometheus endpoint details -* [Webhooks](../webhooks) — Event-driven notifications for queue activity -* [Settings and configurations](../administration/advanced-settings) — Queue settings available in the Trunk web app -* [Authentication](../../setup-and-administration/apis/#authentication) — API token setup and management +* [CLI reference](/merge-queue/reference/merge-queue-cli-reference) — Command-line interface for merge queue operations +* [Metrics and monitoring](/merge-queue/administration/metrics) — Dashboard analytics and Prometheus endpoint details +* [Webhooks](/merge-queue/webhooks) — Event-driven notifications for queue activity +* [Settings and configurations](/merge-queue/administration/advanced-settings) — Queue settings available in the Trunk web app +* [Authentication](/setup-and-administration/apis#authentication) — API token setup and management diff --git a/merge-queue/reference/troubleshooting.mdx b/merge-queue/reference/troubleshooting.mdx index d15a788..2647a27 100644 --- a/merge-queue/reference/troubleshooting.mdx +++ 
b/merge-queue/reference/troubleshooting.mdx @@ -1,48 +1,59 @@ --- title: "Troubleshooting" -description: "Common Trunk Merge Queue issues and how to fix them, including permission errors, stuck PRs, and missing status checks." +description: "If your test PR doesn't merge automatically:" --- - #### Troubleshooting common issues -Visit [Trunk Support](../../setup-and-administration/support) for additional assistance or to contact the support team. +Visit [Trunk Support](/setup-and-administration/support) for additional assistance or to contact the support team. If your test PR doesn't merge automatically: * **Check the status comments for the PR in** the [Trunk Dashboard](https://app.trunk.io/) to see what it's waiting for -* **Stuck in "Queued"**: Usually means branch protection rules haven't passed (missing required status checks or code review) or there are merge conflicts. If the status looks correct but the PR still won't enter the queue, try [removing](/broken/pages/c7O7hgOoGwFcANCdzUMZ#manually-restarting-failed-prs) and re-adding by commenting `/trunk merge` again on the PR. +* **Stuck in "Queued"**: Usually means branch protection rules haven't passed (missing required status checks or code review) or there are merge conflicts. If the status looks correct but the PR still won't enter the queue, try [removing](/merge-queue/using-the-queue/reference) and re-adding by commenting `/trunk merge` again on the PR. * **Fails when attempting to merge**: Check that squash merges are enabled for your repository in GitHub settings (`Settings > General > Allow squash merging`). Trunk Merge Queue requires squash merges to be enabled. -* **"Permission denied" errors**: Review the [Branch Protection](/broken/pages/zvDo6oVz6lP1OOz5wOUB#configure-branch-protection-rules) guide to make sure `trunk-temp/*` and `trunk-merge/*` branches aren't protected by wildcard rules like `*/*`. 
-* **Status checks not running**: Verify your CI is configured to run on draft PRs (or `trunk-merge/**` branches if using push-triggered mode). See the [Branch Protection](/broken/pages/zvDo6oVz6lP1OOz5wOUB#configure-branch-protection-rules) guide for details. +* **"Permission denied" errors**: Review the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide to ensure `trunk-temp/*` and `trunk-merge/*` branches aren't protected by wildcard rules like `*/*`. +* **Status checks not running**: Verify your CI is configured to run on draft PRs (or `trunk-merge/**` branches if using push-triggered mode). See the [Branch Protection](/merge-queue/getting-started/configure-branch-protection) guide for details. + +### ### Troubleshooting common issues - - +
+ +"Permission denied on trunk-merge/* branch" + **Cause:** Branch protection rules are applying to Trunk's temporary branches. -**Solution:** Follow the "Exclude Trunk's Temporary Branches" section above to make sure `trunk-temp/*` and `trunk-merge/*` are not protected. - +**Solution:** Follow the "Exclude Trunk's Temporary Branches" section above to ensure `trunk-temp/*` and `trunk-merge/*` are not protected. + +
+ +
+ +Pull request stuck as "Queued" in the queue - **Cause:** Required status checks are not completing or not configured correctly. **Solution:** * Click on the pull request in the Trunk Dashboard to see which checks it's waiting for * Verify those checks are running in your CI provider -* If using Push-triggered mode, make sure the check names in `trunk.yaml` exactly match your CI job names - +* If using Push-triggered mode, ensure the check names in `trunk.yaml` exactly match your CI job names + +
+ +
+ +Required status checks not running - **If using Draft PR mode:** Verify your CI workflows are triggered by pull requests (including draft pull requests). **If using Push-triggered mode:** * Verify your CI workflows trigger on pushes to `trunk-merge/**` branches * Check that the workflows actually ran in your CI provider's interface -* Make sure the `trunk-io` bot has permission to push to create these branches - - +* Ensure the `trunk-io` bot has permission to push to create these branches + +
diff --git a/merge-queue/using-the-queue/index.mdx b/merge-queue/using-the-queue.mdx similarity index 62% rename from merge-queue/using-the-queue/index.mdx rename to merge-queue/using-the-queue.mdx index f322759..59e6f6d 100644 --- a/merge-queue/using-the-queue/index.mdx +++ b/merge-queue/using-the-queue.mdx @@ -6,18 +6,21 @@ These pages cover the daily operations that developers perform when working with ### Submit and manage pull requests -[**Submit and cancel pull requests**](./reference)\ +[**Submit and cancel pull requests**](/merge-queue/using-the-queue/reference)\ How to add PRs to the queue via GitHub comments, CLI, or UI, and remove them when needed. +[**Work with stacked pull requests**](/merge-queue/using-the-queue/stacked-pull-requests)\ +Manage dependent PRs that build on each other. + ### Monitor and troubleshoot -[**Monitor queue status**](./monitor-queue-status)\ +[**Monitor queue status**](/merge-queue/using-the-queue/monitor-queue-status)\ Track your PR's progress through the queue in real-time via dashboard or CLI. -[**Handle failed pull requests**](./handle-failed-pull-requests)\ +[**Handle failed pull requests**](/merge-queue/using-the-queue/handle-failed-pull-requests)\ Diagnose failures, retry flaky tests, fix issues, and resubmit. ### Emergency procedures -[**Emergency pull requests**](./#emergency-procedures)\ +[**Emergency pull requests**](#emergency-procedures)\ Bypass the queue for critical production fixes (use sparingly). diff --git a/merge-queue/using-the-queue/emergency-pull-requests.mdx b/merge-queue/using-the-queue/emergency-pull-requests.mdx index 3746411..a6027e5 100644 --- a/merge-queue/using-the-queue/emergency-pull-requests.mdx +++ b/merge-queue/using-the-queue/emergency-pull-requests.mdx @@ -1,12 +1,12 @@ --- title: "Emergency pull requests" -description: "Bypass the merge queue entirely for true emergencies. Use with caution as this can invalidate in-progress queue tests." 
+description: "Emergency merges bypass the queue entirely and push directly to your main branch. This is the most disruptive action you can take and should be reserved for true emergencies only." --- Emergency merges bypass the queue entirely and push directly to your main branch. This is the **most disruptive action** you can take and should be reserved for true emergencies only. - + **Warning:** Emergency merges bypass all safety checks. Use sparingly. - +
### **Emergency bypass** @@ -14,14 +14,10 @@ If you need to completely bypass the merge queue, you can merge the PR directly #### **Recommended approach** -Use [PR Prioritization](../optimizations/priority-merging) to fast-track your PR through the queue while still validating it: +Use [PR Prioritization](/merge-queue/optimizations/priority-merging) to fast-track your PR through the queue while still validating it: ``` /trunk merge --priority=urgent ``` The `urgent` priority is the only level that will interrupt currently testing PRs. Your PR will immediately begin testing, and other PRs will restart after yours completes. - -#### See also - -If the blocker is branch protection (not the queue) — for example, a broken required status check — admins can use [Force merge](./force-merge) instead. Force merge still goes through the queue and still tests the PR; only the branch protection gate is bypassed at merge time. diff --git a/merge-queue/using-the-queue/force-merge.mdx b/merge-queue/using-the-queue/force-merge.mdx deleted file mode 100644 index 15e88e0..0000000 --- a/merge-queue/using-the-queue/force-merge.mdx +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: "Force merge" -description: "Admins can push a pull request through Merge Queue even when GitHub branch protection rules aren't satisfied. The PR is still tested; only the final merge bypasses protection." ---- -### What it is - -Force merge lets a GitHub repository admin push a pull request through the Trunk Merge Queue even when branch protection requirements are not satisfied. The PR is still tested by the queue exactly like any other PR — only the final protection gate is bypassed at merge time, using the [Trunk Sudo GitHub App](../../setup-and-administration/trunk-sudo-app). - - -**Force merge is admin-only and can only be triggered by a GitHub comment.** Trunk verifies admin identity via GitHub comment authorship, which is why other submission paths (CLI, checkbox, web app) don't support `--force`. 
If a non-admin posts `/trunk merge --force`, Trunk will reply on the PR with a rejection comment explaining that the command requires admin access. - - -### Why use it - -* **Unblock misconfigured protection.** Ship a PR when a required status check is broken or misconfigured, without disabling the rule for everyone else. -* **Merge emergency fixes safely.** You still get queue validation — predictive testing, batching, failure detection — instead of merging directly to `main` and hoping for the best. -* **Avoid direct-to-`main` bypass.** Force merge is strictly safer than pushing to the protected branch manually, because the PR is fully tested before it lands. - -### Prerequisites - -Before you can use force merge, make sure you have: - -* [ ] [Trunk Sudo GitHub App](../../setup-and-administration/trunk-sudo-app) installed and configured for this repository -* [ ] GitHub admin access on the repository - -### How to use it - -#### Via GitHub comment - -On any pull request, post: - -``` -/trunk merge --force -``` - -There is no CLI, checkbox, or web app equivalent. This is intentional: Trunk verifies admin identity through GitHub comment authorship, so the command is only accepted through the PR comment flow. - -### What happens step by step - -1. **Admission.** The PR enters the queue despite branch protection not being satisfied. Normally Trunk Merge Queue waits until GitHub marks a PR as ready to merge; `--force` skips that wait. -2. **Testing.** The PR is tested normally. Batching, ordering, priority, and failure handling all behave exactly as they would for any other PR — nothing about the testing pipeline changes. -3. **Merge.** If tests pass, Trunk Sudo merges the PR, bypassing branch protection. Without Trunk Sudo installed and configured, this step will fail. -4. **Failure.** If tests fail, the PR is handled like any normal queue failure. See [Handle failed pull requests](./handle-failed-pull-requests). 
- -After a PR is force-merged, the PR shows a **Forced** badge in the Merge Queue dashboard PR list so you can identify which merges bypassed branch protection. - -### Combining with other flags - -Force merge can be combined with other `/trunk merge` flags. The most common combination is with [priority](../optimizations/priority-merging) when both urgency and protection bypass are needed — for example: - -``` -/trunk merge --force --priority=urgent -``` - -### Tradeoffs and considerations - -#### What you gain - -* **Queue validation is preserved** — tests still run before merge. -* **No direct-to-`main` push** — the PR goes through the same merge flow as every other PR. -* **Unblock stuck PRs** without weakening your default branch protection for everyone. - -#### What you give up - -* **Bypasses the human review gate** if required reviews aren't satisfied. -* **Bypasses required status checks** that would otherwise block the merge. -* Because force merge bypasses protections that every other PR must satisfy, overuse erodes the value of those protections. - -#### When NOT to use force merge - -* **Normal feature work.** If a PR is going to merge eventually, let it wait for reviews and checks. -* **"The required check is slow."** Fix the check or the CI configuration — force merge is not a substitute for unbreaking your pipeline. -* **Non-admin urgency.** If you aren't an admin, don't ask an admin to force merge your PR — escalate via the usual incident or on-call process. - -### Common misconceptions - -* **Misconception:** "Force merge skips testing." - * **Reality:** Tests still run normally. The PR goes through the full merge queue testing pipeline — only the branch protection gate is bypassed at merge time. -* **Misconception:** "I can force merge through the CLI." - * **Reality:** Force merge is comment-only and admin-only. The CLI, web app checkbox, and "Retry" button don't accept `--force`. 
-* **Misconception:** "Force merge is the same as emergency pull requests." - * **Reality:** [Emergency pull requests](./emergency-pull-requests) bypass the queue entirely and push directly to your merge branch. Force merge still goes through the queue and still tests the PR — it only bypasses branch protection at merge time. - -### Visibility in the queue dashboard - -Force-merged pull requests appear with a **Forced** badge in the merge queue PR list. This lets you identify which PRs bypassed branch protection at a glance without opening each PR individually. - -### Next steps - -* [Trunk Sudo GitHub App](../../setup-and-administration/trunk-sudo-app) — install and configure the app that powers force merge. -* [Emergency pull requests](./emergency-pull-requests) — when the queue itself needs to be bypassed, not just branch protection. -* [Priority merging](../optimizations/priority-merging) — fast-track a PR without bypassing any rules. diff --git a/merge-queue/using-the-queue/handle-failed-pull-requests.mdx b/merge-queue/using-the-queue/handle-failed-pull-requests.mdx index 01c7c21..31e2d21 100644 --- a/merge-queue/using-the-queue/handle-failed-pull-requests.mdx +++ b/merge-queue/using-the-queue/handle-failed-pull-requests.mdx @@ -1,6 +1,6 @@ --- title: "Handle failed pull requests" -description: "Understand why PRs fail in the merge queue and how to fix and resubmit them." +description: "When a PR fails in the merge queue, it's automatically removed so it doesn't block other PRs. Understanding why it failed helps you fix it quickly." --- ### Understanding failures @@ -49,30 +49,24 @@ The PR Details panel has a dropdown "**Actions"** menu, where you can: 1. **Restart tests.** Use this to manually restart testing of this PR. 2. **Remove from queue**. If the PR is "Queued", then it will cancel it, preventing it from going into the queue until it is re-queued. 
If the PR is currently in the queue, it will be removed from the queue, which will restart all PRs that depended on it. -Trunk Merge Queue will automatically restart failed PRs when it can under certain conditions (see PR states). Since the restart is usually from a failed PR being removed from the queue, other PRs behind it will also be restarted. If you want to manually restart a PR, you can restart it _**in place**_ by clicking the **Details** link in the Failures summary screen to open the merge details screen. Then, click the **Actions** dropdown, and select **Restart** +Trunk Merge Queue will automatically restart failed PRs when it can under certain conditions (see PR states). Since the restart is usually from a failed PR being removed from the queue, other PRs behind it will also be restarted. If you want to manually restart a PR, you can restart it ***in place*** by clicking the **Details** link in the Failures summary screen to open the merge details screen. Then, click the **Actions** dropdown, and select **Restart** - -![](/assets/merge-pr-details-action.png) - +
There are a couple of reasons you might want to manually retry a PR. First, if a PR ends up in the `PENDING_FAILURE` state because of something transient like a CI runner disconnecting or flakey tests, you can retry the PR right away instead of waiting for PRs in front of it to pass or fail. Another reason to restart a PR is if the proper tests don't get kicked off due to a failure in the CI system. For example, if GitHub has an outage and is not triggering workflows or actions properly. -*** +--- ## Failures A tabulated view of all the items that have failed in the Merge Queue, e.g. due to testing. - -![](/assets/merge-failures.png) - +
### Retry failed pull requests When a PR has been dropped from the queue, you can manually retry the PR by clicking the **Details** link in the Failures summary screen to open the merge details screen. Then, click the **Actions** dropdown, and select **Retry** - -![](/assets/merge-failure-retry.png) - +

Re-queue a PR if it is currently not in the queue that has failed or been cancelled.

diff --git a/merge-queue/using-the-queue/monitor-queue-status.mdx b/merge-queue/using-the-queue/monitor-queue-status.mdx index 4980c43..43791b7 100644 --- a/merge-queue/using-the-queue/monitor-queue-status.mdx +++ b/merge-queue/using-the-queue/monitor-queue-status.mdx @@ -1,6 +1,6 @@ --- title: "Monitor queue status" -description: "View real-time queue activity, PR status, and test results in the Trunk Merge Queue dashboard." +description: "The Trunk Merge Queue dashboard gives you real-time visibility into your queue's activity." --- ### Access the Merge Queue dashboard @@ -11,48 +11,18 @@ The Trunk Merge Queue dashboard gives you real-time visibility into your queue's 1. **Navigate to Trunk:** [https://app.trunk.io](https://app.trunk.io/) 2. **Select your organization** (if you're in multiple) 3. **Click** the **Merge Queue** tab in the upper left -4. Select your repository from the selector in the page header +4. Select your repository **Quick access from GitHub:** * Trunk bot comments include dashboard links * Click any link in bot comments to go directly to that PR's status -#### GitHub status check - -When enabled, Trunk posts a check on your PR that reflects the current queue state. The check is named `Trunk Merge Queue ()`, for example `Trunk Merge Queue (main)` for a queue on `main`. A repository with multiple queues will have a separate check for each branch. - -The check is posted once the PR is admitted to the queue and updates in place as it moves through: - -* **Queued** - PR has been admitted to the queue and is waiting to test -* **Testing** - PR is actively being tested -* **Merged** - Successfully merged into the base branch -* **Cancelled** - PR was removed from the queue without merging -* **Failed** - Tests failed and the PR could not merge - -Once the PR reaches a terminal state (Merged, Cancelled, or Failed), the check remains on the commit in that final state. 
- -Click the **Details** link on the check to go directly to the Trunk dashboard for that PR. - -To enable GitHub status checks, go to [**GitHub Statuses**](../../merge-queue/administration/advanced-settings#github-statuses) in **Settings** > **Repositories** > your repository > **Merge Queue**. This is a per-queue setting and is enabled by default. - -### Repository selector - -The selector in the page header lets you switch between merge queues without leaving the dashboard. - -**Starred repositories:** Click the star icon next to a repository in the selector list to star it. Starred repositories always appear at the top of the list, regardless of organization. - -**Organization grouping:** Unstarred repositories are grouped by GitHub organization. Repositories not associated with a GitHub organization appear under **Other repositories**. - -**Search:** Type to filter the repository list by name. Search also matches branch names, making it easy to find the right queue when multiple repositories share a branch. - ### Queue overview The main dashboard shows a high-level view of your merge queue activity. - -![](/assets/merge-queue-screen.png) - +

Clicking on a queue item navigates you to the details page.

### Queue view @@ -97,20 +67,11 @@ PR nodes in the graph view display a priority badge when the PR was queued with PRs queued at the default medium priority or at low priority do not display a badge, keeping the graph view clean. -For details on setting priority levels, see [Priority merging](../../merge-queue/optimizations/priority-merging). - -#### Impacted targets in the graph - -When running in [Parallel mode](../optimizations/parallel-queues/), the graph view surfaces impacted targets data to help you understand why PRs are grouped or ordered the way they are. - -* **Per-PR tooltips**: Hover over a PR node to see which targets that PR impacts. -* **Overlapping targets on edges**: Use the **Show targets on hover** toggle to display which targets overlap between connected PRs. Overlapping targets explain why PRs are in the same testing sequence: PRs with shared targets must be tested together. - -This is useful for debugging unexpected queue ordering or understanding why specific PRs are batched together. +For details on setting priority levels, see [Priority merging](/merge-queue/optimizations/priority-merging). ### Health view -Select a period of time to inspect using the **Period** dropdown (default 7 days) and a **Granularity** (defaults to daily) of queue metrics +Select a period of time to inspect using the **Period** dropdown (default 7 days) and a **Granularity** (defaults to daily) of queue metrics #### Conclusion counts @@ -120,8 +81,6 @@ A Bar chart of PRs and their statuses. More Green = More Merges! View statical trends of PR time in queue, default p50 view is useful for an "Average time in queue" evaluation. - - ## Pull request details The PR details show information about a PR, including a link to the PR in GitHub, the history of the PR within Trunk Merge Queue, and what must be done before a PR can be admitted to the queue for PRs that have not entered the queue yet. 
@@ -132,16 +91,6 @@ When a PR has not been admitted to the queue yet, Trunk Merge Queue waits for: 2. The PR to be mergeable according to GitHub. If the PR is not mergeable yet, this most likely means that the PR is not meeting all branch protection rules you have set (for example, not all required status checks have passed yet) or has a merge conflict with the target branch 3. The target branch of the pull request to match the branch that merge queue merges into - -![](/assets/merge-details_(1).png) - - -In the screenshot above, the PR has been submitted to Merge but has not yet been added to the queue. It will be added once all of the branch protection rules pass and there are no merge conflicts with the target branch. - -### View impacted targets - -In [Parallel mode](../optimizations/parallel-queues/), the PR detail page includes a **View Impacted Targets** button when targets have been uploaded for the PR. The modal shows one of three states: +

PR readiness details for a PR that has been submitted but has not yet entered the merge queue.

-* `IMPACTS_ALL`: The PR depends on everything in the queue, and every PR submitted after this one will depend on it. No parallel optimization is possible. -* **Specific targets**: A list of each target the PR impacts. -* **None (empty list)**: An empty list of targets was uploaded. The PR will not depend on anything, and nothing will depend on it. +In the screenshot above, the PR has been submitted to Merge but has not yet been added to the queue. It will be added once all of the branch protection rules pass and there are no merge conflics with the target branch. diff --git a/merge-queue/using-the-queue/reference.mdx b/merge-queue/using-the-queue/reference.mdx index 23d7237..3dcf6dd 100644 --- a/merge-queue/using-the-queue/reference.mdx +++ b/merge-queue/using-the-queue/reference.mdx @@ -16,16 +16,12 @@ trunk login trunk merge ``` -Admins can also use [`/trunk merge --force`](./force-merge) to push a PR through the queue when branch protection isn't satisfied. - We offer similar commands for cancellation. * Posting a GitHub comment `/trunk cancel` on a pull request. * Cancellation from the WebApp: - -![](/assets/image_(35).png) - +
* Using the `trunk` CLI: @@ -34,50 +30,21 @@ trunk login trunk merge cancel ``` -## Custom merge commit titles - -You can specify a custom merge commit title for any PR by adding a `merge-commit-title:` directive on its own line anywhere in the PR body: - -``` -merge-commit-title: Your Custom Commit Title Here -``` - -When Trunk merges the PR, it uses this title instead of the default GitHub-generated title. When the directive is not present, the default behavior is preserved. - -The directive name is case-sensitive. It must be lowercase `merge-commit-title:`. Variations such as `Merge-Commit-Title:` are not recognized. - -This is useful for teams that follow conventional commit formats, include ticket numbers in merge commits, or want a cleaner git history. - -### Example - -```markdown -## Description -This PR adds user authentication. - -merge-commit-title: feat(auth): add OAuth2 login flow [PROJ-123] -``` - - -The `merge-commit-title:` directive only customizes the merge commit **title**. The commit body follows the usual behavior for your configured [merge method](../administration/advanced-settings#merge-method). - -The directive applies to the **Squash** and **Merge Commit** merge methods. It has no effect when using **Rebase**, since rebase replays the original commits onto the target branch and does not produce a separate merge commit. - - ## Pull request processing -Once a PR is submitted to the merge queue it goes through several states. First, it starts as _Queued_ until all of the required conditions to submit it are met. Once ready, the PR moves to the _Pending_ state, waiting for a Merge Queue to pick it up, and then enters the _Testing_ state. Once the tests pass the PR may still need to wait for upstream PRs. Once any upstream PRs are complete the PR will be merged and then removed from the Merge Queue. If a PR fails or is canceled then it will go to the failed or canceled state. 
+Once a PR is submitted to the merge queue it goes through several states. First, it starts as *Queued* until all of the required conditions to submit it are met. Once ready, the PR moves to the *Pending* state, waiting for a Merge Queue to pick it up, and then enters the *Testing* state. Once the tests pass the PR may still need to wait for upstream PRs. Once any upstream PRs are complete the PR will be merged and then removed from the Merge Queue. If a PR fails or is canceled then it will go to the failed or canceled state. ## Pull request states A PR's lifecycle in the Merge Queue goes through the following states: -| State | Description | -| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Queued | The PR was submitted to Trunk Merge Queue, but the PR isn't eligible for merging yet. Impacted targets may not be uploaded, or readiness checks may not have passed. | -| Pending | The MergeGraph created a node for the PR. Testing will begin if the graph has capacity. | -| Testing | The PR is testing. Required status checks that Trunk Merge Queue must gate on before merging PRs can be specified with in `.trunk/trunk.yaml` or through GitHub branch protection rules as the "Status checks that are required" before merging on your merge branch | -| Tests Passed | The PR successfully passed tests. It may have to wait for upstream PRs to complete tests before merging. | -| Pending Failure | The PR failed tests. The cause of failures is still indeterminate - it may be due to an upstream PR, or due to the current PR. 
It will wait until the root cause of tests has been determined, and restart testing on your PR if due to an upstream PR. If you want to manually restart a failed PR, see [manually restarting PRs](./handle-failed-pull-requests#manually-restarting-failed-pull-requests). | -| Merged | The PR successfully merged into the target branch. It will be removed from the queue. | -| Failed | The PR caused a testing failure. It will be removed from the queue. | -| Cancelled | The PR was cancelled, e.g. `/trunk cancel`. It will be removed from the queue. | +| State | Description | +| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Queued | The PR was submitted to Trunk Merge Queue, but the PR isn't eligible for merging yet. Impacted targets may not be uploaded, or readiness checks may not have passed. | +| Pending | The MergeGraph created a node for the PR. Testing will begin if the graph has capacity. | +| Testing | The PR is testing. Required status checks that Trunk Merge Queue must gate on before merging PRs can be specified with in `.trunk/trunk.yaml` or through GitHub branch protection rules as the "Status checks that are required" before merging on your merge branch | +| Tests Passed | The PR successfully passed tests. It may have to wait for upstream PRs to complete tests before merging. | +| Pending Failure | The PR failed tests. The cause of failures is still indeterminate - it may be due to an upstream PR, or due to the current PR. It will wait until the root cause of tests has been determined, and restart testing on your PR if due to an upstream PR. 
If you want to manually restart a failed PR, see [manually restarting PRs](/merge-queue/using-the-queue/handle-failed-pull-requests#manually-restarting-failed-pull-requests). | +| Merged | The PR successfully merged into the target branch. It will be removed from the queue. | +| Failed | The PR caused a testing failure. It will be removed from the queue. | +| Cancelled | The PR was cancelled, e.g. `/trunk cancel`. It will be removed from the queue. | diff --git a/merge-queue/using-the-queue/stacked-pull-requests.mdx b/merge-queue/using-the-queue/stacked-pull-requests.mdx index 7652733..9114856 100644 --- a/merge-queue/using-the-queue/stacked-pull-requests.mdx +++ b/merge-queue/using-the-queue/stacked-pull-requests.mdx @@ -1,178 +1,65 @@ --- -title: "Stacked pull requests" -description: "Merge a chain of dependent pull requests through Trunk Merge Queue, either as a single combined unit with /trunk stack or one at a time with /trunk merge." +title: "Work with stacked pull requests" +description: "Yes, Trunk Merge Queue fully supports stacked pull requests. You can use stacked PR workflows with your preferred tooling (GitHub CLI, web interface, or third-party apps)." --- -Trunk Merge Queue supports merging **stacked pull requests**: a chain of PRs where each one builds on the previous, with the bottom PR targeting your merge queue branch. Trunk gives you two ways to get a stack merged: +### How it works -* [`/trunk stack`](#merge-the-stack-as-one-unit): combine the entire stack into a single PR that moves through the queue as one unit. Faster, and requires the [Trunk Sudo GitHub App](../../setup-and-administration/trunk-sudo-app). -* [`/trunk merge`](#enqueue-each-pr-individually) on each PR: enqueue each PR in the stack separately and let Trunk process them sequentially. Slower but gives you per-PR test isolation, and has no additional setup requirements. 
+Trunk Merge Queue determines PR dependencies by examining each pull request's **base branch** (the branch it will merge into, shown under the PR title on GitHub). -## What is a stack +* If a PR's base branch is your main branch (e.g., `main`), it's ready to process immediately +* If a PR's base branch is another feature branch (indicating it's part of a stack), Merge Queue will wait until that base branch changes to your main branch before processing -A stack is a chain of pull requests connected through their base branches. Each PR targets the branch of the PR below it, and the bottom PR targets your merge queue branch: +### Merging stacked PRs -``` -merge queue branch <-- PR #1 (base: merge queue branch) - <-- PR #2 (base: PR #1's branch) - <-- PR #3 (base: PR #2's branch) -``` +#### Step 1: Enqueue all PRs in your stack -Trunk discovers the stack automatically by walking base branches. No separate configuration is required to mark PRs as stacked. +Each PR in the stack must be enqueued separately. 
You can: -## Choose your approach +* Comment `/trunk merge` on each PR +* Check the box in the Trunk comment on each PR +* Use the CLI: `trunk merge ` for each PR -| | `/trunk stack` (combined) | `/trunk merge` (individual) | -| --- | --- | --- | -| **Enqueue method** | Single `/trunk stack` comment on any PR | `/trunk merge` on each PR separately | -| **Queue processing** | One stacked PR tests and merges as a unit | Each PR tests and merges sequentially | -| **Test runs** | One test run for the combined changes | One test run per PR | -| **Speed** | Faster: one pass through the queue | Slower: each PR waits for the previous one | -| **Isolation** | Less: all changes test together | More: each PR tests against the actual merge queue branch | -| **On merge** | Member PRs closed automatically | Each PR merges individually | -| **Requirements** | [Trunk Sudo GitHub App](../../setup-and-administration/trunk-sudo-app) installed and configured | No additional setup | +**Why enqueue separately?** Each PR is an independent merge operation in the queue. This gives you control over which PRs in your stack should be merged versus which might need more work. -## Merge the stack as one unit +#### Step 2: Automatic sequential processing -Comment `/trunk stack` on any PR in the stack and Trunk combines every PR in the chain into a single stacked PR that moves through the merge queue together. That stacked PR tests and merges like any other PR in the queue; batching, priority, and failure handling all behave normally. When the stacked PR merges, Trunk automatically closes every member PR, since its code is already in your target branch. +Once enqueued, Trunk handles the rest automatically: -### Prerequisites +1. The **first PR** in the stack (base branch = `main`) enters the queue, runs tests, and merges +2. When it merges, **GitHub automatically updates** the next PR's base branch from the previous feature branch to `main` +3. 
The **second PR** now has `main` as its base, so it proceeds through the queue +4. This continues until all PRs in the stack are merged -Trunk Sudo is required because the stacked PR Trunk creates is brand-new and auto-generated, so it doesn't inherit the approvals or required status checks that have already been satisfied on the member PRs. Trunk Sudo merges the stacked PR on the strength of those member PRs by bypassing its branch protection. +**Example:** For a stack of 5 PRs: -* [ ] [Trunk Sudo GitHub App](../../setup-and-administration/trunk-sudo-app) installed on your repository. If it isn't installed, `/trunk stack` will fail with an error linking you to the install page. -* [ ] Trunk Sudo configured to bypass branch protection on your target branch. Required status checks must live in a [GitHub ruleset](../../setup-and-administration/trunk-sudo-app#option-a-github-rulesets-recommended) (not classic branch protection) with Trunk Sudo listed as an exempt bypass actor. -* [ ] You have write access to the repository, or you're a member of your repository's Trunk organization. - -### Stack requirements - -For `/trunk stack` to succeed, the stack must satisfy: - -* The chain terminates at your merge queue branch (e.g., `main`). -* The stack contains **2 to 10 PRs**. -* Every member PR is **open**: not closed, not a draft, not already merged. -* No member PR already belongs to another active stacked PR group. - -If any requirement fails, Trunk rejects the command with a message listing the specific problems (e.g., `#42: is a draft PR`). - -### Command syntax - -Comment on any PR in the stack: - -``` -/trunk stack [--title "Custom title"] [-p ] [--no-batch] -``` - -| Option | Short | Description | -| --- | --- | --- | -| `--title` | `-t` | Custom title for the stacked PR. Defaults to the title of the **topmost** PR in the stack (the one furthest from the merge queue branch). 
| -| `--priority` | `-p` | [Priority level](../optimizations/priority-merging) for the stacked PR in the merge queue. | -| `--no-batch` | | Prevent the stacked PR from being [batched](../optimizations/batching) with other items in the queue. | - -**Examples:** - -``` -/trunk stack -/trunk stack --title "Feature: user authentication" -/trunk stack -t "Refactor auth module" -p 1 -/trunk stack --no-batch -``` - -### What happens when you run `/trunk stack` - -1. **Permissions check**: Trunk verifies you have write access to the repository or are a Trunk organization member. -2. **Prerequisite check**: Trunk verifies the Trunk Sudo GitHub App is installed and branch protection is configured. -3. **Stack discovery**: Trunk walks the base branch chain from your PR down to the merge queue branch, collecting all PRs. -4. **Validation**: Trunk checks every member PR is open, not a draft, and not already in another stack. If anything fails, the command is rejected. -5. **Temporary branch**: Trunk creates a branch at the current HEAD of your merge queue branch, named `trunk-stack/`. -6. **Sequential merge**: Trunk merges each member PR into the temporary branch starting from the PR closest to the merge queue branch. If any merge conflicts occur, the operation fails and you'll need to resolve conflicts before retrying. -7. **Stacked PR creation**: Trunk opens a new pull request from `trunk-stack/` to your target branch. -8. **Enqueue**: The stacked PR is automatically added to the merge queue. -9. **Status comments**: Trunk comments on each member PR to note that a stacked PR has been created and is queued. - -Trunk reacts to your `/trunk stack` comment with a 👍 on success, or a 👎 plus an explanation on failure. - -### The stacked PR - -The stacked PR is a real GitHub pull request containing the combined changes from every member PR. - -* **Title**: If you passed `--title`, that title is used exactly. Otherwise it defaults to the title of the topmost PR in the stack. 
-* **Body**: Auto-generated. Includes the list of member PRs, the target branch, and an explanation of how the stacked PR works (merge behavior, automatic closure of member PRs, cancellation triggers). -* **Branch**: `trunk-stack/`. Managed entirely by Trunk and cleaned up automatically when the stacked PR merges or is cancelled. - -### Lifecycle - -#### On successful merge - -1. Each member PR is automatically closed by Trunk, since its code is already in the target branch via the stacked PR. -2. Trunk posts a comment on each member PR: "Stacked PR #N merged successfully. Closing this PR as its code has been merged into ``." -3. The `trunk-stack/` branch is deleted. - -#### On test failure - -1. Trunk posts a comment on each member PR: "Stacked PR #N failed testing in the merge queue. Please investigate the failure and re-submit the stack." -2. The stacked PR is closed on GitHub. -3. The `trunk-stack/` branch is deleted. - -Fix the issue in the relevant member PR, then run `/trunk stack` again to create a fresh stacked PR. - -#### On cancellation - -The stacked PR is automatically cancelled if any of the following happens: - -| Trigger | What it means | -| --- | --- | -| **Member PR pushed to** | A new commit is pushed to any member PR's branch. | -| **Member PR closed** | Any member PR is closed without merging. | -| **Member PR merged independently** | Any member PR is merged outside of the stacked PR workflow. | -| **Member PR base branch changed** | The base branch of any member PR is changed. | -| **Stacked PR closed** | The stacked PR itself is closed manually. | -| **Stacked PR pushed to** | The stacked PR's branch is pushed to directly. | -| **User cancelled** | Someone runs `/trunk cancel` on the stacked PR. | - -When cancellation occurs, Trunk posts a comment on each member PR explaining the reason (e.g., "Stacked PR #N was cancelled: a member PR was pushed to.") and closes the stacked PR. To re-stack, run `/trunk stack` again on any member PR. 
This creates a fresh stacked PR with the latest state of all member PRs. - -### Why `/trunk stack` creates a separate PR - -An obvious-sounding shortcut would be to test the stack against `main`, then fast-forward `main` to the top of the stack once tests pass. That doesn't work. - -Trunk Merge Queue uses [predictive testing](../optimizations/predictive-testing): every PR is tested against the projected future state of the target branch, not its current state, and multiple PRs test concurrently against different speculative merge states. By the time your stack finishes testing, the actual target branch tip has almost certainly advanced past where your stack was based: one or more PRs ahead in the queue have merged. You can't fast-forward past those intermediate merges, and pushing over them would skip testing against the new tip: exactly the stale-results blind spot the queue exists to eliminate. - -Wrapping the stack in a synthetic PR hands it off to the same predictive-testing machinery every other PR uses. The queue tests it against the current projected future state and produces a real merge commit incorporating both the stack's changes and anything that landed ahead of it. Everything else falls out for free: batching, priority, optimistic merging, failure handling, `/trunk cancel`, and a real PR visible in GitHub's history. - -## Enqueue each PR individually - -If you prefer per-PR test isolation, or you don't want to install Trunk Sudo, you can enqueue each PR in the stack separately with `/trunk merge`. Trunk processes the PRs sequentially, testing and merging each one against the actual state of your merge queue branch. - -### Step 1: Enqueue every PR in the stack +* PR #1 (base: `main`) → tests → merges +* PR #2's base automatically changes from PR #1's branch to `main` → tests → merges +* PR #3's base automatically changes from PR #2's branch to `main` → tests → merges +* And so on... -Each PR in the stack must be enqueued separately. 
Use any of the standard submission methods on every PR: +--- -* Comment `/trunk merge` on each PR. -* Check the box in the Trunk comment on each PR. -* Use the CLI: `trunk merge ` for each PR. +### Important considerations -Enqueuing each PR separately gives you control over which PRs in your stack should be merged versus which might need more work. +#### Sequential testing -### Step 2: Automatic sequential processing +PRs in a stack are tested and merged **one at a time** in order. The second PR won't begin testing until the first PR has fully merged. This ensures: -Once enqueued, Trunk handles the rest: +* Each PR is tested against the actual state of your main branch +* No conflicts arise from dependencies +* Test results are deterministic and reliable -1. The **bottom PR** in the stack (base branch = your merge queue branch) enters the queue, runs tests, and merges. -2. When it merges, **GitHub automatically updates** the next PR's base branch from the previous feature branch to your merge queue branch. -3. The **next PR** now targets the merge queue branch, so it proceeds through the queue. -4. This continues until every PR in the stack is merged. +**Tradeoff:** This sequential approach means that a stack of 5 PRs will take longer to merge than 5 independent PRs, since they cannot be tested in parallel. However, it provides the safest merge path for dependent changes. -For example, a stack of 5 PRs with merge queue branch `main`: +#### Enqueued PRs with non-main base branches -* PR #1 (base: `main`) → tests → merges -* PR #2's base automatically changes to `main` → tests → merges -* PR #3's base automatically changes to `main` → tests → merges -* …and so on. +If you enqueue a PR whose base branch is not your main branch and that base never changes to main, the PR will remain in the queue without processing. 
This typically happens if: -### Considerations +* The parent PR in the stack was not enqueued or merged +* You're testing queue behavior with a non-standard workflow -**Sequential testing.** PRs in the stack are tested and merged one at a time in order. The second PR won't begin testing until the first PR has fully merged. This ensures each PR is tested against the actual state of your merge queue branch and results are deterministic, at the cost of speed. A stack of 5 PRs takes substantially longer than 5 independent PRs, since they can't be tested in parallel. +The PR will begin processing as soon as its base branch updates to your main branch. -**Enqueued PRs with non-merge-branch bases.** If you enqueue a PR whose base branch is not your merge queue branch and that base never updates, the PR stays in the queue without processing. This typically means the parent PR in the stack was never enqueued or merged. The PR will begin processing as soon as its base branch updates to the merge queue branch. +### Configuration -**No special configuration.** Individual enqueuing requires no additional setup beyond a functioning merge queue. Trunk detects the stack relationship automatically from each PR's base branch. +No special configuration is required. Trunk Merge Queue automatically detects stacked relationships based on the base branch field in GitHub. diff --git a/merge-queue/webhooks.mdx b/merge-queue/webhooks.mdx index 5127099..01bc341 100644 --- a/merge-queue/webhooks.mdx +++ b/merge-queue/webhooks.mdx @@ -1,20 +1,15 @@ --- -title: "Merge Queue webhooks" -description: "Merge Queue > Webhooks" +title: "Webhooks" +description: "Merge Queue Webhooks" --- Trunk provides a variety of webhooks to allow responding to various events from Trunk. Each event corresponds to a Trunk feature and an action within that feature (for example, a Pull Request being submitted to Trunk Merge). 
### Supported events -Trunk provides various webhooks to respond to events from Trunk Flaky Tests. Flaky Tests events are named with a `pull_request` prefix, you can find the events you can respond to in the Webhook Events reference from Svix. +Trunk provides various webhooks to respond to events from Trunk Flaky Tests. Flaky Test events are named with a `pull_request` prefix, you can find the events you can respond to in the Webhook Events reference from Svix. - - www.svix.com + +Open the referenced resource in a new tab. You can learn about the Svix event catalog in the [Svix docs](https://docs.svix.com/receiving/using-app-portal/event-catalog). diff --git a/openapi.json b/openapi.json deleted file mode 100644 index 598ae8d..0000000 --- a/openapi.json +++ /dev/null @@ -1,6416 +0,0 @@ -{ - "openapi": "3.1.0", - "servers": [ - { - "url": "https://api.trunk.io/v1" - } - ], - "info": { - "title": "Trunk APIs", - "version": "1.0.0", - "license": { - "name": "UNLICENSED" - } - }, - "paths": { - "/submitPullRequest": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Submit a pull request to a merge queue.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "pr": { - "type": "object", - "properties": { - "number": { - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - } - }, - "required": [ - "number" - ] - }, - "targetBranch": { - "type": "string" - }, - "priority": { - "anyOf": [ - { - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "noBatch": { - "type": "boolean" - } - }, - "required": [ - "repo", - "pr", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - 
"description": "OK", - "content": { - "application/json": {} - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/cancelPullRequest": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Cancel a pull request in a merge queue.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "pr": { - "type": "object", - "properties": { - "number": { - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - } - }, - "required": [ - "number" - ] - }, - "targetBranch": { - "type": "string" - } - }, - "required": [ - "repo", - "pr", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": {} - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - 
"application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/restartTestsOnPullRequest": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Restart tests on a pull request in a merge queue.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "pr": { - "type": "object", - "properties": { - "number": { - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - } - }, - "required": [ - "number" - ] - }, - "targetBranch": { - "type": "string" - } - }, - "required": [ - "repo", - "pr", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": {} - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/getSubmittedPullRequest": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get a submitted pull request from a merge queue.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - 
"owner", - "name" - ] - }, - "pr": { - "type": "object", - "properties": { - "number": { - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - } - }, - "required": [ - "number" - ] - }, - "targetBranch": { - "type": "string" - } - }, - "required": [ - "repo", - "pr", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "state": { - "type": "string", - "enum": [ - "not_ready", - "pending", - "testing", - "tests_passed", - "merged", - "failed", - "cancelled", - "pending_failure" - ], - "description": "The state of a pull request in the merge queue. See https://docs.trunk.io/merge-queue/using-the-queue/reference#pull-request-states for the full description of each state." - }, - "readiness": { - "type": "object", - "properties": { - "hasImpactedTargets": { - "type": "boolean", - "description": "Whether the set of impacted build/test targets for this PR has been reported." - }, - "requiresImpactedTargets": { - "type": "boolean", - "description": "Whether the queue is configured to require impacted-target reporting before a PR can start testing." - }, - "doesBaseBranchMatch": { - "type": "boolean", - "description": "Whether the PR's base branch matches the queue's target branch." - }, - "gitHubMergeability": { - "type": "string", - "enum": [ - "unspecified", - "in_progress", - "mergeable", - "not_mergeable" - ], - "description": "GitHub's mergeability state for the PR (cached). `unspecified` — not yet known. `in_progress` — GitHub is still computing mergeability. `mergeable` — GitHub reports the PR can be merged. `not_mergeable` — GitHub reports the PR cannot be merged (e.g. merge conflict)." - } - }, - "required": [ - "requiresImpactedTargets", - "doesBaseBranchMatch", - "gitHubMergeability" - ], - "description": "Readiness signals for a pull request in the merge queue. 
A PR can start testing when `doesBaseBranchMatch` is true, `gitHubMergeability` is `mergeable`, and (when `requiresImpactedTargets` is true) `hasImpactedTargets` is true." - }, - "stateChangedAt": { - "type": "string" - }, - "priorityValue": { - "type": "number" - }, - "priorityName": { - "type": "string" - }, - "usedDefaultPriorityName": { - "type": "string" - }, - "skipTheLine": { - "type": "boolean" - }, - "forceEnqueued": { - "type": "boolean" - }, - "isCurrentlySubmittedToQueue": { - "type": "boolean" - }, - "prNumber": { - "type": "number" - }, - "prTitle": { - "type": "string" - }, - "prSha": { - "type": "string" - }, - "prBaseBranch": { - "type": "string" - }, - "prAuthor": { - "type": "string" - } - }, - "required": [ - "stateChangedAt", - "priorityValue", - "priorityName", - "skipTheLine", - "forceEnqueued", - "isCurrentlySubmittedToQueue", - "prNumber", - "prTitle", - "prSha", - "prBaseBranch", - "prAuthor" - ] - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/listPullRequests": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "List pull requests in a merge queue.", - "description": "Returns a paginated list of pull requests in the merge queue. 
Optionally filter by state and/or by a time range for concluded pull requests.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository (e.g., github.com).", - "examples": [ - "github.com" - ] - }, - "owner": { - "type": "string", - "description": "The owner of the repository.", - "examples": [ - "trunk-io" - ] - }, - "name": { - "type": "string", - "description": "The name of the repository.", - "examples": [ - "trunk" - ] - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "targetBranch": { - "type": "string", - "description": "The target branch of the merge queue.", - "examples": [ - "main" - ] - }, - "state": { - "type": "string", - "enum": [ - "not_ready", - "pending", - "testing", - "tests_passed", - "merged", - "failed", - "cancelled", - "pending_failure" - ], - "description": "Optional filter for the state of pull requests. If not provided, pull requests of all states will be returned." - }, - "since": { - "type": "string", - "format": "date-time", - "description": "Optional ISO 8601 timestamp. When provided, returns pull requests that concluded (merged, failed, or cancelled) since this time.", - "examples": [ - "2024-01-01T00:00:00Z" - ] - }, - "cursor": { - "type": "string", - "format": "uuid", - "description": "Optional cursor for pagination. Use the nextCursor from the previous response." - }, - "take": { - "type": "integer", - "minimum": 1, - "maximum": 100, - "default": 50, - "description": "Number of pull requests to return (1-100). Defaults to 50." 
- } - }, - "required": [ - "repo", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "pullRequests": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "state": { - "type": "string", - "enum": [ - "not_ready", - "pending", - "testing", - "tests_passed", - "merged", - "failed", - "cancelled", - "pending_failure" - ], - "description": "The state of a pull request in the merge queue. See https://docs.trunk.io/merge-queue/using-the-queue/reference#pull-request-states for the full description of each state." - }, - "stateChangedAt": { - "type": "string" - }, - "priorityValue": { - "type": "number" - }, - "priorityName": { - "type": "string" - }, - "usedDefaultPriorityName": { - "type": "string" - }, - "skipTheLine": { - "type": "boolean" - }, - "noBatch": { - "type": "boolean" - }, - "prNumber": { - "type": "number" - }, - "prTitle": { - "type": "string" - }, - "prSha": { - "type": "string" - }, - "prBaseBranch": { - "type": "string" - }, - "prAuthor": { - "type": "string" - } - }, - "required": [ - "stateChangedAt", - "priorityValue", - "priorityName", - "skipTheLine", - "noBatch", - "prNumber", - "prTitle", - "prSha", - "prBaseBranch", - "prAuthor" - ] - }, - "description": "List of pull requests in the merge queue." - }, - "nextCursor": { - "type": "string", - "format": "uuid", - "description": "Cursor for the next page of results. If absent, there are no more results." 
- } - }, - "required": [ - "pullRequests" - ] - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/getQueue": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get the merge queue.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "targetBranch": { - "type": "string" - } - }, - "required": [ - "repo", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "running", - "paused", - "draining", - "switching_modes" - ], - "description": "The state of the merge queue. See https://docs.trunk.io/merge-queue/administration/advanced-settings#merge-queue-state for the full description of each state." 
- }, - "branch": { - "type": "string" - }, - "concurrency": { - "type": "number" - }, - "testingTimeoutMinutes": { - "type": "number" - }, - "mode": { - "type": "string", - "enum": [ - "single", - "parallel" - ] - }, - "canOptimisticallyMerge": { - "type": "boolean" - }, - "pendingFailureDepth": { - "type": "number" - }, - "batch": { - "type": "boolean" - }, - "batchingMaxWaitTimeMinutes": { - "type": "number" - }, - "batchingMinSize": { - "type": "number" - }, - "createPrsForTestingBranches": { - "type": "boolean" - }, - "commentsEnabled": { - "type": "boolean" - }, - "commandsEnabled": { - "type": "boolean" - }, - "statusCheckEnabled": { - "type": "boolean" - }, - "bisectionConcurrency": { - "type": "number" - }, - "requiredStatuses": { - "type": "array", - "items": { - "type": "string" - } - }, - "directMergeMode": { - "type": "string", - "enum": [ - "off", - "always" - ], - "description": "Controls whether PRs can skip the queue's test run and merge directly when already up to date with the target branch. See https://docs.trunk.io/merge-queue/administration/advanced-settings#direct-merge-to-main for details." - }, - "optimizationMode": { - "type": "string", - "enum": [ - "off", - "bisection_skip_redundant_tests" - ], - "description": "The optimization strategy for the merge queue. `off` — no optimizations. See https://docs.trunk.io/merge-queue/optimizations/batching#test-caching-during-bisection for details on `bisection_skip_redundant_tests`." - }, - "mergeMethod": { - "type": "string", - "enum": [ - "merge_commit", - "squash", - "rebase" - ], - "description": "The Git strategy used to merge a PR into the target branch. See https://docs.trunk.io/merge-queue/administration/advanced-settings#merge-method for details." 
- }, - "enqueuedPullRequests": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "state": { - "type": "string", - "enum": [ - "not_ready", - "pending", - "testing", - "tests_passed", - "merged", - "failed", - "cancelled", - "pending_failure" - ], - "description": "The state of a pull request in the merge queue. See https://docs.trunk.io/merge-queue/using-the-queue/reference#pull-request-states for the full description of each state." - }, - "stateChangedAt": { - "type": "string" - }, - "priorityValue": { - "type": "number" - }, - "priorityName": { - "type": "string" - }, - "usedDefaultPriorityName": { - "type": "string" - }, - "skipTheLine": { - "type": "boolean" - }, - "noBatch": { - "type": "boolean" - }, - "prNumber": { - "type": "number" - }, - "prTitle": { - "type": "string" - }, - "prSha": { - "type": "string" - }, - "prBaseBranch": { - "type": "string" - }, - "prAuthor": { - "type": "string" - } - }, - "required": [ - "stateChangedAt", - "priorityValue", - "priorityName", - "skipTheLine", - "noBatch", - "prNumber", - "prTitle", - "prSha", - "prBaseBranch", - "prAuthor" - ] - } - } - }, - "required": [ - "state", - "branch", - "concurrency", - "testingTimeoutMinutes", - "mode", - "canOptimisticallyMerge", - "pendingFailureDepth", - "batch", - "batchingMaxWaitTimeMinutes", - "batchingMinSize", - "createPrsForTestingBranches", - "commentsEnabled", - "commandsEnabled", - "statusCheckEnabled", - "bisectionConcurrency", - "requiredStatuses", - "directMergeMode", - "optimizationMode", - "mergeMethod", - "enqueuedPullRequests" - ] - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - 
"schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/updateQueue": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Update the merge queue.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "targetBranch": { - "type": "string", - "description": "The branch that the merge queue is targeting." - }, - "state": { - "type": "string", - "enum": [ - "running", - "paused", - "draining" - ], - "description": "The desired state of the merge queue. Valid values: RUNNING, PAUSED, DRAINING." - }, - "concurrency": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295, - "description": "The number of PRs or batches of PRs the queue can test at once." - }, - "bisectionConcurrency": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295, - "description": "The number of tests the merge queue can run when bisecting a batch to figure out what PR in the batch failed." - }, - "testingTimeoutMinutes": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295, - "description": "The maximum number of minutes the merge queue will wait for tests to complete before timing out." - }, - "pendingFailureDepth": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295, - "description": "When enabled, PRs that fail tests will wait for the specified number of PRs below them to finish testing before getting kicked from the queue. This works best with optimistic merging enabled." 
- }, - "canOptimisticallyMerge": { - "type": "boolean", - "description": "When enabled, a PR that passes tests will also cause any PR ahead of it in the queue to also get marked as passing, since tests have passed with those commits." - }, - "batch": { - "type": "boolean", - "description": "Enable or disable batching. When enabled, the merge queue will group PRs into batches for testing." - }, - "batchingMaxWaitTimeMinutes": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295, - "description": "The maximum number of minutes the merge queue will wait to collect PRs into a batch before starting tests." - }, - "batchingMinSize": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295, - "description": "The minimum number of PRs required to form a batch." - }, - "mode": { - "type": "string", - "enum": [ - "single", - "parallel" - ], - "description": "The queue mode. 'single' processes PRs one at a time. 'parallel' processes multiple PRs concurrently." - }, - "commentsEnabled": { - "type": "boolean", - "description": "Whether or not Merge Queue will post GitHub comments on PRs." - }, - "commandsEnabled": { - "type": "boolean", - "description": "Whether or not users are allowed to submit PRs to the merge queue by commenting `/trunk merge`." - }, - "createPrsForTestingBranches": { - "type": "boolean", - "description": "Whether or not the merge queue will create PRs for its testing branches, allowing CI to run on them." - }, - "directMergeMode": { - "type": "string", - "enum": [ - "off", - "always" - ], - "description": "Allow PRs to merge directly into the target branch if they're up to date with the target branch when submitting them to the queue instead of running tests on them in the merge queue." - }, - "optimizationMode": { - "type": "string", - "enum": [ - "off", - "bisection_skip_redundant_tests" - ], - "description": "The optimization strategy for the merge queue. 'off' disables optimizations. 
'bisection_skip_redundant_tests' uses bisection and skips redundant tests." - }, - "mergeMethod": { - "type": "string", - "enum": [ - "merge_commit", - "squash", - "rebase" - ], - "description": "The Git merge method used when merging PRs into the target branch. Valid values: merge_commit, squash, rebase." - }, - "statusCheckEnabled": { - "type": "boolean", - "description": "Post a GitHub status check on PRs with the status of the PR in the merge queue." - }, - "requiredStatuses": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Allows setting the statuses that must pass when the merge queue performs tests in order for a PR to merge. Setting the statuses here will override GitHub branch protection settings or your `.trunk/trunk.yaml`." - }, - "deleteRequiredStatuses": { - "type": "boolean", - "description": "Removes a manually specified set of required statuses. After this, the statuses that must pass when the merge queue performs testing will be pulled from either GitHub branch protection settings or your `.trunk/trunk.yaml`." - } - }, - "required": [ - "repo", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "running", - "paused", - "draining", - "switching_modes" - ], - "description": "The state of the merge queue. See https://docs.trunk.io/merge-queue/administration/advanced-settings#merge-queue-state for the full description of each state." 
- }, - "branch": { - "type": "string" - }, - "concurrency": { - "type": "number" - }, - "testingTimeoutMinutes": { - "type": "number" - }, - "mode": { - "type": "string", - "enum": [ - "single", - "parallel" - ] - }, - "canOptimisticallyMerge": { - "type": "boolean" - }, - "pendingFailureDepth": { - "type": "number" - }, - "batch": { - "type": "boolean" - }, - "batchingMaxWaitTimeMinutes": { - "type": "number" - }, - "batchingMinSize": { - "type": "number" - }, - "createPrsForTestingBranches": { - "type": "boolean" - }, - "commentsEnabled": { - "type": "boolean" - }, - "commandsEnabled": { - "type": "boolean" - }, - "statusCheckEnabled": { - "type": "boolean" - }, - "bisectionConcurrency": { - "type": "number" - }, - "requiredStatuses": { - "type": "array", - "items": { - "type": "string" - } - }, - "directMergeMode": { - "type": "string", - "enum": [ - "off", - "always" - ], - "description": "Controls whether PRs can skip the queue's test run and merge directly when already up to date with the target branch. See https://docs.trunk.io/merge-queue/administration/advanced-settings#direct-merge-to-main for details." - }, - "optimizationMode": { - "type": "string", - "enum": [ - "off", - "bisection_skip_redundant_tests" - ], - "description": "The optimization strategy for the merge queue. `off` — no optimizations. See https://docs.trunk.io/merge-queue/optimizations/batching#test-caching-during-bisection for details on `bisection_skip_redundant_tests`." - }, - "mergeMethod": { - "type": "string", - "enum": [ - "merge_commit", - "squash", - "rebase" - ], - "description": "The Git strategy used to merge a PR into the target branch. See https://docs.trunk.io/merge-queue/administration/advanced-settings#merge-method for details." 
- } - }, - "required": [ - "state", - "branch", - "concurrency", - "testingTimeoutMinutes", - "mode", - "canOptimisticallyMerge", - "pendingFailureDepth", - "batch", - "batchingMaxWaitTimeMinutes", - "batchingMinSize", - "createPrsForTestingBranches", - "commentsEnabled", - "commandsEnabled", - "statusCheckEnabled", - "bisectionConcurrency", - "requiredStatuses", - "directMergeMode", - "optimizationMode", - "mergeMethod" - ] - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/createQueue": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Create a new merge queue.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "targetBranch": { - "type": "string" - }, - "mode": { - "type": "string", - "enum": [ - "single", - "parallel" - ], - "description": "The queue processing mode. See https://docs.trunk.io/merge-queue/administration/advanced-settings#merge-queue-mode for details." 
- }, - "concurrency": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - } - }, - "required": [ - "repo", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": {} - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/deleteQueue": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Delete the specified merge queue. The queue must be empty in order to be deleted.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "targetBranch": { - "type": "string" - } - }, - "required": [ - "repo", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": {} - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal 
Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/getMergeQueueTestingDetails": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get details about testing that Merge Queue is performing", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository (e.g. `github.com`)." - }, - "owner": { - "type": "string", - "description": "The owner of the repository (e.g. `my-org`)." - }, - "name": { - "type": "string", - "description": "The name of the repository (e.g. `my-repo`)." - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository whose merge queue is running the test run." - }, - "testRunId": { - "type": "string" - }, - "targetBranch": { - "type": "string", - "description": "The target branch of the merge queue that is running the test run (e.g. `main`)." - } - }, - "required": [ - "repo", - "testRunId", - "targetBranch" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "requiredStatuses": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The list of status check names that must all pass for the test run to succeed and its PRs to merge." - }, - "requiredStatusesSource": { - "type": "string", - "enum": [ - "trunk_config", - "repo_provider_branch_protection", - "merge_instance" - ], - "description": "Where the merge queue sourced the list of required statuses from. `trunk_config` — from the repository's `.trunk/trunk.yaml`. `repo_provider_branch_protection` — from the Git provider's branch protection settings (e.g. GitHub branch protection rules). `merge_instance` — from the merge queue's own configuration (e.g. 
set via the API)." - }, - "testBranch": { - "type": "string", - "description": "The name of the temporary branch the merge queue created to run tests against (e.g. `trunk-merge/pr-1815/5df78918-...`)." - }, - "testBranchSha": { - "type": "string", - "description": "The commit SHA at the tip of the test branch that the test run was started on." - }, - "createdAt": { - "type": "string", - "description": "ISO 8601 timestamp of when the test run was created." - }, - "status": { - "type": "string", - "enum": [ - "in_progress", - "failed", - "cancelled", - "succeeded" - ], - "description": "The status of a merge queue test run. `in_progress` — tests are currently running. `succeeded` — all required statuses passed. `failed` — at least one required status failed. `cancelled` — the test run was cancelled before completion." - }, - "checkSuites": { - "type": "array", - "items": { - "type": "object", - "properties": { - "checkRuns": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name of the GitHub check run (e.g. `build`, `lint`)." - }, - "url": { - "type": "string", - "description": "URL to the check run on GitHub." - }, - "status": { - "type": "string", - "enum": [ - "QUEUED", - "IN_PROGRESS", - "COMPLETED" - ], - "description": "The status of a GitHub check run. Mirrors GitHub's check run status values." - }, - "conclusion": { - "type": "string", - "enum": [ - "ACTION_REQUIRED", - "CANCELLED", - "FAILURE", - "NEUTRAL", - "SUCCESS", - "SKIPPED", - "STALE", - "TIMED_OUT" - ], - "description": "The conclusion of a completed GitHub check run. Mirrors GitHub's check run conclusion values; only set once `status` is `COMPLETED`." - } - }, - "required": [ - "name", - "url" - ] - }, - "description": "The individual check runs that make up this check suite." - } - }, - "required": [ - "checkRuns" - ] - }, - "description": "GitHub check suites reported against the test branch." 
- }, - "statusChecks": { - "type": "array", - "items": { - "type": "object", - "properties": { - "context": { - "type": "string", - "description": "The context (name) of the status check as posted to GitHub (e.g. `ci/lint`)." - }, - "url": { - "type": "string", - "description": "URL with more details about this status check, if any." - }, - "state": { - "type": "string", - "enum": [ - "ERROR", - "FAILURE", - "PENDING", - "SUCCESS" - ], - "description": "The state of a GitHub commit status check. Mirrors GitHub's commit status state values." - } - }, - "required": [ - "context" - ] - }, - "description": "GitHub commit status checks reported against the test branch SHA." - }, - "testedPullRequests": { - "type": "array", - "items": { - "type": "object", - "properties": { - "prNumber": { - "type": "number", - "description": "The pull request number on the Git provider." - }, - "prUrl": { - "type": "string", - "description": "URL of the pull request on the Git provider." - }, - "title": { - "type": "string", - "description": "The title of the pull request." - } - }, - "required": [ - "prNumber", - "prUrl", - "title" - ] - }, - "description": "The pull requests that this test run is testing. Contains multiple entries when batching is enabled." - }, - "impactedTargets": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The union of impacted build/test targets across all PRs involved in the test run, including any dependent PRs previously merged into the test branch. Only present when impacted targets are being uploaded for the repository." - }, - "impactedTargetsForTestedPrs": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The impacted build/test targets for only the PRs being tested in this run (i.e. `testedPullRequests`), excluding any dependent PRs. Only present when impacted targets are being uploaded for the repository." 
- }, - "dependentPrs": { - "type": "array", - "items": { - "type": "number" - }, - "description": "PR numbers of other PRs this test run depends on but is not itself testing. Populated in parallel-mode queues when previously-passed PRs ahead of the tested PR are merged into the test branch to form the predicted base." - } - }, - "required": [ - "requiredStatuses", - "testBranch", - "testBranchSha", - "checkSuites", - "statusChecks", - "testedPullRequests" - ] - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/getMergeQueueMetrics": { - "get": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get Prometheus-format metrics for a merge queue", - "responses": { - "200": { - "description": "Prometheus-format metrics", - "content": { - "text/plain": { - "schema": { - "type": "string" - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/setImpactedTargets": { - "post": { - "security": [ - { - 
"ApiKeyAuth": [] - } - ], - "summary": "Set impacted targets for a pull request.", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "required": [ - "host", - "owner", - "name" - ] - }, - "pr": { - "type": "object", - "properties": { - "number": { - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - }, - "sha": { - "type": "string" - } - }, - "required": [ - "number", - "sha" - ] - }, - "targetBranch": { - "type": "string" - }, - "impactedTargets": { - "anyOf": [ - { - "type": "array", - "items": { - "type": "string" - } - }, - { - "type": "string", - "const": "ALL" - } - ] - } - }, - "required": [ - "repo", - "pr", - "targetBranch", - "impactedTargets" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": {} - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/flaky-ci/create-ci-run": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "[alpha] Record a CI run", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string" - }, - "owner": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - 
"required": [ - "host", - "owner", - "name" - ] - }, - "org_url_slug": { - "type": "string" - }, - "ci_job_name": { - "type": "string" - }, - "provider": { - "type": "string", - "enum": [ - "github", - "jenkins", - "circleci", - "semaphore", - "travis", - "webappio", - "codebuild", - "bitbucket", - "azure", - "gitlab", - "drone", - "buildkite" - ] - }, - "run_start": { - "type": "string", - "format": "date-time" - }, - "run_end": { - "type": "string", - "format": "date-time" - }, - "branch": { - "type": "string" - }, - "sha": { - "type": "string" - }, - "attempt": { - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "run_conclusion": { - "type": "string", - "enum": [ - "success", - "failure", - "skipped", - "cancelled" - ] - }, - "run_conclusion_cause": { - "type": "string", - "enum": [ - "test", - "ci" - ] - }, - "providers_job_id": { - "type": "string" - }, - "providers_run_id": { - "type": "string" - } - }, - "required": [ - "repo", - "org_url_slug", - "ci_job_name", - "run_start", - "run_end", - "branch", - "sha", - "attempt" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "run_id": { - "type": "string" - } - }, - "required": [ - "run_id" - ] - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/flaky-tests/list-quarantined-tests": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get a list of quarantined tests", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository. For example, `github.com` or `gitlab.com`. 
If self-hosting, this will be the hostname of your instance.", - "examples": [ - "github.com", - "gitlab.com", - "github-enterprise.my-org-tld.com", - "gitlab-enterprise.my-org-tld.com" - ] - }, - "owner": { - "type": "string", - "description": "The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself.", - "examples": [ - "my-github-org", - "my-gitlab-org/my/sub/group" - ] - }, - "name": { - "type": "string", - "description": "The name of the repository.", - "examples": [ - "my-repo" - ] - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository to list tests for." - }, - "org_url_slug": { - "type": "string", - "description": "The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\"", - "examples": [ - "my-trunk-org-slug" - ] - }, - "page_query": { - "type": "object", - "properties": { - "page_size": { - "type": "integer", - "minimum": 1, - "maximum": 100, - "description": "The number of tests to return per page." - }, - "page_token": { - "type": "string", - "description": "The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty.", - "examples": [ - "" - ] - } - }, - "required": [ - "page_size" - ], - "description": "Pagination options for the list of tests." - } - }, - "required": [ - "repo", - "org_url_slug", - "page_query" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "quarantined_tests": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name of the test case." 
- }, - "parent": { - "type": [ - "string", - "null" - ], - "description": "The parent of the test case." - }, - "file": { - "type": [ - "string", - "null" - ], - "description": "The file of the test case." - }, - "classname": { - "type": [ - "string", - "null" - ], - "description": "The class name of the test case." - }, - "status": { - "type": "string", - "enum": [ - "HEALTHY", - "FLAKY", - "BROKEN" - ], - "description": "The status of the test case." - }, - "codeowners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The latest codeowners of the test case." - }, - "quarantine_setting": { - "type": "string", - "enum": [ - "ALWAYS_QUARANTINE", - "AUTO_QUARANTINE" - ], - "description": "The quarantine setting of the test case." - }, - "quarantined_at": { - "type": "string", - "format": "date-time", - "description": "The time at which the test case was quarantined, if applicable." - }, - "status_last_updated_at": { - "type": "string", - "format": "date-time", - "description": "The last time the status of the test case was updated." - }, - "test_case_id": { - "type": "string", - "description": "The ID of the test case. This value is unstable and should not be relied upon." - }, - "variant": { - "type": "string", - "description": "The variant of the test case." - } - }, - "required": [ - "name", - "parent", - "file", - "classname", - "status", - "codeowners", - "quarantine_setting", - "quarantined_at", - "status_last_updated_at", - "test_case_id", - "variant" - ], - "description": "A quarantined test case." - }, - "description": "A page of quarantined test cases." - }, - "page": { - "type": "object", - "properties": { - "total_rows": { - "type": "number", - "minimum": 0, - "description": "The total number of test cases in the paginated list." - }, - "total_pages": { - "type": "number", - "minimum": 0, - "description": "The total number of pages in the paginated list of test cases." 
- }, - "next_page_token": { - "type": "string", - "description": "The next page token to use for pagination. See `page_token` in the request for more information." - }, - "prev_page_token": { - "type": "string", - "description": "The previous page token to use for pagination. See `page_token` in the request for more information." - }, - "last_page_token": { - "type": "string", - "description": "The last page token to use for pagination. See `page_token` in the request for more information." - }, - "page_index": { - "type": "number", - "minimum": 0, - "description": "The index of the current page in the paginated list of test cases." - } - }, - "required": [ - "total_rows", - "total_pages", - "next_page_token", - "prev_page_token", - "last_page_token", - "page_index" - ], - "description": "Pagination information for the list of test cases." - } - }, - "required": [ - "quarantined_tests", - "page" - ] - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/flaky-tests/list-unhealthy-tests": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get a list of unhealthy tests", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance.", - "examples": [ - "github.com", - "gitlab.com", - "github-enterprise.my-org-tld.com", - "gitlab-enterprise.my-org-tld.com" - ] - }, - "owner": { - "type": "string", - "description": "The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. 
`owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself.", - "examples": [ - "my-github-org", - "my-gitlab-org/my/sub/group" - ] - }, - "name": { - "type": "string", - "description": "The name of the repository.", - "examples": [ - "my-repo" - ] - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository to list tests for." - }, - "org_url_slug": { - "type": "string", - "description": "The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\"", - "examples": [ - "my-trunk-org-slug" - ] - }, - "page_query": { - "type": "object", - "properties": { - "page_size": { - "type": "integer", - "minimum": 1, - "maximum": 100, - "description": "The number of tests to return per page." - }, - "page_token": { - "type": "string", - "description": "The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty.", - "examples": [ - "" - ] - } - }, - "required": [ - "page_size" - ], - "description": "Pagination options for the list of tests." - }, - "status": { - "type": "string", - "enum": [ - "FLAKY", - "BROKEN" - ], - "description": "The status filter for unhealthy tests." 
- } - }, - "required": [ - "repo", - "org_url_slug", - "page_query", - "status" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "tests": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid", - "description": "A stable unique identifier for the test" - }, - "repository": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the repository" - } - }, - "required": [ - "html_url" - ] - }, - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the test details" - }, - "name": { - "type": "string", - "description": "The name of the test" - }, - "variant": { - "type": "string", - "description": "The name of the test variant" - }, - "status": { - "type": "object", - "properties": { - "value": { - "type": "string", - "enum": [ - "healthy", - "flaky", - "broken" - ], - "description": "The current status value in lowercase" - }, - "reason": { - "type": "string", - "description": "The reason for the current status" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the current status change" - } - }, - "required": [ - "value", - "reason", - "timestamp" - ] - }, - "file_path": { - "type": "string", - "description": "The file path of the test" - }, - "parent": { - "type": "string", - "description": "The parent of the test. 
This includes the test suite (depending on the test runner)" - }, - "classname": { - "type": "string", - "description": "The class name of the test" - }, - "codeowners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Code owners for the test" - }, - "pull_requests_impacted_last_7d": { - "type": "integer", - "minimum": 0, - "description": "The number of pull requests impacted in the last 7 days" - }, - "quarantined": { - "type": "boolean", - "description": "Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed." - }, - "ticket": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the associated ticket" - } - }, - "required": [ - "html_url" - ] - } - }, - "required": [ - "id", - "repository", - "html_url", - "name", - "variant", - "status", - "codeowners", - "pull_requests_impacted_last_7d", - "quarantined" - ] - }, - "description": "A page of unhealthy test cases." - }, - "page": { - "type": "object", - "properties": { - "total_rows": { - "type": "number", - "minimum": 0, - "description": "The total number of test cases in the paginated list." - }, - "total_pages": { - "type": "number", - "minimum": 0, - "description": "The total number of pages in the paginated list of test cases." - }, - "next_page_token": { - "type": "string", - "description": "The next page token to use for pagination. See `page_token` in the request for more information." - }, - "prev_page_token": { - "type": "string", - "description": "The previous page token to use for pagination. See `page_token` in the request for more information." 
- }, - "last_page_token": { - "type": "string", - "description": "The last page token to use for pagination. See `page_token` in the request for more information." - }, - "page_index": { - "type": "number", - "minimum": 0, - "description": "The index of the current page in the paginated list of test cases." - } - }, - "required": [ - "total_rows", - "total_pages", - "next_page_token", - "prev_page_token", - "last_page_token", - "page_index" - ], - "description": "Pagination information for the list of test cases." - } - }, - "required": [ - "tests", - "page" - ] - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/flaky-tests/list-failing-tests": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get a list of distinct tests that failed in the given time range", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance.", - "examples": [ - "github.com", - "gitlab.com", - "github-enterprise.my-org-tld.com", - "gitlab-enterprise.my-org-tld.com" - ] - }, - "owner": { - "type": "string", - "description": "The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself.", - "examples": [ - "my-github-org", - "my-gitlab-org/my/sub/group" - ] - }, - "name": { - "type": "string", - "description": "The name of the repository.", - "examples": [ - "my-repo" - ] - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository to list tests for." 
- }, - "org_url_slug": { - "type": "string", - "description": "The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\"", - "examples": [ - "my-trunk-org-slug" - ] - }, - "start_time": { - "type": "string", - "format": "date-time", - "description": "The start time of the failing tests (inclusive). Must be within 7 days of the end time." - }, - "end_time": { - "type": "string", - "format": "date-time", - "description": "The end time of the failing tests (exclusive). Must be within 7 days of the start time." - }, - "page_query": { - "type": "object", - "properties": { - "page_size": { - "type": "integer", - "minimum": 1, - "maximum": 100, - "description": "The number of tests to return per page." - }, - "page_token": { - "type": "string", - "description": "The page token to use for pagination. This is returned from the previous call to this endpoint. For the first page, this should be empty.", - "examples": [ - "" - ] - } - }, - "required": [ - "page_size" - ], - "description": "Pagination options for the list of tests." 
- } - }, - "required": [ - "repo", - "org_url_slug", - "start_time", - "end_time", - "page_query" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "tests": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid", - "description": "A stable unique identifier for the test" - }, - "repository": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the repository" - } - }, - "required": [ - "html_url" - ] - }, - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the test details" - }, - "name": { - "type": "string", - "description": "The name of the test" - }, - "variant": { - "type": "string", - "description": "The name of the test variant" - }, - "status": { - "type": "object", - "properties": { - "value": { - "type": "string", - "enum": [ - "healthy", - "flaky", - "broken" - ], - "description": "The current status value in lowercase" - }, - "reason": { - "type": "string", - "description": "The reason for the current status" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the current status change" - } - }, - "required": [ - "value", - "reason", - "timestamp" - ] - }, - "most_common_failures": { - "type": "array", - "items": { - "type": "object", - "properties": { - "summary": { - "type": "string", - "description": "The summary of the failure" - }, - "occurrence_count": { - "type": "integer", - "minimum": 0, - "description": "The number of occurrences of this failure" - }, - "last_occurrence": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the last occurrence" - } - }, - "required": [ - "summary", - "occurrence_count" - ] - }, - "description": "Several of the most common failures of the test. 
This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team." - }, - "failure_rate_last_7d": { - "type": "number", - "description": "The failure rate over the last 7 days" - }, - "failure_rate_last_24h": { - "type": "number", - "description": "The failure rate over the last 24 hours" - }, - "file_path": { - "type": "string", - "description": "The file path of the test" - }, - "parent": { - "type": "string", - "description": "The parent of the test. This includes the test suite (depending on the test runner)" - }, - "classname": { - "type": "string", - "description": "The class name of the test" - }, - "codeowners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Code owners for the test" - }, - "pull_requests_impacted_last_7d": { - "type": "integer", - "minimum": 0, - "description": "The number of pull requests impacted in the last 7 days" - }, - "quarantined": { - "type": "boolean", - "description": "Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed." - }, - "ticket": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the associated ticket" - } - }, - "required": [ - "html_url" - ] - } - }, - "required": [ - "id", - "repository", - "html_url", - "name", - "variant", - "status", - "most_common_failures", - "failure_rate_last_7d", - "failure_rate_last_24h", - "codeowners", - "pull_requests_impacted_last_7d", - "quarantined" - ] - }, - "description": "A page of failing test cases." 
- }, - "page": { - "type": "object", - "properties": { - "total_rows": { - "type": "number", - "minimum": 0, - "description": "The total number of test cases in the paginated list." - }, - "total_pages": { - "type": "number", - "minimum": 0, - "description": "The total number of pages in the paginated list of test cases." - }, - "next_page_token": { - "type": "string", - "description": "The next page token to use for pagination. See `page_token` in the request for more information." - }, - "prev_page_token": { - "type": "string", - "description": "The previous page token to use for pagination. See `page_token` in the request for more information." - }, - "last_page_token": { - "type": "string", - "description": "The last page token to use for pagination. See `page_token` in the request for more information." - }, - "page_index": { - "type": "number", - "minimum": 0, - "description": "The index of the current page in the paginated list of test cases." - } - }, - "required": [ - "total_rows", - "total_pages", - "next_page_token", - "prev_page_token", - "last_page_token", - "page_index" - ], - "description": "Pagination information for the list of test cases." - } - }, - "required": [ - "tests", - "page" - ] - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/flaky-tests/get-test-details": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Get the details of a test case", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository. For example, `github.com` or `gitlab.com`. 
If self-hosting, this will be the hostname of your instance.", - "examples": [ - "github.com", - "gitlab.com", - "github-enterprise.my-org-tld.com", - "gitlab-enterprise.my-org-tld.com" - ] - }, - "owner": { - "type": "string", - "description": "The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself.", - "examples": [ - "my-github-org", - "my-gitlab-org/my/sub/group" - ] - }, - "name": { - "type": "string", - "description": "The name of the repository.", - "examples": [ - "my-repo" - ] - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository to list tests for." - }, - "org_url_slug": { - "type": "string", - "description": "The slug of your organization. Find this at https://app.trunk.io/trunk/settings under \"Organization Name\" > \"Slug\"", - "examples": [ - "my-trunk-org-slug" - ] - }, - "test_id": { - "type": "string", - "format": "uuid", - "description": "The id of a test case. 
Should be a UUID.", - "examples": [ - "01234567-0123-0123-0123-0123456789ab" - ] - } - }, - "required": [ - "repo", - "org_url_slug", - "test_id" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid", - "description": "A stable unique identifier for the test" - }, - "repository": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the repository" - } - }, - "required": [ - "html_url" - ] - }, - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the test details" - }, - "name": { - "type": "string", - "description": "The name of the test" - }, - "variant": { - "type": "string", - "description": "The name of the test variant" - }, - "status": { - "type": "object", - "properties": { - "value": { - "type": "string", - "enum": [ - "healthy", - "flaky", - "broken" - ], - "description": "The current status value in lowercase" - }, - "reason": { - "type": "string", - "description": "The reason for the current status" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the current status change" - } - }, - "required": [ - "value", - "reason", - "timestamp" - ] - }, - "most_common_failures": { - "type": "array", - "items": { - "type": "object", - "properties": { - "summary": { - "type": "string", - "description": "The summary of the failure" - }, - "occurrence_count": { - "type": "integer", - "minimum": 0, - "description": "The number of occurrences of this failure" - }, - "last_occurrence": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the last occurrence" - } - }, - "required": [ - "summary", - "occurrence_count" - ] - }, - "description": "Several of the most common failures of the 
test. This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team." - }, - "failure_rate_last_7d": { - "type": "number", - "description": "The failure rate over the last 7 days" - }, - "failure_rate_last_24h": { - "type": "number", - "description": "The failure rate over the last 24 hours" - }, - "file_path": { - "type": "string", - "description": "The file path of the test" - }, - "parent": { - "type": "string", - "description": "The parent of the test. This includes the test suite (depending on the test runner)" - }, - "classname": { - "type": "string", - "description": "The class name of the test" - }, - "codeowners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Code owners for the test" - }, - "pull_requests_impacted_last_7d": { - "type": "integer", - "minimum": 0, - "description": "The number of pull requests impacted in the last 7 days" - }, - "quarantined": { - "type": "boolean", - "description": "Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed." - }, - "ticket": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the associated ticket" - } - }, - "required": [ - "html_url" - ] - } - }, - "required": [ - "id", - "repository", - "html_url", - "name", - "variant", - "status", - "most_common_failures", - "failure_rate_last_7d", - "failure_rate_last_24h", - "codeowners", - "pull_requests_impacted_last_7d", - "quarantined" - ], - "description": "The details of a test case." 
- } - }, - "required": [ - "test" - ] - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/flaky-tests/link-ticket-to-test-case": { - "post": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "summary": "Link a ticket to a test case", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_case_id": { - "type": "string", - "format": "uuid", - "description": "The id of the test case. Should be a UUID.", - "examples": [ - "01234567-0123-0123-0123-0123456789ab" - ] - }, - "external_ticket_id": { - "type": "string", - "description": "The external identifier of the ticket. For Jira this is the ticket number prefixed by the Project Key. For Linear this is the ticket number prefixed by the Team Identifier", - "examples": [ - "KAN-123", - "TRUNK-1234" - ] - }, - "repo": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The host of the repository. For example, `github.com` or `gitlab.com`. If self-hosting, this will be the hostname of your instance.", - "examples": [ - "github.com", - "gitlab.com", - "github-enterprise.my-org-tld.com", - "gitlab-enterprise.my-org-tld.com" - ] - }, - "owner": { - "type": "string", - "description": "The owner of the repository. For example, `my-github-org` or `my-gitlab-org/my/sub/group`. `owner` for GitLab will include the GitLab org, plus the project path, but excluding the repo name itself.", - "examples": [ - "my-github-org", - "my-gitlab-org/my/sub/group" - ] - }, - "name": { - "type": "string", - "description": "The name of the repository.", - "examples": [ - "my-repo" - ] - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository to list tests for." 
- } - }, - "required": [ - "test_case_id", - "external_ticket_id", - "repo" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": {} - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/status": { - "get": { - "security": [], - "summary": "Get the status of Trunk services", - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "overallStatus": { - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "up" - }, - "color": { - "type": "string", - "enum": [ - "green" - ] - }, - "overallStatusDescription": { - "type": "string", - "const": "All systems operational" - } - }, - "required": [ - "type", - "color", - "overallStatusDescription" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "impacted" - }, - "color": { - "type": "string", - "enum": [ - "yellow", - "red" - ] - }, - "overallStatusDescription": { - "type": "string", - "const": "Impacted" - }, - "impactedStatuses": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "color": { - "type": "string", - "enum": [ - "yellow", - "red" - ] - }, - "statusDescription": { - "type": "string" - } - }, - "required": [ - "name", - "color", - "statusDescription" - ] - } - } - }, - "required": [ - "type", - "color", - 
"overallStatusDescription", - "impactedStatuses" - ] - } - ] - }, - "statuses": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "color": { - "type": "string", - "enum": [ - "green", - "yellow", - "red" - ] - }, - "statusDescription": { - "type": "string" - } - }, - "required": [ - "name", - "color", - "statusDescription" - ] - } - } - }, - "required": [ - "overallStatus", - "statuses" - ] - } - } - } - } - } - } - }, - "/device-auth/initiate": { - "post": { - "security": [], - "summary": "Initiate device login flow via WorkOS CLI auth", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object" - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "deviceCode": { - "type": "string", - "description": "Device code for polling" - }, - "userCode": { - "type": "string", - "description": "User code displayed to the user" - }, - "verificationUri": { - "type": "string", - "description": "URI the user should visit to authenticate" - }, - "verificationUriComplete": { - "type": "string", - "description": "URI with the user code pre-filled for one-click authentication" - }, - "expiresIn": { - "type": "number", - "exclusiveMinimum": 0, - "description": "Seconds until the codes expire" - }, - "interval": { - "type": "number", - "exclusiveMinimum": 0, - "description": "Polling interval in seconds" - } - }, - "required": [ - "deviceCode", - "userCode", - "verificationUri", - "verificationUriComplete", - "expiresIn", - "interval" - ] - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - 
"application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - }, - "/device-auth/tokens": { - "post": { - "security": [], - "summary": "Poll for device login tokens via WorkOS CLI auth", - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "deviceCode": { - "type": "string", - "description": "Device code returned from initiate" - } - }, - "required": [ - "deviceCode" - ] - } - } - } - }, - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "status": { - "type": "string", - "enum": [ - "pending", - "success", - "denied", - "expired", - "slow_down" - ], - "description": "Authentication status" - }, - "sealedSession": { - "type": "string", - "description": "Sealed session for authenticating subsequent requests, present on success" - }, - "interval": { - "type": "number", - "description": "Updated polling interval in seconds, present on slow_down" - } - }, - "required": [ - "status" - ] - } - } - } - }, - "400": { - "description": "Bad Request", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "401": { - "description": "Unauthorized", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "404": { - "description": "Not Found", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - }, - "500": { - "description": "Internal Server Error", - "content": { - "application/plain-text": { - "schema": { - "type": "string" - } - } - } - } - } - } - } - }, - "webhooks": { - "test_case.quarantining_setting_changed": { - "post": { - "operationId": "test_case.quarantining_setting_changed", - "summary": "Test case quarantining setting 
changed", - "description": "Emitted when the quarantining setting of a test case changes", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "test_case.quarantining_setting_changed", - "description": "The type of webhook event that occurred" - }, - "test_case": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid", - "description": "A stable unique identifier for the test" - }, - "repository": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the repository" - } - }, - "required": [ - "html_url" - ] - }, - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the test details" - }, - "name": { - "type": "string", - "description": "The name of the test" - }, - "variant": { - "type": "string", - "description": "The name of the test variant" - }, - "status": { - "type": "object", - "properties": { - "value": { - "type": "string", - "enum": [ - "healthy", - "flaky", - "broken" - ], - "description": "The current status value in lowercase" - }, - "reason": { - "type": "string", - "description": "The reason for the current status" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the current status change" - } - }, - "required": [ - "value", - "reason", - "timestamp" - ] - }, - "most_common_failures": { - "type": "array", - "items": { - "type": "object", - "properties": { - "summary": { - "type": "string", - "description": "The summary of the failure" - }, - "occurrence_count": { - "type": "integer", - "minimum": 0, - "description": "The number of occurrences of this failure" - }, - "last_occurrence": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the last occurrence" - } - }, - "required": [ - "summary", - "occurrence_count" - ] - 
}, - "description": "Several of the most common failures of the test. This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team." - }, - "failure_rate_last_7d": { - "type": "number", - "description": "The failure rate over the last 7 days" - }, - "failure_rate_last_24h": { - "type": "number", - "description": "The failure rate over the last 24 hours" - }, - "file_path": { - "type": "string", - "description": "The file path of the test" - }, - "parent": { - "type": "string", - "description": "The parent of the test. This includes the test suite (depending on the test runner)" - }, - "classname": { - "type": "string", - "description": "The class name of the test" - }, - "codeowners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Code owners for the test" - }, - "pull_requests_impacted_last_7d": { - "type": "integer", - "minimum": 0, - "description": "The number of pull requests impacted in the last 7 days" - }, - "quarantined": { - "type": "boolean", - "description": "Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed." 
- }, - "ticket": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the associated ticket" - } - }, - "required": [ - "html_url" - ] - }, - "test_suite": { - "type": "string", - "description": "The test suite of the test (deprecated, use `parent` instead)" - } - }, - "required": [ - "id", - "repository", - "html_url", - "name", - "variant", - "status", - "most_common_failures", - "failure_rate_last_7d", - "failure_rate_last_24h", - "codeowners", - "pull_requests_impacted_last_7d", - "quarantined" - ] - }, - "quarantine_setting_changed": { - "type": "object", - "properties": { - "actor": { - "type": "object", - "properties": { - "full_name": { - "type": "string", - "description": "The full name of the user that changed the quarantining setting" - }, - "email": { - "type": "string", - "description": "The email of the user that changed the quarantining setting" - } - }, - "required": [ - "full_name", - "email" - ], - "description": "The details of the user that changed the quarantining setting" - }, - "reason": { - "type": [ - "string", - "null" - ], - "description": "The annotated reason for the quarantining setting change" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the quarantining setting change" - }, - "previous_quarantining_setting": { - "type": "string", - "enum": [ - "ALWAYS_QUARANTINE", - "NEVER_QUARANTINE", - "UNSPECIFIED" - ], - "description": "The previous quarantining setting (`UNSPECIFIED` means the repo default)", - "example": "UNSPECIFIED" - }, - "updated_quarantining_setting": { - "type": "string", - "enum": [ - "ALWAYS_QUARANTINE", - "NEVER_QUARANTINE", - "UNSPECIFIED" - ], - "description": "The updated quarantining setting (`UNSPECIFIED` means the repo default)", - "example": "ALWAYS_QUARANTINE" - } - }, - "required": [ - "actor", - "reason", - "timestamp", - "previous_quarantining_setting", - 
"updated_quarantining_setting" - ], - "description": "The quarantining setting that was changed" - } - }, - "required": [ - "type", - "test_case", - "quarantine_setting_changed" - ], - "description": "Emitted when the quarantining setting of a test case changes" - }, - "example": { - "type": "test_case.quarantining_setting_changed", - "test_case": { - "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", - "repository": { - "html_url": "http://example.com" - }, - "html_url": "http://example.com", - "name": "string", - "variant": "string", - "status": { - "value": "healthy", - "reason": "string", - "timestamp": "2019-08-24T14:15:22Z" - }, - "most_common_failures": [ - { - "summary": "string", - "occurrence_count": 0, - "last_occurrence": "2019-08-24T14:15:22Z" - } - ], - "failure_rate_last_7d": 0, - "failure_rate_last_24h": 0, - "file_path": "string", - "parent": "string", - "classname": "string", - "codeowners": [ - "string" - ], - "pull_requests_impacted_last_7d": 0, - "quarantined": true, - "ticket": { - "html_url": "http://example.com" - }, - "test_suite": "string" - }, - "quarantine_setting_changed": { - "actor": { - "full_name": "string", - "email": "string" - }, - "reason": "string", - "timestamp": "2019-08-24T14:15:22Z", - "previous_quarantining_setting": "UNSPECIFIED", - "updated_quarantining_setting": "ALWAYS_QUARANTINE" - } - } - } - } - }, - "responses": {} - } - }, - "test_case.status_changed": { - "post": { - "operationId": "test_case.status_changed", - "summary": "Test case status changed", - "description": "Emitted when the health status of a test case changes. Test status can transition between `healthy`, `flaky`, and `broken`. 
Learn how test health is detected: https://docs.trunk.io/flaky-tests/detection", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "test_case.status_changed", - "description": "The type of webhook event that occurred" - }, - "test_case": { - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid", - "description": "A stable unique identifier for the test" - }, - "repository": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the repository" - } - }, - "required": [ - "html_url" - ] - }, - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the test details" - }, - "name": { - "type": "string", - "description": "The name of the test" - }, - "variant": { - "type": "string", - "description": "The name of the test variant" - }, - "status": { - "type": "object", - "properties": { - "value": { - "type": "string", - "enum": [ - "healthy", - "flaky", - "broken" - ], - "description": "The current status value in lowercase" - }, - "reason": { - "type": "string", - "description": "The reason for the current status" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the current status change" - } - }, - "required": [ - "value", - "reason", - "timestamp" - ] - }, - "most_common_failures": { - "type": "array", - "items": { - "type": "object", - "properties": { - "summary": { - "type": "string", - "description": "The summary of the failure" - }, - "occurrence_count": { - "type": "integer", - "minimum": 0, - "description": "The number of occurrences of this failure" - }, - "last_occurrence": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the last occurrence" - } - }, - "required": [ - "summary", - "occurrence_count" - ] - }, - "description": 
"Several of the most common failures of the test. This is behind a feature flag, access to this feature can be requested by reaching out to the Trunk team." - }, - "failure_rate_last_7d": { - "type": "number", - "description": "The failure rate over the last 7 days" - }, - "failure_rate_last_24h": { - "type": "number", - "description": "The failure rate over the last 24 hours" - }, - "file_path": { - "type": "string", - "description": "The file path of the test" - }, - "parent": { - "type": "string", - "description": "The parent of the test. This includes the test suite (depending on the test runner)" - }, - "classname": { - "type": "string", - "description": "The class name of the test" - }, - "codeowners": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Code owners for the test" - }, - "pull_requests_impacted_last_7d": { - "type": "integer", - "minimum": 0, - "description": "The number of pull requests impacted in the last 7 days" - }, - "quarantined": { - "type": "boolean", - "description": "Whether the test is quarantined.\n\n This is `true` when quarantining is enabled for the repo and either of the following applies:\n\n - The quarantine override is set to `ALWAYS_QUARANTINE` for this test\n - Automatic quarantining is enabled for the repo, and this test's status is either `flaky` or `broken`\n\n If this is `true`, the next test run will be marked as passed even if the test run conclusion is\n failed." 
- }, - "ticket": { - "type": "object", - "properties": { - "html_url": { - "type": "string", - "format": "uri", - "description": "The URL of the associated ticket" - } - }, - "required": [ - "html_url" - ] - }, - "test_suite": { - "type": "string", - "description": "The test suite of the test (deprecated, use `parent` instead)" - } - }, - "required": [ - "id", - "repository", - "html_url", - "name", - "variant", - "status", - "most_common_failures", - "failure_rate_last_7d", - "failure_rate_last_24h", - "codeowners", - "pull_requests_impacted_last_7d", - "quarantined" - ] - }, - "status_change": { - "type": "object", - "properties": { - "previous_status": { - "type": "string", - "enum": [ - "healthy", - "flaky", - "broken" - ], - "description": "The previous status of the test in lowercase" - }, - "current_status": { - "type": "object", - "properties": { - "value": { - "type": "string", - "enum": [ - "healthy", - "flaky", - "broken" - ], - "description": "The current status value in lowercase" - }, - "reason": { - "type": "string", - "description": "The reason for the current status" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The timestamp of the current status change" - } - }, - "required": [ - "value", - "reason", - "timestamp" - ] - } - }, - "required": [ - "previous_status", - "current_status" - ] - } - }, - "required": [ - "type", - "test_case", - "status_change" - ], - "description": "Emitted when the health status of a test case changes. Test status can transition between `healthy`, `flaky`, and `broken`. 
Learn how test health is detected: https://docs.trunk.io/flaky-tests/detection" - }, - "example": { - "type": "test_case.status_changed", - "test_case": { - "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08", - "repository": { - "html_url": "http://example.com" - }, - "html_url": "http://example.com", - "name": "string", - "variant": "string", - "status": { - "value": "healthy", - "reason": "string", - "timestamp": "2019-08-24T14:15:22Z" - }, - "most_common_failures": [ - { - "summary": "string", - "occurrence_count": 0, - "last_occurrence": "2019-08-24T14:15:22Z" - } - ], - "failure_rate_last_7d": 0, - "failure_rate_last_24h": 0, - "file_path": "string", - "parent": "string", - "classname": "string", - "codeowners": [ - "string" - ], - "pull_requests_impacted_last_7d": 0, - "quarantined": true, - "ticket": { - "html_url": "http://example.com" - }, - "test_suite": "string" - }, - "status_change": { - "previous_status": "healthy", - "current_status": { - "value": "healthy", - "reason": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - } - } - }, - "responses": {} - } - }, - "pull_request.failed": { - "post": { - "operationId": "pull_request.failed", - "summary": "Pull request failed", - "description": "Triggered when a PR fails while in the merge queue", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "batched": { - "type": "boolean", - "description": "Whether this pull request was part of a batch" - }, - "failure_reason": { - "type": "string", - 
"enum": [ - "START_TEST_RUN_INVALID_EVENT", - "START_TEST_RUN_MERGE_CONFLICT", - "START_TEST_RUN_CONFIG_NOT_FOUND", - "START_TEST_RUN_CONFIG_PARSING_FAILURE", - "START_TEST_RUN_CONFIG_MISSING_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_VERSION", - "START_TEST_RUN_NO_REQUIRED_STATUSES", - "TEST_RUN_REQUIRED_STATUS_FAILED", - "TEST_RUN_TIMEOUT", - "GITHUB_API_MERGE_PR_FAILED", - "GITHUB_API_DIRECT_MERGE_PR_FAILED", - "PR_UPDATED_AT_MERGE_TIME", - "START_TEST_RUN_RETRY_EXHAUSTED", - "START_TEST_RUN_DRAFT_PRS_NOT_ALLOWED", - "TEST_RUN_FAILED_BY_OPTIMIZATION" - ], - "description": "The reason the PR failed" - }, - "is_bisection": { - "type": "boolean", - "description": "Whether this event is part of a bisection test run" - }, - "type": { - "type": "string", - "const": "pull_request.failed", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "failed", - "description": "The type of action that triggered the webhook" - }, - "author": { - "type": "string", - "description": "The username of the author of the PR" - }, - "http_details_url": { - "type": "string", - "format": "uri", - "description": "Web link to the details about this PR" - }, - "pr_number": { - "type": "integer", - "minimum": 0, - "description": "The PR number" - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "failed", - "description": "The current status of the PR" - }, - "target_branch": { - "type": "string", - "description": 
"The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "is_bisection", - "type", - "action", - "author", - "http_details_url", - "pr_number", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a PR fails while in the merge queue" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "batched": true, - "failure_reason": "START_TEST_RUN_INVALID_EVENT", - "is_bisection": true, - "type": "pull_request.failed", - "action": "failed", - "author": "string", - "http_details_url": "http://example.com", - "pr_number": 0, - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "failed", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request.pending_failure": { - "post": { - "operationId": "pull_request.pending_failure", - "summary": "Pull request pending failure", - "description": "Triggered when a PR enters pending failure state while in the merge queue", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "batched": { - "type": "boolean", - "description": "Whether this pull request was part of a batch" - }, - "failure_reason": { - "type": "string", - "enum": [ - 
"START_TEST_RUN_INVALID_EVENT", - "START_TEST_RUN_MERGE_CONFLICT", - "START_TEST_RUN_CONFIG_NOT_FOUND", - "START_TEST_RUN_CONFIG_PARSING_FAILURE", - "START_TEST_RUN_CONFIG_MISSING_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_VERSION", - "START_TEST_RUN_NO_REQUIRED_STATUSES", - "TEST_RUN_REQUIRED_STATUS_FAILED", - "TEST_RUN_TIMEOUT", - "GITHUB_API_MERGE_PR_FAILED", - "GITHUB_API_DIRECT_MERGE_PR_FAILED", - "PR_UPDATED_AT_MERGE_TIME", - "START_TEST_RUN_RETRY_EXHAUSTED", - "START_TEST_RUN_DRAFT_PRS_NOT_ALLOWED", - "TEST_RUN_FAILED_BY_OPTIMIZATION" - ], - "description": "The reason the PR is pending failure" - }, - "type": { - "type": "string", - "const": "pull_request.pending_failure", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "pending_failure", - "description": "The type of action that triggered the webhook" - }, - "author": { - "type": "string", - "description": "The username of the author of the PR" - }, - "http_details_url": { - "type": "string", - "format": "uri", - "description": "Web link to the details about this PR" - }, - "pr_number": { - "type": "integer", - "minimum": 0, - "description": "The PR number" - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "pending_failure", - "description": "The current status of the PR" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - 
"timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "type", - "action", - "author", - "http_details_url", - "pr_number", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a PR enters pending failure state while in the merge queue" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "batched": true, - "failure_reason": "START_TEST_RUN_INVALID_EVENT", - "type": "pull_request.pending_failure", - "action": "pending_failure", - "author": "string", - "http_details_url": "http://example.com", - "pr_number": 0, - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "pending_failure", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request.canceled": { - "post": { - "operationId": "pull_request.canceled", - "summary": "Pull request canceled", - "description": "Triggered when a PR is canceled", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "batched": { - "type": "boolean", - "description": "Whether this pull request was part of a batch" - }, - "cancellation_reason": { - "type": "string", - "enum": [ - "USER_REQUESTED", - "PR_CLOSED", - "PR_PUSHED_TO", - "PR_BASE_BRANCH_CHANGED", - "TRUNK_ADMIN_REQUESTED", - "PR_CONVERTED_TO_DRAFT", - 
"STACKED_PR_MEMBER_PUSHED_TO", - "STACKED_PR_MEMBER_CLOSED", - "STACKED_PR_MEMBER_MERGED", - "STACKED_PR_MEMBER_BASE_BRANCH_CHANGED" - ], - "description": "The reason the PR was canceled" - }, - "is_bisection": { - "type": "boolean", - "description": "Whether this event is part of a bisection test run" - }, - "type": { - "type": "string", - "const": "pull_request.canceled", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "canceled", - "description": "The type of action that triggered the webhook" - }, - "author": { - "type": "string", - "description": "The username of the author of the PR" - }, - "http_details_url": { - "type": "string", - "format": "uri", - "description": "Web link to the details about this PR" - }, - "pr_number": { - "type": "integer", - "minimum": 0, - "description": "The PR number" - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "canceled", - "description": "The current status of the PR" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "is_bisection", - "type", - "action", - "author", - "http_details_url", - "pr_number", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a PR is canceled" - }, - 
"example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "batched": true, - "cancellation_reason": "USER_REQUESTED", - "is_bisection": true, - "type": "pull_request.canceled", - "action": "canceled", - "author": "string", - "http_details_url": "http://example.com", - "pr_number": 0, - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "canceled", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request.merged": { - "post": { - "operationId": "pull_request.merged", - "summary": "Pull request merged", - "description": "Triggered when a PR is merged", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "batched": { - "type": "boolean", - "description": "Whether this pull request was part of a batch" - }, - "merged_reason": { - "type": "string", - "enum": [ - "MERGE_GRAPH_MERGE", - "MANUAL_MERGE", - "MERGE_GRAPH_DIRECT_MERGE" - ], - "description": "The reason the PR was merged" - }, - "type": { - "type": "string", - "const": "pull_request.merged", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "merged", - "description": "The type of action that triggered the webhook" - }, - "author": { - "type": "string", - "description": "The username of the author of the PR" - }, - "http_details_url": { - "type": "string", - "format": "uri", - "description": "Web link to 
the details about this PR" - }, - "pr_number": { - "type": "integer", - "minimum": 0, - "description": "The PR number" - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "merged", - "description": "The current status of the PR" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "type", - "action", - "author", - "http_details_url", - "pr_number", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a PR is merged" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "batched": true, - "merged_reason": "MERGE_GRAPH_MERGE", - "type": "pull_request.merged", - "action": "merged", - "author": "string", - "http_details_url": "http://example.com", - "pr_number": 0, - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "merged", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request.queued": { - "post": { - "operationId": "pull_request.queued", - "summary": "Pull request queued", - "description": "Triggered when a PR has passed any branch protection requirements and is ready to be tested in the merge queue", - "security": [], - 
"requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "queued_reason": { - "type": "string", - "enum": [ - "MANUAL_PUSH_TO_TARGET_BRANCH", - "PREREQUISITE_MERGE_ITEM_CANCELLED", - "PREREQUISITE_MERGE_ITEM_FAILED", - "PREREQUISITE_MERGE_ITEM_MANUALLY_MERGED", - "MERGE_ITEM_NOW_READY", - "TESTS_RESTARTED_BY_USER", - "TESTS_RESTARTED_BY_API_REQUEST", - "SKIP_THE_LINE_INTERRUPTED", - "BISECTION_REQUIRED", - "BISECTION_TEST_RUN_PASSED" - ], - "description": "The reason the PR was queued" - }, - "type": { - "type": "string", - "const": "pull_request.queued", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "queued", - "description": "The type of action that triggered the webhook" - }, - "author": { - "type": "string", - "description": "The username of the author of the PR" - }, - "http_details_url": { - "type": "string", - "format": "uri", - "description": "Web link to the details about this PR" - }, - "pr_number": { - "type": "integer", - "minimum": 0, - "description": "The PR number" - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "queued", - "description": "The current status of the PR" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - 
"required": [ - "type", - "action", - "author", - "http_details_url", - "pr_number", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a PR has passed any branch protection requirements and is ready to be tested in the merge queue" - }, - "example": { - "queued_reason": "MANUAL_PUSH_TO_TARGET_BRANCH", - "type": "pull_request.queued", - "action": "queued", - "author": "string", - "http_details_url": "http://example.com", - "pr_number": 0, - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "queued", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request.submitted": { - "post": { - "operationId": "pull_request.submitted", - "summary": "Pull request submitted", - "description": "Triggered when a PR is submitted to Trunk Merge", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "submitted_reason": { - "type": "string", - "enum": [ - "USER_REQUESTED", - "TRUNK_ADMIN_REQUESTED", - "API_REQUESTED" - ], - "description": "The reason the PR was submitted" - }, - "type": { - "type": "string", - "const": "pull_request.submitted", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "submitted", - "description": "The type of action that triggered the webhook" - }, - "author": { - "type": "string", - "description": "The username of the author of the PR" - }, - "http_details_url": { - "type": "string", - "format": "uri", - "description": "Web link to the details about this PR" - }, - "pr_number": { - "type": "integer", - "minimum": 0, - "description": "The PR number" - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The 
name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "submitted", - "description": "The current status of the PR" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "type", - "action", - "author", - "http_details_url", - "pr_number", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a PR is submitted to Trunk Merge" - }, - "example": { - "submitted_reason": "USER_REQUESTED", - "type": "pull_request.submitted", - "action": "submitted", - "author": "string", - "http_details_url": "http://example.com", - "pr_number": 0, - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "submitted", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request.testing": { - "post": { - "operationId": "pull_request.testing", - "summary": "Pull request testing", - "description": "Triggered when a PR has started testing in the merge queue", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of 
the pull request used to test the pull request or pull request batch" - }, - "batched": { - "type": "boolean", - "description": "Whether this pull request was part of a batch" - }, - "is_bisection": { - "type": "boolean", - "description": "Whether this event is part of a bisection test run" - }, - "type": { - "type": "string", - "const": "pull_request.testing", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "testing", - "description": "The type of action that triggered the webhook" - }, - "author": { - "type": "string", - "description": "The username of the author of the PR" - }, - "http_details_url": { - "type": "string", - "format": "uri", - "description": "Web link to the details about this PR" - }, - "pr_number": { - "type": "integer", - "minimum": 0, - "description": "The PR number" - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "testing", - "description": "The current status of the PR" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "is_bisection", - "type", - "action", - "author", - "http_details_url", - "pr_number", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a PR has started testing in the merge queue" - }, 
- "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "batched": true, - "is_bisection": true, - "type": "pull_request.testing", - "action": "testing", - "author": "string", - "http_details_url": "http://example.com", - "pr_number": 0, - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "testing", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request_batch.failed": { - "post": { - "operationId": "pull_request_batch.failed", - "summary": "Pull request batch failed", - "description": "Triggered when a batch of PRs fails while in the merge queue", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "failure_reason": { - "type": "string", - "enum": [ - "START_TEST_RUN_INVALID_EVENT", - "START_TEST_RUN_MERGE_CONFLICT", - "START_TEST_RUN_CONFIG_NOT_FOUND", - "START_TEST_RUN_CONFIG_PARSING_FAILURE", - "START_TEST_RUN_CONFIG_MISSING_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_VERSION", - "START_TEST_RUN_NO_REQUIRED_STATUSES", - "TEST_RUN_REQUIRED_STATUS_FAILED", - "TEST_RUN_TIMEOUT", - "GITHUB_API_MERGE_PR_FAILED", - "GITHUB_API_DIRECT_MERGE_PR_FAILED", - "PR_UPDATED_AT_MERGE_TIME", - "START_TEST_RUN_RETRY_EXHAUSTED", - "START_TEST_RUN_DRAFT_PRS_NOT_ALLOWED", - "TEST_RUN_FAILED_BY_OPTIMIZATION" - ], - "description": "The reason the batch failed" - }, - 
"is_bisection": { - "type": "boolean", - "description": "Whether this event is part of a bisection test run" - }, - "type": { - "type": "string", - "const": "pull_request_batch.failed", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "failed", - "description": "The type of action that triggered the webhook" - }, - "pr_numbers": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of the pull requests in the batch" - }, - "pr_numbers_in_batch": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of all pull requests that were tested together in this batch. This may be a superset of pr_numbers if some PRs in the batch failed to merge." - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "failed", - "description": "The current status of the Pull Request batch" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "test_branch", - "test_branch_sha", - "is_bisection", - "type", - "action", - "pr_numbers", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a batch of PRs fails while in the merge queue" - }, - "example": { - 
"test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "failure_reason": "START_TEST_RUN_INVALID_EVENT", - "is_bisection": true, - "type": "pull_request_batch.failed", - "action": "failed", - "pr_numbers": [ - 0 - ], - "pr_numbers_in_batch": [ - 0 - ], - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "failed", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request_batch.pending_failure": { - "post": { - "operationId": "pull_request_batch.pending_failure", - "summary": "Pull request batch pending failure", - "description": "Triggered when the last pull request in a batch enters pending failure state", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "failure_reason": { - "type": "string", - "enum": [ - "START_TEST_RUN_INVALID_EVENT", - "START_TEST_RUN_MERGE_CONFLICT", - "START_TEST_RUN_CONFIG_NOT_FOUND", - "START_TEST_RUN_CONFIG_PARSING_FAILURE", - "START_TEST_RUN_CONFIG_MISSING_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_REQUIRED_STATUSES", - "START_TEST_RUN_CONFIG_BAD_VERSION", - "START_TEST_RUN_NO_REQUIRED_STATUSES", - "TEST_RUN_REQUIRED_STATUS_FAILED", - "TEST_RUN_TIMEOUT", - "GITHUB_API_MERGE_PR_FAILED", - "GITHUB_API_DIRECT_MERGE_PR_FAILED", - "PR_UPDATED_AT_MERGE_TIME", - "START_TEST_RUN_RETRY_EXHAUSTED", - "START_TEST_RUN_DRAFT_PRS_NOT_ALLOWED", - "TEST_RUN_FAILED_BY_OPTIMIZATION" - ], - "description": 
"The reason the batch is pending failure" - }, - "type": { - "type": "string", - "const": "pull_request_batch.pending_failure", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "pending_failure", - "description": "The type of action that triggered the webhook" - }, - "pr_numbers": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of the pull requests in the batch" - }, - "pr_numbers_in_batch": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of all pull requests that were tested together in this batch. This may be a superset of pr_numbers if some PRs in the batch failed to merge." - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "pending_failure", - "description": "The current status of the Pull Request batch" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "test_branch", - "test_branch_sha", - "type", - "action", - "pr_numbers", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when the last pull request in a batch enters pending failure state" - }, - "example": { - "test_branch": "string", - "test_branch_sha": 
"string", - "test_pr_number": 0, - "failure_reason": "START_TEST_RUN_INVALID_EVENT", - "type": "pull_request_batch.pending_failure", - "action": "pending_failure", - "pr_numbers": [ - 0 - ], - "pr_numbers_in_batch": [ - 0 - ], - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "pending_failure", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request_batch.canceled": { - "post": { - "operationId": "pull_request_batch.canceled", - "summary": "Pull request batch canceled", - "description": "Triggered when a batch of PRs is canceled", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "cancellation_reason": { - "type": "string", - "enum": [ - "USER_REQUESTED", - "PR_CLOSED", - "PR_PUSHED_TO", - "PR_BASE_BRANCH_CHANGED", - "TRUNK_ADMIN_REQUESTED", - "PR_CONVERTED_TO_DRAFT", - "STACKED_PR_MEMBER_PUSHED_TO", - "STACKED_PR_MEMBER_CLOSED", - "STACKED_PR_MEMBER_MERGED", - "STACKED_PR_MEMBER_BASE_BRANCH_CHANGED" - ], - "description": "The reason the batch was canceled" - }, - "is_bisection": { - "type": "boolean", - "description": "Whether this event is part of a bisection test run" - }, - "type": { - "type": "string", - "const": "pull_request_batch.canceled", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "canceled", - "description": "The type of action that triggered the webhook" - }, - 
"pr_numbers": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of the pull requests in the batch" - }, - "pr_numbers_in_batch": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of all pull requests that were tested together in this batch. This may be a superset of pr_numbers if some PRs in the batch failed to merge." - }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "canceled", - "description": "The current status of the Pull Request batch" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "test_branch", - "test_branch_sha", - "is_bisection", - "type", - "action", - "pr_numbers", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a batch of PRs is canceled" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "cancellation_reason": "USER_REQUESTED", - "is_bisection": true, - "type": "pull_request_batch.canceled", - "action": "canceled", - "pr_numbers": [ - 0 - ], - "pr_numbers_in_batch": [ - 0 - ], - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "canceled", - "target_branch": 
"string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request_batch.merged": { - "post": { - "operationId": "pull_request_batch.merged", - "summary": "Pull request batch merged", - "description": "Triggered when the last pull request in a batch gets merged", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "merged_reason": { - "type": "string", - "enum": [ - "MERGE_GRAPH_MERGE", - "MANUAL_MERGE", - "MERGE_GRAPH_DIRECT_MERGE" - ], - "description": "The reason the batch was merged" - }, - "type": { - "type": "string", - "const": "pull_request_batch.merged", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "merged", - "description": "The type of action that triggered the webhook" - }, - "pr_numbers": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of the pull requests in the batch" - }, - "pr_numbers_in_batch": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of all pull requests that were tested together in this batch. This may be a superset of pr_numbers if some PRs in the batch failed to merge." 
- }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "merged", - "description": "The current status of the Pull Request batch" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "test_branch", - "test_branch_sha", - "type", - "action", - "pr_numbers", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when the last pull request in a batch gets merged" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "merged_reason": "MERGE_GRAPH_MERGE", - "type": "pull_request_batch.merged", - "action": "merged", - "pr_numbers": [ - 0 - ], - "pr_numbers_in_batch": [ - 0 - ], - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "merged", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request_batch.queued": { - "post": { - "operationId": "pull_request_batch.queued", - "summary": "Pull request batch queued", - "description": "Triggered when a batch of PRs is queued", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": "The 
branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "queued_reason": { - "type": "string", - "enum": [ - "MANUAL_PUSH_TO_TARGET_BRANCH", - "PREREQUISITE_MERGE_ITEM_CANCELLED", - "PREREQUISITE_MERGE_ITEM_FAILED", - "PREREQUISITE_MERGE_ITEM_MANUALLY_MERGED", - "MERGE_ITEM_NOW_READY", - "TESTS_RESTARTED_BY_USER", - "TESTS_RESTARTED_BY_API_REQUEST", - "SKIP_THE_LINE_INTERRUPTED", - "BISECTION_REQUIRED", - "BISECTION_TEST_RUN_PASSED" - ], - "description": "The reason the batch was queued" - }, - "type": { - "type": "string", - "const": "pull_request_batch.queued", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "queued", - "description": "The type of action that triggered the webhook" - }, - "pr_numbers": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of the pull requests in the batch" - }, - "pr_numbers_in_batch": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of all pull requests that were tested together in this batch. This may be a superset of pr_numbers if some PRs in the batch failed to merge." 
- }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "queued", - "description": "The current status of the Pull Request batch" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "test_branch", - "test_branch_sha", - "type", - "action", - "pr_numbers", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a batch of PRs is queued" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "queued_reason": "MANUAL_PUSH_TO_TARGET_BRANCH", - "type": "pull_request_batch.queued", - "action": "queued", - "pr_numbers": [ - 0 - ], - "pr_numbers_in_batch": [ - 0 - ], - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "queued", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request_batch.submitted": { - "post": { - "operationId": "pull_request_batch.submitted", - "summary": "Pull request batch submitted", - "description": "Triggered when a batch of PRs is submitted", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - "type": "string", - "description": 
"The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "submitted_reason": { - "type": "string", - "enum": [ - "USER_REQUESTED", - "TRUNK_ADMIN_REQUESTED", - "API_REQUESTED" - ], - "description": "The reason the batch was submitted" - }, - "type": { - "type": "string", - "const": "pull_request_batch.submitted", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "submitted", - "description": "The type of action that triggered the webhook" - }, - "pr_numbers": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of the pull requests in the batch" - }, - "pr_numbers_in_batch": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of all pull requests that were tested together in this batch. This may be a superset of pr_numbers if some PRs in the batch failed to merge." 
- }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "submitted", - "description": "The current status of the Pull Request batch" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "test_branch", - "test_branch_sha", - "type", - "action", - "pr_numbers", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when a batch of PRs is submitted" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "submitted_reason": "USER_REQUESTED", - "type": "pull_request_batch.submitted", - "action": "submitted", - "pr_numbers": [ - 0 - ], - "pr_numbers_in_batch": [ - 0 - ], - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "submitted", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - }, - "pull_request_batch.testing": { - "post": { - "operationId": "pull_request_batch.testing", - "summary": "Pull request batch testing", - "description": "Triggered when all pull requests in a batch start testing in the merge queue", - "security": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "type": "object", - "properties": { - "test_branch": { - 
"type": "string", - "description": "The branch the pull request or pull request batch was tested on" - }, - "test_branch_sha": { - "type": "string", - "description": "The SHA of the branch the pull request or pull request batch was tested on" - }, - "test_pr_number": { - "type": "integer", - "minimum": 0, - "description": "The number of the pull request used to test the pull request or pull request batch" - }, - "is_bisection": { - "type": "boolean", - "description": "Whether this event is part of a bisection test run" - }, - "type": { - "type": "string", - "const": "pull_request_batch.testing", - "description": "The type of webhook event that occurred" - }, - "action": { - "type": "string", - "const": "testing", - "description": "The type of action that triggered the webhook" - }, - "pr_numbers": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of the pull requests in the batch" - }, - "pr_numbers_in_batch": { - "type": "array", - "items": { - "type": "integer", - "minimum": 0 - }, - "description": "The PR numbers of all pull requests that were tested together in this batch. This may be a superset of pr_numbers if some PRs in the batch failed to merge." 
- }, - "repository": { - "type": "object", - "properties": { - "host": { - "type": "string", - "description": "The type of source control for this repository" - }, - "owner": { - "type": "string", - "description": "The name of the repository" - }, - "name": { - "type": "string", - "description": "The user or organization that owns the repository" - } - }, - "required": [ - "host", - "owner", - "name" - ], - "description": "The repository that the webhook event occurred in" - }, - "status": { - "type": "string", - "const": "testing", - "description": "The current status of the Pull Request batch" - }, - "target_branch": { - "type": "string", - "description": "The name of the branch targeted by the merge queue where the event occurred" - }, - "timestamp": { - "type": "string", - "format": "date-time", - "description": "The ISO 8601 timestamp of when this state transition occurred" - } - }, - "required": [ - "test_branch", - "test_branch_sha", - "is_bisection", - "type", - "action", - "pr_numbers", - "repository", - "status", - "target_branch", - "timestamp" - ], - "description": "Triggered when all pull requests in a batch start testing in the merge queue" - }, - "example": { - "test_branch": "string", - "test_branch_sha": "string", - "test_pr_number": 0, - "is_bisection": true, - "type": "pull_request_batch.testing", - "action": "testing", - "pr_numbers": [ - 0 - ], - "pr_numbers_in_batch": [ - 0 - ], - "repository": { - "host": "string", - "owner": "string", - "name": "string" - }, - "status": "testing", - "target_branch": "string", - "timestamp": "2019-08-24T14:15:22Z" - } - } - } - }, - "responses": {} - } - } - }, - "components": { - "securitySchemes": { - "ApiKeyAuth": { - "type": "apiKey", - "in": "header", - "name": "x-api-token" - } - } - } -} \ No newline at end of file diff --git a/setup-and-administration/apis.mdx b/setup-and-administration/apis.mdx new file mode 100644 index 0000000..f130518 --- /dev/null +++ b/setup-and-administration/apis.mdx @@ -0,0 
+1,45 @@ +--- +title: "API Reference" +description: "Trunk APIs for building custom integrations." +--- +## REST API Overview + +Trunk provides HTTP REST APIs for each of our features. The APIs use status codes to indicate the success or failure of requests, return JSON from all requests, and use standard HTTP response codes. All API requests must be authenticated. + +## Available APIs + +* [Flaky Tests](/flaky-tests/flaky-tests): for accessing information like quarantined tests in your repo. +* [Merge API](/merge-queue/reference/merge) : for controlling the Trunk Merge Queue. + +## Authentication + +Authenticate to the API with an API key using the header `x-api-token`. + +### Finding your API token + +You can find your API token in the [Trunk App](https://app.trunk.io). + + +Explore the interactive walkthrough in a new tab. + + +### Example + +To submit an empty list of events to be tracked, do the following from the command line. + +```sh +curl \ + -i \ + -X POST https://api.trunk.io/v1/metrics/trackEvents \ + -H "Content-Type: application/json" \ + -H "x-source: curl-sample" \ + -H "x-api-token: {REDACTED}" \ + -d '{ + "repo": { + "host": "github.com", + "owner": "trunk-io", + "name": "jenkins-plugin" + }, + "events":[] + }' +``` diff --git a/setup-and-administration/apis/index.mdx b/setup-and-administration/apis/index.mdx deleted file mode 100644 index 259d839..0000000 --- a/setup-and-administration/apis/index.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "API Reference" -description: "Trunk APIs for building custom integrations." ---- -## REST API Overview - -Trunk provides HTTP REST APIs for each of our features. The APIs use status codes to indicate the success or failure of requests, return JSON from all requests, and use standard HTTP response codes. All API requests must be authenticated. - -## Available APIs - -* [Flaky Tests](../../flaky-tests/reference/api-reference): for accessing information like quarantined tests in your repo. 
-* [Merge API](../../merge-queue/reference/merge) : for controlling the Trunk Merge Queue. - -## Authentication - -Authenticate to the API with an API token using the header `x-api-token`. - -### Finding your API token - -You can find your API token in the [Trunk App](https://app.trunk.io). - - diff --git a/setup-and-administration/apis/webhooks.mdx b/setup-and-administration/apis/webhooks.mdx index 4dd6c25..57fc894 100644 --- a/setup-and-administration/apis/webhooks.mdx +++ b/setup-and-administration/apis/webhooks.mdx @@ -2,33 +2,16 @@ title: "Webhooks Reference" description: "Documentation on the various webhooks that are provided by Trunk" --- - - -![](/assets/webhook-event-catalog.png) - +
Svix generates a reference for all the exposed webhook events. All possible webhooks from Trunk can be viewed in the [Webhook Events Catalog](https://www.svix.com/event-types/us/org_2eQPL41Ew5XSHxiXZIamIUIXg8H/): - - www.svix.com + +Open the referenced resource in a new tab. ### Guides and Examples The documentation of each Trunk product contains guides and examples for using webhooks. - - - - +
Webhooks for Flaky Testswebhooks
Merge Queue Webhookswebhooks
diff --git a/setup-and-administration/billing.mdx b/setup-and-administration/billing.mdx index 1f83cc3..85b5b27 100644 --- a/setup-and-administration/billing.mdx +++ b/setup-and-administration/billing.mdx @@ -17,36 +17,27 @@ Public repositories are always free for up to 5 million test spans per month. If you exceed these limits, you'll be prompted to upgrade based on the number of private committers and additional test span usage. -Similar to [calculating user counts](./billing#calculating-user-counts), our free tier limits are calculated based on a 30-day rolling window. +Similar to [calculating user counts](#calculating-user-counts), our free tier limits are calculated based on a 30-day rolling window. -| Feature | Metric | Free Tier Limit | -|---|---|---| -| All | Users | Up to 5 private repo committers; unlimited on public repos | -| Flaky Tests | Number of [test spans](#user-content-fn-1) | 5 committers and 5M test spans per month | -| Merge Queue | PRs merged per month | Unlimited usage; pricing begins if >5 private repo committers | -| Code Quality | Quality & security metrics | Unlimited usage | +
FeatureMetricFree Tier Limit
AllUsersUp to 5 private repo committers; unlimited on public repos
Flaky TestsNumber of test spans5 committers and 5M test spans per month
Merge QueuePRs merged per monthUnlimited usage; pricing begins if >5 private repo committers
Code QualityQuality & security metricsUnlimited usage
### Team plan The Trunk Team Plan offers a monthly subscription using a per-seat model. At the end of every billing period, we calculate the number of users using Trunk and update the next month’s invoice to reflect the latest user count. Each seat has access to all of Trunk's features. -| Feature | Metric | Limits | -|---|---|---| -| Code Quality | Quality & security metrics | Unlimited | -| Merge Queue | PRs merged per month | Unlimited | -| Flaky Tests | Number of [test spans](#user-content-fn-1) | 1 million test spans per seat per month. $3 for each additional 1 million test spans. | +
FeatureMetricLimits
Code QualityQuality & security metricsUnlimited
Merge QueuePRs merged per monthUnlimited
Flaky TestsNumber of test spans

1 million test spans per seat per month.

$3 for each additional 1 million test spans.

### **Enterprise plan** -Trunk Enterprise offers powerful admin controls, dedicated support, access to custom billing or terms, and features like SSO. If your team is interested in an enterprise plan, please contact [sales@trunk.io](mailto:sales@trunk.io). +Trunk Enterprise offers powerful admin controls, dedicated support, access to custom billing or terms, and features like SSO. If your team is interested in an enterprise plan, please contact sales@trunk.io. **Trials** -You and your team can trial Trunk before signing up for an Enterprise or Team plan. To try Trunk, please contact [sales@trunk.io](mailto:sales@trunk.io). To extend or cancel the trial, please contact [sales@trunk.io](mailto:sales@trunk.io). +You and your team can trial Trunk before signing up for an Enterprise or Team plan. To try Trunk, please contact sales@trunk.io. To extend or cancel the trial, please contact sales@trunk.io. ### Calculating user counts -A user is a non-bot user who has made a commit to a private repo with Trunk enabled in the last 30 days. Specifically, we look at their username; if someone changes their username on Git, _we would consider that a separate user_. We do not count contributions to public (open source) repos. Contributor counts are displayed on **Settings** > **Billing**. +A user is a non-bot user who has made a commit to a private repo with Trunk enabled in the last 30 days. Specifically, we look at their username; if someone changes their username on Git, *we would consider that a separate user*. We do not count contributions to public (open source) repos. Contributor counts are displayed on **Settings** > **Billing**. Trunk requires the [Trunk GitHub App](https://github.com/apps/trunk-io) to be installed in your repo to count seats. @@ -66,22 +57,20 @@ On Day 31, the user count would be **two**. We consider days 2 through 31 (inclu ### Calculating an invoice -At the end of every billing cycle, Trunk calculates what the next invoice should be. 
Trunk determines feature usage and the number of seats used over the [free tier usage limits](./billing#free-plans-and-trials) . See our section on [calculating user counts](./billing#calculating-user-counts) to determine how much we charge per feature usage. +At the end of every billing cycle, Trunk calculates what the next invoice should be. Trunk determines feature usage and the number of seats used over the [free tier usage limits](#free-plans-and-trials) . See our section on [calculating user counts](#calculating-user-counts) to determine how much we charge per feature usage. ### Editing payment details -You can edit your payment details by navigating to **Settings** > **Billing** and clicking on the pencil icon on the credit card. Trunk accepts both credit card and ACH; if you require a different payment method, please contact us at [sales@trunk.io](mailto:sales@trunk.io). +You can edit your payment details by navigating to **Settings** > **Billing** and clicking on the pencil icon on the credit card. Trunk accepts both credit card and ACH; if you require a different payment method, please contact us at sales@trunk.io. - ![](https://files.readme.io/d7adf4f-Screen_Shot_2023-01-17_at_8.08.17_PM.png) - ### Cancelling a plan -You can cancel an active Trunk subscription by navigating to **Settings** > **Billing** and clicking the **Cancel Subscription** button. Your plan will transition back into the [free tier](./billing#free-plans-and-trials); if you want to re-enable this plan, please contact us at [sales@trunk.io](mailto:sales@trunk.io). +You can cancel an active Trunk subscription by navigating to **Settings** > **Billing** and clicking the **Cancel Subscription** button. Your plan will transition back into the [free tier](#free-plans-and-trials); if you want to re-enable this plan, please contact us at sales@trunk.io. -Cancelling a Trunk Plan and then re-enabling it will degrade the product experience and may lose data. 
Please contact us at [sales@trunk.io](mailto:sales@trunk.io) to re-enable any canceled plan. +Cancelling a Trunk Plan and then re-enabling it will degrade the product experience and may lose data. Please contact us at sales@trunk.io to re-enable any canceled plan. ### A note on security diff --git a/setup-and-administration/connecting-to-trunk.mdx b/setup-and-administration/connecting-to-trunk.mdx index d0204f2..b8534a8 100644 --- a/setup-and-administration/connecting-to-trunk.mdx +++ b/setup-and-administration/connecting-to-trunk.mdx @@ -1,5 +1,6 @@ --- title: "Account Setup" +description: "Welcome to Trunk! Before you can access Trunk's toolkit to ship faster, you must create a Trunk account. You can connect to Trunk in two ways:" --- ### Create a Trunk account @@ -12,20 +13,18 @@ Welcome to Trunk! Before you can access Trunk's toolkit to ship faster, you must After creating a Trunk Account, you'll be invited to [create an organization](https://app.trunk.io/onboarding). Organizations are shared workspaces for your team, and **individual repositories** connected to Trunk will fall under your team's organization. - -![](/assets/onboarding-add-org.png) - +
To create your organization, you need the following: * **Workspace Name**: This is the display name of your organization. This can be changed later. -* **URL Slug**: This is the link to access your workspace and also your [Organization Slug](./managing-your-organization#slug). This cannot be changed. +* **URL Slug**: This is the link to access your workspace and also your [Organization Slug](/setup-and-administration/managing-your-organization#slug). This cannot be changed. - + **Trying to join your team?** -If your team is already on Trunk and you're looking to join an existing organization, see the docs on[ inviting team members](./managing-your-organization#inviting-team-members) and contact an organization admin. - +If your team is already on Trunk and you're looking to join an existing organization, see the docs on [inviting team members](/setup-and-administration/managing-your-organization#inviting-team-members) and contact an organization admin. + ### Start using Trunk After connecting to Trunk, you're ready to get started. ### Next steps - - - - - - - - - - - +
Flaky TestsFlakyTests.pngoverview
Merge QueueMerge.pngmerge-queue
+ +
Manage your Organizationmanaging-your-organization
Install the CLIBroken link
diff --git a/setup-and-administration/github-app-permissions.mdx b/setup-and-administration/github-app-permissions.mdx index e2fd67b..dba033e 100644 --- a/setup-and-administration/github-app-permissions.mdx +++ b/setup-and-administration/github-app-permissions.mdx @@ -2,16 +2,12 @@ title: "Trunk GitHub App" description: "Learn more about which permissions the Trunk GitHub app requests and why Trunk needs them." --- -The Trunk GitHub app lets you integrate Merge Queue and Flaky Test features with your GitHub repos. It can help you[ ](../code-quality/overview/setup-and-installation/github-integration)manage [merge queue branches](../merge-queue/getting-started/), and post [PR comments about your test results](../flaky-tests/management/github-pull-request-comments). +The Trunk GitHub app lets you integrate Merge Queue and Flaky Test features with your GitHub repos. It can help you[ ](https://docs.trunk.io/code-quality/overview/setup-and-installation/github-integration)manage [merge queue branches](/merge-queue/set-up-trunk-merge), and post [PR comments about your test results](/flaky-tests/github-pull-request-comments). You can install the Trunk GitHub App by going to **Settings** > **Organization** > **GitHub** and clicking **Install GitHub App**. You'll be redirected to GitHub to select the repositories where the GitHub app will be installed. The Trunk GitHub App enables functionality for all of Trunk's tools. Not every permission is required for every feature of Trunk, and if you have only enabled one of our tools, we will only access the permissions pertaining to that tool. For complete documentation of the individual permissions the Trunk GitHub App requires, see the [GitHub developer docs](https://docs.github.com/en/rest/authentication/permissions-required-for-github-apps). Below, find an explanation of every permission the Trunk GitHub App requires and what Trunk uses it for. 
- -**Looking for the Trunk Sudo app?** Trunk also provides a separate [Trunk Sudo GitHub App](./trunk-sudo-app), used by features that need to merge pull requests while bypassing GitHub branch protections (such as [Force merge](../merge-queue/using-the-queue/force-merge)). It's optional and only required if you use one of those features. - - ## **Repository permissions** Repository permissions permit access to repositories and related resources. @@ -68,7 +64,7 @@ Trunk uses this permission to view and merge pull requests managed by Merge. This permission includes access to update GitHub Action workflow files. -Trunk uses this permission to facilitate onboarding to running Trunk Check on CI, for users who use GitHub Actions. +Trunk uses this permission to facilitate onboarding to running Trunk Check on CI, for users who use GitHub Actions. ## **Organization permissions** diff --git a/setup-and-administration/managing-your-organization.mdx b/setup-and-administration/managing-your-organization.mdx index e137993..1d9e885 100644 --- a/setup-and-administration/managing-your-organization.mdx +++ b/setup-and-administration/managing-your-organization.mdx @@ -1,25 +1,22 @@ --- title: "Managing your Organization" +description: "Integrating with Trunk through Webhooks, APIs, or CLI will require authentication using your organization's slug and token." --- ## Organization Slug and Token -Integrating with Trunk through [Webhooks](../flaky-tests/webhooks/), [APIs](./apis/), or [CLI](/broken/pages/OJc6wVrAfc2SLQZlJ2m1) will require authentication using your organization's slug and token. +Integrating with Trunk through [Webhooks](/flaky-tests/webhooks), [APIs](/setup-and-administration/apis), or CLI will require authentication using your organization's slug and token. You can find your organization slug and token by going to **Settings** > **Organization** > **General**. - -![](/assets/org-slug-and-token.png) - +
-*** +--- ## Managing Team Members You can manage a team member's role and remove team members by navigating to **Settings** > **Organization** > **Team** > **Members** and clicking on the name of a team member. You can change the role of a team member between user and admin, as well as removing the user from your organization. - -![](/assets/org-team-members.png) - +
### Inviting Team Members @@ -32,9 +29,7 @@ Available Roles are: * **Member**: Full access with limited permissions (default) * **Admin**: Full administrative access - -![](/assets/org-team-members-invite.png) - +
### Pending Invites @@ -42,9 +37,7 @@ You can view and manage pending email invites by navigating to **Settings** > ** From this page you can copy the member invite link or revoke the invite using the **three dots** menu. - -![](/assets/org-team-pending-invites.png) - +
### Team Domains @@ -53,30 +46,21 @@ If your team uses emails managed by Google or Microsoft under a common domain, y You can enable team domains under **Settings > Organization > Team > Domains** and clicking **Add Domain**. + - -![](/assets/org-team.png) - - - - -![](/assets/org-team-manage-domain_(1).png) - - - - -Trunk also supports SSO login. If you wish to use SSO, please contact us at support@trunk.io. - +
-*** + -## Leaving an Organization + -You can remove yourself from an organization at any time by navigating to **Settings** > **Organization** > **General** and clicking **Leave Organization**. +
-A confirmation dialog will appear. Once you leave, you lose access to the organization's settings, repositories, and test data. Another member can invite you back if needed. +
-**If you are an admin**, the **Leave Organization** button appears in the **Danger Zone** section at the bottom of the General settings page alongside the **Delete Organization** option. + -**If you are not an admin**, the **Leave Organization** option appears as a standalone card outside the Danger Zone section. \ No newline at end of file + +Trunk also supports SSO login. If you wish to use SSO, please contact us at support@trunk.io. + diff --git a/setup-and-administration/security.mdx b/setup-and-administration/security.mdx index 5734791..379dd5c 100644 --- a/setup-and-administration/security.mdx +++ b/setup-and-administration/security.mdx @@ -58,7 +58,6 @@ Flaky Tests works by uploading test results from your CI jobs to Trunk's backend * **Encryption in transit**: All data transmitted to and from Trunk uses TLS (Transport Layer Security) and HSTS * **Encryption at rest**: All customer data is encrypted using AES-256 * **Network isolation**: Production services run in isolated AWS VPCs with restricted access; all services are within private subnets with no internet access and use a network gateway to permit specific traffic -* **Clickjacking protection**: All Trunk app pages set `Content-Security-Policy: frame-ancestors 'self'` and `X-Frame-Options: SAMEORIGIN`, preventing Trunk pages from being embedded in external iframes #### Access Controls @@ -92,4 +91,4 @@ Our most recent SOC 2 Type II audit confirmed that: * Controls operated effectively throughout the period * No significant security incidents occurred during the audit period -**To request a copy of our SOC 2 report**, please contact us at [security@trunk.io](mailto:security@trunk.io) +**To request a copy of our SOC 2 report**, please contact us at security@trunk.io diff --git a/setup-and-administration/support.mdx b/setup-and-administration/support.mdx index 3e32509..f26dcfb 100644 --- a/setup-and-administration/support.mdx +++ b/setup-and-administration/support.mdx @@ -4,14 +4,7 @@ description: "Need 
help with Trunk?" --- ### How to reach us -| Contact Method | Description | -|---|---| -| [Community Slack](https://slack.trunk.io) | We offer near real-time support in our [Slack community](https://slack.trunk.io). Post your questions in #getting-started, #bugs, or #general for quick answers. Want to discuss something privately? Simply drop a note in any channel and we'll continue in DMs. | -| [Meeting or Demo](https://calendly.com/trunk/demo) | For sales-related matters, you can email us at [sales@trunk.io](mailto:sales@trunk.io) or [schedule a meeting here](https://calendly.com/trunk/demo). | -| [GitHub Discussions](https://github.com/orgs/trunk-io/discussions/) | If there's something you want us to build, you can post on Slack in #feature-requests or start a discussion on [GitHub Discussions](https://github.com/orgs/trunk-io/discussions/). | -| [Email](mailto:support@trunk.io) | Email us at [support@trunk.io](mailto:support@trunk.io). We'll get back to you within 48 hours, though for fastest support, we recommend our [Slack community](https://slack.trunk.io/)! | -| [Security Concerns (Email)](mailto:security@trunk.io) | Report security issues or request security information by emailing us at [security@trunk.io](mailto:security@trunk.io) | -| [Report Outages](https://status.trunk.io) | Report outages and critical issues by clicking "Report an issue" at [status.trunk.io](https://status.trunk.io) | +
Contact MethodDescription
Community SlackWe offer near real-time support in our Slack community. Post your questions in #getting-started, #bugs, or #general for quick answers. Want to discuss something privately? Simply drop a note in any channel and we'll continue in DMs.
Meeting or DemoFor sales-related matters, you can email us at sales@trunk.io or schedule a meeting here.
GitHub DiscussionsIf there's something you want us to build, you can post on Slack in #feature-requests or start a discussion on GitHub Discussions.
EmailEmail us at support@trunk.io. We'll get back to you within 48 hours, though for fastest support, we recommend our Slack community!
Security Concerns (Email)Report security issues or request security information by emailing us at security@trunk.io
Report OutagesReport outages and critical issues by clicking "Report an issue" at status.trunk.io
### Enterprise support @@ -27,7 +20,7 @@ description: "Need help with Trunk?" * General Issues: 1 business day maximum response time (business hours) -_Response time refers to initial acknowledgment and issue triage. Resolution timeframes depend on technical complexity and may require coordination across multiple systems._ +*Response time refers to initial acknowledgment and issue triage. Resolution timeframes depend on technical complexity and may require coordination across multiple systems.* #### Account management diff --git a/setup-and-administration/trunk-sudo-app.mdx b/setup-and-administration/trunk-sudo-app.mdx deleted file mode 100644 index 7b55963..0000000 --- a/setup-and-administration/trunk-sudo-app.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: "Trunk Sudo GitHub App" -description: "Install and configure the Trunk Sudo GitHub App, a secondary Trunk app used by features that need to merge pull requests while bypassing GitHub branch protections." ---- -Trunk Sudo is a second Trunk GitHub App, separate from the [main Trunk GitHub App](./github-app-permissions). Its only purpose is to programmatically merge pull requests while bypassing GitHub branch protections, on behalf of Trunk features that need that capability. - -Trunk Sudo is a prerequisite for bypass-dependent features. Today the only such feature is [Force merge](../merge-queue/using-the-queue/force-merge). - - -**Trunk Sudo is optional.** You only need to install it if you plan to use a feature that requires it. If you don't use any bypass-dependent features, you can skip this setup. 
- - -### Prerequisites - -Before you begin, make sure you have: - -* [ ] Admin access to your GitHub organization -* [ ] The [main Trunk GitHub App](./github-app-permissions) already installed -* [ ] Branch protection already configured for your merge branch (classic rules, rulesets, or both) - -### Install the Trunk Sudo GitHub App - -You can install Trunk Sudo from either the Trunk web app or directly on GitHub — both paths land at the same GitHub install flow. - -1. **From the Trunk web app (recommended):** Navigate to your repository's **Merge Queue** settings page. The Trunk Sudo setup panel includes an **Install** button that opens GitHub's install flow. -2. **Directly on GitHub:** Go to [https://github.com/apps/trunk-sudo](https://github.com/apps/trunk-sudo) and click **Install**. - -In the GitHub install flow: - -1. Select whether to install on all repositories or only specific ones. You must include every repository where you want to use a bypass-dependent feature. -2. Review and approve the required permissions (see [Permissions reference](#permissions-reference) below). -3. Complete the installation. - -### Configure branch protection for Trunk Sudo - -Installing the app isn't enough on its own — your branch protection configuration must also allow Trunk Sudo to bypass the relevant rules when it merges. GitHub has two systems for branch protection: **classic branch protection rules** and **rulesets**. Both can coexist on the same branch. - -**Rulesets are strongly recommended.** Classic branch protection has rules that cannot be bypassed by any GitHub App (notably required status checks and "Require branches to be up to date"), so using classic protection alone will block Trunk Sudo from merging. Rulesets don't have this limitation. - -#### Option A — GitHub Rulesets (recommended) - -In GitHub, navigate to **Settings → Rules → Rulesets**. For every active ruleset that applies to your merge branch: - -1. Open the ruleset. -2. 
Under **Bypass list**, add the **Trunk Sudo** GitHub App. -3. Set its bypass mode to **Exempt**. -4. Save. - - -![GitHub ruleset bypass list with Trunk Sudo set to Exempt](/assets/trunk-sudo-ruleset-bypass-mode.png) - - - -**This is the most common setup mistake.** When you add an actor to a ruleset's bypass list, GitHub defaults the bypass mode to **Always** — which sounds like it covers everything but does not cover pull request merges. Trunk Sudo must be set to **Exempt**; it's the only mode that lets a GitHub App merge a PR without interactive confirmation. If Trunk Sudo isn't set to Exempt, merges will silently fail. - - -#### Option B — Classic branch protection - -If you're using classic branch protection rules, navigate to **Settings → Branches → Branch protection rules** and edit the rule for your merge branch. - -1. If **"Require a pull request before merging" → "Require approvals"** is enabled, enable **"Allow specified actors to bypass required pull requests"** and add **Trunk Sudo** to the allow list. -2. If **"Restrict who can push to matching branches"** is enabled, add **Trunk Sudo** to the allowed actors list. -3. Remove any entries under **"Require status checks to pass before merging"**. Classic branch protection does not allow apps to bypass required status checks. -4. Disable the nested **"Require branches to be up to date before merging"** checkbox. This setting also cannot be bypassed on classic protection. - - -**Classic branch protection has unbypassable rules.** Required status checks and "Require branches to be up to date" cannot be bypassed by any GitHub App. If you need those protections, move the rule to a ruleset with Trunk Sudo listed as an exempt bypass actor — otherwise Trunk Sudo will be unable to merge. - - -### Verify your setup - -The settings page for each Trunk Merge Queue includes a live checklist that validates every piece of the Trunk Sudo configuration end-to-end. 
**This checklist is the source of truth for whether your setup is correct** — if the checklist is green, the app is ready to merge. - - -To see this checklist, go to a merge queue in the repo where you are setting up Trunk Sudo, then click the **Settings** tab. - - - -![Trunk Sudo setup checklist in the Merge Queue settings page](/assets/trunk-sudo-setup-checklist.png) - - -Each row shows the status of one check (installation, classic branch protection, and one row per active ruleset on the merge branch). If a row is red, revisit the corresponding section above — the check IDs map directly to the configuration surfaces described here. - -### Permissions reference - -Trunk Sudo requests the following repository permissions. Each one is required for a specific part of the merge bypass flow. - -#### Administration (Read-only) - -This permission includes read-only access to repository settings, teams, and collaborators. - -Trunk Sudo uses this permission to read your current branch protection and ruleset configuration so it can determine whether it is correctly set up to bypass protections before attempting a merge. - -#### Metadata (Read-only) - -This permission includes access to search repositories, list collaborators, and access repository metadata. - -This permission is required by all GitHub applications that access repository information. - -#### Contents (Read and write) - -This permission includes access to repository contents, commits, branches, downloads, releases, and merges. - -Trunk Sudo uses this permission to merge pull requests into your merge branch. - -#### Pull requests (Read and write) - -This permission includes access to pull requests and merges. - -Trunk Sudo uses this permission to read PR state and to complete the merge operation. - -#### Workflows (Read and write) - -This permission includes access to update GitHub Action workflow files. - -Required so Trunk Sudo can merge PRs that modify files under `.github/`. 
GitHub blocks any merge that touches workflow files unless the merging actor has this permission. - -### Next steps - -→ [**Force merge**](../merge-queue/using-the-queue/force-merge) — use the Trunk Sudo app to merge PRs through the queue that don't satisfy branch protection. - -→ For the main Trunk GitHub App and its permissions, see [Trunk GitHub App](./github-app-permissions). diff --git a/styles.css b/styles.css deleted file mode 100644 index 5fabf4b..0000000 --- a/styles.css +++ /dev/null @@ -1,26 +0,0 @@ -th { - text-align: left; -} - -.tabs { - border: 1px solid #e2e6ec; - border-radius: 0.5rem; -} - -.dark .tabs { - border: 1px solid #383a3c; -} -.tabs > ul > li > div { - padding-left: 10px; - padding-right: 10px; -} - -.tabs > div { - margin: 0 10px; -} - -h5, -h6 { - margin-top: 20px; - font-weight: bold; -}