# Workflow file captured from run "chore: bump" (#24).

---
# CI quality gates: lint/format/type-check/test on every PR and push to main,
# plus a generation-speed benchmark that blocks PRs regressing by more than 2%.
name: QA
on:
  pull_request:
  push:
    branches:
      - main
jobs:
  QA:
    name: QA checks
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Install uv
        uses: astral-sh/setup-uv@v5
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # Pin the interpreter to the version committed in .python-version.
          python-version-file: .python-version
      - name: Sync packages
        # --locked fails if uv.lock is out of date instead of silently resolving.
        run: uv sync --locked --all-packages
      - name: Check import sorting
        # Only the isort ("I") rule set; other lint rules are not enforced here.
        run: uv run ruff check --select I .
      - name: Check formatting
        run: uv run ruff format --check .
      - name: Type check package
        run: uv run basedpyright -p pyproject.toml
      - name: Contract tests
        # -n auto: parallelize across all available cores (pytest-xdist).
        run: uv run pytest -n auto
  benchmark:
    name: Generation benchmark
    runs-on: ubuntu-latest
    # Benchmarks repeat 5x each; cap the job so a hang cannot run indefinitely.
    timeout-minutes: 15
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Check out base repository
        # Baseline (PR base commit) is only needed when comparing a PR.
        if: github.event_name == 'pull_request'
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.base.sha }}
          path: .benchmark/base
      - name: Install uv
        uses: astral-sh/setup-uv@v5
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version-file: .python-version
      - name: Sync packages
        run: uv sync --locked --all-packages
      - name: Benchmark base generation
        if: github.event_name == 'pull_request'
        # >- folds the command onto one line and strips the trailing newline.
        run: >-
          uv run python scripts/benchmark_generate.py run --package-path
          .benchmark/base --spec tests/performance/nautobot.json.gz --repeat 5
          --warmup 1 --output .benchmark/base.json
      - name: Benchmark current generation
        # Runs on pushes too, so main always produces a current.json artifactless record.
        run: >-
          uv run python scripts/benchmark_generate.py run --package-path .
          --spec tests/performance/nautobot.json.gz --repeat 5 --warmup 1
          --output .benchmark/current.json
      - name: Check generation regression
        if: github.event_name == 'pull_request'
        # Fail the PR if current is more than 2% slower than the base commit.
        run: >-
          uv run python scripts/benchmark_generate.py compare --baseline
          .benchmark/base.json --candidate .benchmark/current.json
          --max-regression 0.02