Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 20 additions & 55 deletions .github/workflows/benchmarks.yaml
Original file line number Diff line number Diff line change
@@ -1,83 +1,48 @@
name: Benchmarks

on:
push:
branches:
- main
pull_request:
types:
- opened
- synchronize
# `workflow_dispatch` allows CodSpeed to trigger backtest
# performance analysis in order to generate initial data.
workflow_dispatch:

jobs:
benchmark:
name: Benchmark tests
name: Run benchmarks
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
strategy:
matrix:
python_version: [3.12]
id-token: write
steps:
- name: Checkout branch
- name: Checkout
uses: actions/checkout@v4
with:
path: pr

- name: Checkout main
uses: actions/checkout@v4
with:
ref: main
path: main

- name: Install python
uses: actions/setup-python@v5
with:
python-version: ${{matrix.python_version}}
python-version: "3.12"

- name: Install uv
uses: astral-sh/setup-uv@v4
with:
enable-cache: true
cache-dependency-glob: "main/uv.lock"

- name: Setup benchmarks
run: |
echo "BASE_SHA=$(echo ${{ github.event.pull_request.base.sha }} | cut -c1-8)" >> $GITHUB_ENV
echo "HEAD_SHA=$(echo ${{ github.event.pull_request.head.sha }} | cut -c1-8)" >> $GITHUB_ENV
echo "PR_COMMENT=$(mktemp)" >> $GITHUB_ENV

- name: Run benchmarks on PR
working-directory: ./pr
run: |
uv sync --group test
uv run pytest --benchmark-only --benchmark-save=pr

- name: Run benchmarks on main
working-directory: ./main
continue-on-error: true
run: |
uv sync --group test
uv run pytest --benchmark-only --benchmark-save=base
cache-dependency-glob: "uv.lock"

- name: Compare results
continue-on-error: false
run: |
uvx pytest-benchmark compare **/.benchmarks/**/*.json | tee cmp_results
- name: Install project
run: uv sync --group test

echo 'Benchmark comparison for [`${{ env.BASE_SHA }}`](${{ github.event.repository.html_url }}/commit/${{ github.event.pull_request.base.sha }}) (base) vs [`${{ env.HEAD_SHA }}`](${{ github.event.repository.html_url }}/commit/${{ github.event.pull_request.head.sha }}) (PR)' >> pr_comment
echo '```' >> pr_comment
cat cmp_results >> pr_comment
echo '```' >> pr_comment
cat pr_comment > ${{ env.PR_COMMENT }}

- name: Comment on PR
uses: actions/github-script@v7
- name: Run benchmarks
uses: CodSpeedHQ/action@v4
env:
RAY_ENABLE_UV_RUN_RUNTIME_ENV: 0
PLUGBOARD_IO_READ_TIMEOUT: 5.0
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: require('fs').readFileSync('${{ env.PR_COMMENT }}').toString()
});
mode: walltime
run: uv run pytest tests/benchmark/ --codspeed

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ test = [
"optuna>=3.0,<5",
"pytest>=8.3,<10",
"pytest-asyncio>=1.0,<2",
"pytest-benchmark>=5.1.0",
"pytest-codspeed>=4.3.0",
"pytest-cases>=3.8,<4",
"pytest-env>=1.1,<2",
"pytest-rerunfailures>=15.0,<17",
Expand Down
84 changes: 66 additions & 18 deletions tests/benchmark/test_benchmarking.py
Original file line number Diff line number Diff line change
@@ -1,35 +1,83 @@
"""Simple benchmark tests for Plugboard models."""
"""Benchmark tests for Plugboard processes."""

import asyncio

from pytest_benchmark.fixture import BenchmarkFixture
import pytest
from pytest_codspeed import BenchmarkFixture

from plugboard.connector import AsyncioConnector
from plugboard.process import LocalProcess, Process
from plugboard.connector import AsyncioConnector, Connector, RayConnector, ZMQConnector
from plugboard.process import LocalProcess, Process, RayProcess
from plugboard.schemas import ConnectorSpec
from tests.integration.test_process_with_components_run import A, B


def _setup_process() -> tuple[tuple[Process], dict]:
comp_a = A(name="comp_a", iters=1000)
ITERS = 1000

CONNECTOR_PROCESS_PARAMS = [
(AsyncioConnector, LocalProcess),
(ZMQConnector, LocalProcess),
(RayConnector, RayProcess),
]
CONNECTOR_PROCESS_IDS = ["asyncio", "zmq", "ray"]


def _build_process(connector_cls: type[Connector], process_cls: type[Process]) -> Process:
"""Build a process with the given connector and process class."""
comp_a = A(name="comp_a", iters=ITERS)
comp_b1 = B(name="comp_b1", factor=1)
comp_b2 = B(name="comp_b2", factor=2)
components = [comp_a, comp_b1, comp_b2]
connectors = [
AsyncioConnector(spec=ConnectorSpec(source="comp_a.out_1", target="comp_b1.in_1")),
AsyncioConnector(spec=ConnectorSpec(source="comp_b1.out_1", target="comp_b2.in_1")),
connector_cls(spec=ConnectorSpec(source="comp_a.out_1", target="comp_b1.in_1")),
connector_cls(spec=ConnectorSpec(source="comp_b1.out_1", target="comp_b2.in_1")),
]
process = LocalProcess(components=components, connectors=connectors)
# Initialise process so that this is excluded from the benchmark timing
asyncio.run(process.init())
# Return args and kwargs tuple for benchmark.pedantic
return (process,), {}
return process_cls(components=components, connectors=connectors)


@pytest.mark.parametrize(
    "connector_cls, process_cls",
    CONNECTOR_PROCESS_PARAMS,
    ids=CONNECTOR_PROCESS_IDS,
)
def test_benchmark_process_run(
    benchmark: BenchmarkFixture,
    connector_cls: type[Connector],
    process_cls: type[Process],
    ray_ctx: None,  # NOTE(review): presumably a fixture that starts/stops a Ray context; only its setup side effect is used — confirm against conftest
) -> None:
    """Benchmark running of a Plugboard Process.

    Parametrized over connector/process pairings (asyncio, zmq, ray) so each
    transport is measured separately. Process construction and ``init()`` are
    performed in the benchmark's setup callable so only ``run()`` is timed.
    """

    def _setup() -> tuple[tuple[Process], dict]:
        # Build and initialise the process outside the timed section.
        async def _init() -> Process:
            process = _build_process(connector_cls, process_cls)
            await process.init()
            return process

        # benchmark.pedantic expects setup to return (args, kwargs) for the target.
        return (asyncio.run(_init()),), {}

    def _run(process: Process) -> None:
        # Timed section: only the process run itself.
        asyncio.run(process.run())

    # setup runs once per round, so each of the 5 rounds gets a fresh process.
    benchmark.pedantic(_run, setup=_setup, rounds=5)


def _run_process(process: Process) -> None:
asyncio.run(process.run())
@pytest.mark.benchmark
@pytest.mark.parametrize(
"connector_cls, process_cls",
CONNECTOR_PROCESS_PARAMS,
ids=CONNECTOR_PROCESS_IDS,
)
def test_benchmark_process_lifecycle(
connector_cls: type[Connector],
process_cls: type[Process],
ray_ctx: None,
) -> None:
"""Benchmark the full lifecycle (init, run, destroy) of a Plugboard Process."""

async def _lifecycle() -> None:
process = _build_process(connector_cls, process_cls)
await process.init()
await process.run()
await process.destroy()

def test_benchmark_process_run(benchmark: BenchmarkFixture) -> None:
"""Benchmark the running of a Plugboard Process."""
benchmark.pedantic(_run_process, setup=_setup_process, rounds=5)
asyncio.run(_lifecycle())
58 changes: 32 additions & 26 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading