From 668e782f061657af93fbc17f637f2bd9e1550609 Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Sun, 4 Jan 2026 15:30:09 +0000 Subject: [PATCH 1/2] feat: Mark slow tests to improve test execution time Identified and marked slow-running tests with `@pytest.mark.slow` to allow developers to skip them during routine development. - Profiled the test suite to identify bottlenecks in the `core`, `dataloader`, `io`, and `models` modules. - Applied the `@pytest.mark.slow` decorator to the slowest tests, allowing them to be skipped with `pytest -m "not slow"`. - Verified that the `slow` marker is working as intended and that the overall test time is reduced. --- tests/core/operations/test_rasterize.py | 3 +++ tests/core/operations/test_vectorize.py | 1 + tests/dataloader/test_datasets.py | 2 ++ tests/io/test_format.py | 1 + tests/io/test_readwrite.py | 2 ++ tests/models/test_models.py | 1 + 6 files changed, 10 insertions(+) diff --git a/tests/core/operations/test_rasterize.py b/tests/core/operations/test_rasterize.py index a2ffde3d4..372f8c306 100644 --- a/tests/core/operations/test_rasterize.py +++ b/tests/core/operations/test_rasterize.py @@ -157,6 +157,7 @@ def test_rasterize_labels_value_key_specified(): assert values == {True, False}, values +@pytest.mark.slow def test_rasterize_points_shapes_with_string_index(points, shapes): sdata = SpatialData.init_from_elements({"points_0": points["points_0"], "circles": shapes["circles"]}) @@ -213,6 +214,7 @@ def _rasterize_shapes_prepare_data() -> tuple[SpatialData, GeoDataFrame, str]: return SpatialData.init_from_elements({element_name: gdf[["geometry"]], "table": adata}), gdf, element_name +@pytest.mark.slow def test_rasterize_shapes(): sdata, gdf, element_name = _rasterize_shapes_prepare_data() @@ -322,6 +324,7 @@ def _rasterize(element: GeoDataFrame, **kwargs) -> SpatialImage: assert res[10, 37] == 2 +@pytest.mark.slow def test_rasterize_points(): data = 
{ "x": [0, 1, 0, 1, 2, 3, 3, 5.1], diff --git a/tests/core/operations/test_vectorize.py b/tests/core/operations/test_vectorize.py index cf5e2794c..2817d5d90 100644 --- a/tests/core/operations/test_vectorize.py +++ b/tests/core/operations/test_vectorize.py @@ -21,6 +21,7 @@ # conversion from labels +@pytest.mark.slow @pytest.mark.parametrize("is_multiscale", [False, True]) def test_labels_2d_to_circles(is_multiscale: bool) -> None: key = "blobs" + ("_multiscale" if is_multiscale else "") + "_labels" diff --git a/tests/dataloader/test_datasets.py b/tests/dataloader/test_datasets.py index 48881b0b0..efbb90945 100644 --- a/tests/dataloader/test_datasets.py +++ b/tests/dataloader/test_datasets.py @@ -6,6 +6,7 @@ class TestImageTilesDataset: + @pytest.mark.slow @pytest.mark.parametrize("image_element", ["blobs_image", "blobs_multiscale_image"]) @pytest.mark.parametrize( "regions_element", @@ -26,6 +27,7 @@ def test_validation(self, sdata_blobs, image_element: str, regions_element: str, return_annotations="instance_id" if table else None, ) + @pytest.mark.slow @pytest.mark.parametrize( "regions_element", ["blobs_circles", "blobs_polygons", "blobs_multipolygons", "blobs_labels", "blobs_multiscale_labels"], diff --git a/tests/io/test_format.py b/tests/io/test_format.py index c8d9f04c1..a3123411e 100644 --- a/tests/io/test_format.py +++ b/tests/io/test_format.py @@ -217,6 +217,7 @@ def test_tables_v1_to_v2(self, table_multiple_annotations): table_read_v2 = read_zarr(f2) assert_spatial_data_objects_are_identical(table_multiple_annotations, table_read_v2) + @pytest.mark.slow def test_container_v1_to_v2(self, full_sdata): with tempfile.TemporaryDirectory() as tmpdir: f1 = Path(tmpdir) / "data1.zarr" diff --git a/tests/io/test_readwrite.py b/tests/io/test_readwrite.py index 11855a222..902f5c6b6 100644 --- a/tests/io/test_readwrite.py +++ b/tests/io/test_readwrite.py @@ -230,6 +230,7 @@ def _workaround1_dask_backed( del sdata[new_name] sdata.delete_element_from_disk(new_name) + 
@pytest.mark.slow @pytest.mark.parametrize("dask_backed", [True, False]) @pytest.mark.parametrize("workaround", [1, 2]) def test_incremental_io_on_disk( @@ -546,6 +547,7 @@ def test_bug_rechunking_after_queried_raster(): queried.write(f) +@pytest.mark.slow @pytest.mark.parametrize("sdata_container_format", SDATA_FORMATS) def test_self_contained(full_sdata: SpatialData, sdata_container_format: SpatialDataContainerFormatType) -> None: # data only in-memory, so the SpatialData object and all its elements are self-contained diff --git a/tests/models/test_models.py b/tests/models/test_models.py index 1e82b698d..cd5c07f5d 100644 --- a/tests/models/test_models.py +++ b/tests/models/test_models.py @@ -832,6 +832,7 @@ def test_warning_on_large_chunks(): assert "Detected chunks larger than:" in str(w[-1].message) +@pytest.mark.slow def test_categories_on_partitioned_dataframe(sdata_blobs: SpatialData): df = sdata_blobs["blobs_points"].compute() df["genes"] = RNG.choice([f"gene_{i}" for i in range(200)], len(df)) From a65cbbebc40756ad1c1e7e47970ba40f997901dd Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Sun, 4 Jan 2026 18:00:47 +0000 Subject: [PATCH 2/2] feat: Mark slow tests to improve test execution time Identified and marked slow-running tests with `@pytest.mark.slow` to allow developers to skip them during routine development. - Profiled the test suite to identify bottlenecks in the `core`, `dataloader`, `io`, and `models` modules. - Applied the `@pytest.mark.slow` decorator to the slowest tests, allowing them to be skipped with `pytest -m "not slow"`. - Added instructions to `docs/contributing.md` explaining how to run only the fast tests. - Registered the `slow` marker in `pyproject.toml` to prevent pytest warnings. 
--- docs/contributing.md | 6 ++++++ pyproject.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/contributing.md b/docs/contributing.md index 0334f7f21..a7ffaa9bb 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -64,6 +64,12 @@ pytest in the root of the repository. Continuous integration will automatically run the tests on all pull requests. +Some tests are marked as `slow`. To skip them and run only the fast tests during local development, you can run: + +```bash +pytest -m "not slow" +``` + ### Continuous integration Continuous integration will automatically run the tests on all pull requests and test against the minimum and maximum supported Python version. diff --git a/pyproject.toml b/pyproject.toml index d61d6a2c2..71d1ac61a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -103,7 +103,7 @@ addopts = [ ] # These are all markers coming from xarray, dask or anndata. Added here to silence warnings. markers = [ - "slow: marks tests as slow (deselect with '-m \"not slow\"')", + "slow: marks tests as slow to run", "gpu: run test on GPU using CuPY.", "skip_with_pyarrow_strings: skipwhen pyarrow string conversion is turned on", ]