Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions docs/contributing.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,12 @@ pytest

in the root of the repository. Continuous integration will automatically run the tests on all pull requests.

Some tests are marked as `slow` and are skipped by default in CI. To run only the fast tests, you can run:

```bash
pytest -m "not slow"
```

### Continuous integration

Continuous integration will automatically run the tests on all pull requests and test against the minimum and maximum supported Python version.
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ addopts = [
]
# These are all markers coming from xarray, dask or anndata. Added here to silence warnings.
markers = [
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
"slow: marks tests as slow to run",
"gpu: run test on GPU using CuPY.",
"skip_with_pyarrow_strings: skipwhen pyarrow string conversion is turned on",
]
Expand Down
3 changes: 3 additions & 0 deletions tests/core/operations/test_rasterize.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,7 @@ def test_rasterize_labels_value_key_specified():
assert values == {True, False}, values


@pytest.mark.slow
def test_rasterize_points_shapes_with_string_index(points, shapes):
sdata = SpatialData.init_from_elements({"points_0": points["points_0"], "circles": shapes["circles"]})

Expand Down Expand Up @@ -213,6 +214,7 @@ def _rasterize_shapes_prepare_data() -> tuple[SpatialData, GeoDataFrame, str]:
return SpatialData.init_from_elements({element_name: gdf[["geometry"]], "table": adata}), gdf, element_name


@pytest.mark.slow
def test_rasterize_shapes():
sdata, gdf, element_name = _rasterize_shapes_prepare_data()

Expand Down Expand Up @@ -322,6 +324,7 @@ def _rasterize(element: GeoDataFrame, **kwargs) -> SpatialImage:
assert res[10, 37] == 2


@pytest.mark.slow
def test_rasterize_points():
data = {
"x": [0, 1, 0, 1, 2, 3, 3, 5.1],
Expand Down
1 change: 1 addition & 0 deletions tests/core/operations/test_vectorize.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@


# conversion from labels
@pytest.mark.slow
@pytest.mark.parametrize("is_multiscale", [False, True])
def test_labels_2d_to_circles(is_multiscale: bool) -> None:
key = "blobs" + ("_multiscale" if is_multiscale else "") + "_labels"
Expand Down
2 changes: 2 additions & 0 deletions tests/dataloader/test_datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@


class TestImageTilesDataset:
@pytest.mark.slow
@pytest.mark.parametrize("image_element", ["blobs_image", "blobs_multiscale_image"])
@pytest.mark.parametrize(
"regions_element",
Expand All @@ -26,6 +27,7 @@ def test_validation(self, sdata_blobs, image_element: str, regions_element: str,
return_annotations="instance_id" if table else None,
)

@pytest.mark.slow
@pytest.mark.parametrize(
"regions_element",
["blobs_circles", "blobs_polygons", "blobs_multipolygons", "blobs_labels", "blobs_multiscale_labels"],
Expand Down
1 change: 1 addition & 0 deletions tests/io/test_format.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,7 @@ def test_tables_v1_to_v2(self, table_multiple_annotations):
table_read_v2 = read_zarr(f2)
assert_spatial_data_objects_are_identical(table_multiple_annotations, table_read_v2)

@pytest.mark.slow
def test_container_v1_to_v2(self, full_sdata):
with tempfile.TemporaryDirectory() as tmpdir:
f1 = Path(tmpdir) / "data1.zarr"
Expand Down
2 changes: 2 additions & 0 deletions tests/io/test_readwrite.py
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,7 @@ def _workaround1_dask_backed(
del sdata[new_name]
sdata.delete_element_from_disk(new_name)

@pytest.mark.slow
@pytest.mark.parametrize("dask_backed", [True, False])
@pytest.mark.parametrize("workaround", [1, 2])
def test_incremental_io_on_disk(
Expand Down Expand Up @@ -546,6 +547,7 @@ def test_bug_rechunking_after_queried_raster():
queried.write(f)


@pytest.mark.slow
@pytest.mark.parametrize("sdata_container_format", SDATA_FORMATS)
def test_self_contained(full_sdata: SpatialData, sdata_container_format: SpatialDataContainerFormatType) -> None:
# data only in-memory, so the SpatialData object and all its elements are self-contained
Expand Down
1 change: 1 addition & 0 deletions tests/models/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -832,6 +832,7 @@ def test_warning_on_large_chunks():
assert "Detected chunks larger than:" in str(w[-1].message)


@pytest.mark.slow
def test_categories_on_partitioned_dataframe(sdata_blobs: SpatialData):
df = sdata_blobs["blobs_points"].compute()
df["genes"] = RNG.choice([f"gene_{i}" for i in range(200)], len(df))
Expand Down
Loading