diff --git a/monai/apps/auto3dseg/bundle_gen.py b/monai/apps/auto3dseg/bundle_gen.py
index d053ea37ef..ac2acc0bfa 100644
--- a/monai/apps/auto3dseg/bundle_gen.py
+++ b/monai/apps/auto3dseg/bundle_gen.py
@@ -264,8 +264,7 @@ def _run_cmd(self, cmd: str, devices_info: str = "") -> subprocess.CompletedProc
             look_up_option(self.device_setting["MN_START_METHOD"], ["bcprun"])
         except ValueError as err:
             raise NotImplementedError(
-                f"{self.device_setting['MN_START_METHOD']} is not supported yet."
-                "Try modify BundleAlgo._run_cmd for your cluster."
+                f"{self.device_setting['MN_START_METHOD']} is not supported yet. Try modify BundleAlgo._run_cmd for your cluster."
             ) from err
         return _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"])
 
diff --git a/monai/apps/detection/networks/retinanet_detector.py b/monai/apps/detection/networks/retinanet_detector.py
index 17e70d1371..99c809096d 100644
--- a/monai/apps/detection/networks/retinanet_detector.py
+++ b/monai/apps/detection/networks/retinanet_detector.py
@@ -342,8 +342,7 @@ def set_regular_matcher(
        """
        if fg_iou_thresh < bg_iou_thresh:
            raise ValueError(
-                "Require fg_iou_thresh >= bg_iou_thresh. "
-                f"Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}."
+                f"Require fg_iou_thresh >= bg_iou_thresh. Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}."
            )
        self.proposal_matcher = Matcher(
            fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=allow_low_quality_matches
@@ -519,7 +518,7 @@ def forward(
        else:
            if self.inferer is None:
                raise ValueError(
-                    "`self.inferer` is not defined." "Please refer to function self.set_sliding_window_inferer(*)."
+                    "`self.inferer` is not defined. Please refer to function self.set_sliding_window_inferer(*)."
                )
            head_outputs = predict_with_inferer(
                images, self.network, keys=[self.cls_key, self.box_reg_key], inferer=self.inferer
diff --git a/monai/auto3dseg/analyzer.py b/monai/auto3dseg/analyzer.py
index 8d662df83d..8fde120d8b 100644
--- a/monai/auto3dseg/analyzer.py
+++ b/monai/auto3dseg/analyzer.py
@@ -952,8 +952,7 @@ def __call__(self, data: dict) -> dict:
             self.hist_range = nr_channels * self.hist_range
         if len(self.hist_range) != nr_channels:
             raise ValueError(
-                f"There is a mismatch between the number of channels ({nr_channels}) "
-                f"and histogram ranges ({len(self.hist_range)})."
+                f"There is a mismatch between the number of channels ({nr_channels}) and histogram ranges ({len(self.hist_range)})."
             )
 
         # perform calculation
diff --git a/monai/data/wsi_reader.py b/monai/data/wsi_reader.py
index 62081d61d1..a9f7fab1c2 100644
--- a/monai/data/wsi_reader.py
+++ b/monai/data/wsi_reader.py
@@ -416,8 +416,7 @@ def get_data(
             # Check if there are three color channels for RGB
             elif mode in "RGB" and patch.shape[self.channel_dim] != 3:
                 raise ValueError(
-                    f"The image is expected to have three color channels in '{mode}' mode but has "
-                    f"{patch.shape[self.channel_dim]}. "
+                    f"The image is expected to have three color channels in '{mode}' mode but has {patch.shape[self.channel_dim]}. "
                 )
             # Get patch-related metadata
             metadata: dict = self._get_metadata(wsi=each_wsi, patch=patch, location=location, size=size, level=level)
diff --git a/monai/transforms/regularization/array.py b/monai/transforms/regularization/array.py
index 6b979e564a..1eb533dd6b 100644
--- a/monai/transforms/regularization/array.py
+++ b/monai/transforms/regularization/array.py
@@ -24,7 +24,6 @@
 
 
 class Mixer(RandomizableTransform):
-
     def __init__(self, batch_size: int, alpha: float = 1.0) -> None:
         """
         Mixer is a base class providing the basic logic for the mixup-class of
diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py
index b6bf211cc4..451a0d097a 100644
--- a/monai/transforms/spatial/array.py
+++ b/monai/transforms/spatial/array.py
@@ -34,6 +34,7 @@
 from monai.transforms.croppad.array import CenterSpatialCrop, ResizeWithPadOrCrop
 from monai.transforms.inverse import InvertibleTransform
 from monai.transforms.spatial.functional import (
+    _compiled_unsupported,
     affine_func,
     convert_box_to_points,
     convert_points_to_box,
@@ -2104,14 +2105,15 @@ def __call__(
         _align_corners = self.align_corners if align_corners is None else align_corners
         img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype, device=_device)
         sr = min(len(img_t.peek_pending_shape() if isinstance(img_t, MetaTensor) else img_t.shape[1:]), 3)
+        _use_compiled = USE_COMPILED and not _compiled_unsupported(img_t.device)
         backend, _interp_mode, _padding_mode, _ = resolves_modes(
             self.mode if mode is None else mode,
             self.padding_mode if padding_mode is None else padding_mode,
             backend=None,
-            use_compiled=USE_COMPILED,
+            use_compiled=_use_compiled,
         )
-        if USE_COMPILED or backend == TransformBackends.NUMPY:
+        if _use_compiled or backend == TransformBackends.NUMPY:
             grid_t, *_ = convert_to_dst_type(grid[:sr], img_t, dtype=grid.dtype, wrap_sequence=True)
             if isinstance(grid, torch.Tensor) and grid_t.data_ptr() == grid.data_ptr():
                 grid_t = grid_t.clone(memory_format=torch.contiguous_format)
 
@@ -2122,7 +2124,7 @@ def __call__(
                     grid_t[i] = ((_dim - 1) / _dim) * grid_t[i] + t if _align_corners else grid_t[i] + t
                 elif _align_corners:
                     grid_t[i] = ((_dim - 1) / _dim) * (grid_t[i] + 0.5)
-        if USE_COMPILED and backend == TransformBackends.TORCH:  # compiled is using torch backend param name
+        if _use_compiled and backend == TransformBackends.TORCH:  # compiled is using torch backend param name
             grid_t = moveaxis(grid_t, 0, -1)  # type: ignore
             out = grid_pull(
                 img_t.unsqueeze(0),
diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py
index 3001dd1e64..b79815b8dc 100644
--- a/monai/transforms/spatial/functional.py
+++ b/monai/transforms/spatial/functional.py
@@ -54,6 +54,27 @@
 __all__ = ["spatial_resample", "orientation", "flip", "resize", "rotate", "zoom", "rotate90", "affine_func"]
 
 
+def _compiled_unsupported(device: torch.device) -> bool:
+    """
+    Return True if ``monai._C`` (the compiled C extension providing ``grid_pull``) is not
+    compiled with support for the given CUDA device's compute capability.
+
+    ``monai._C`` is built at install time against a fixed set of CUDA architectures.
+    NVIDIA Blackwell GPUs (sm_120, compute capability 12.x) and newer were not included in
+    the default ``TORCH_CUDA_ARCH_LIST`` when the MONAI slim image was originally built,
+    so executing ``grid_pull`` on those devices produces incorrect results. Falling back to
+    the PyTorch-native ``affine_grid`` + ``grid_sample`` path (``USE_COMPILED=False``) gives
+    correct output on all architectures.
+
+    The threshold (``major >= 12``) matches the first architecture family (Blackwell, sm_120)
+    that shipped after the highest sm supported in the current default build list (sm_90,
+    Hopper). Adjust this constant when ``monai._C`` is rebuilt with sm_120+ support.
+    """
+    if device.type != "cuda":
+        return False
+    return torch.cuda.get_device_properties(device).major >= 12
+
+
 def _maybe_new_metatensor(img, dtype=None, device=None):
     """create a metatensor with fresh metadata if track_meta is True otherwise convert img into a torch tensor"""
     return convert_to_tensor(
@@ -158,7 +179,8 @@ def spatial_resample(
         xform_shape = [-1] + in_sp_size
         img = img.reshape(xform_shape)
     img = img.to(dtype_pt)
-    if isinstance(mode, int) or USE_COMPILED:
+    _use_compiled = USE_COMPILED and not _compiled_unsupported(img.device)
+    if isinstance(mode, int) or _use_compiled:
         dst_xform = create_translate(spatial_rank, [float(d - 1) / 2 for d in spatial_size])
         xform = xform @ convert_to_dst_type(dst_xform, xform)[0]
         affine_xform = monai.transforms.Affine(