From ba56a6d029abed931e3d8bcac5f7c91ddd2ccaf2 Mon Sep 17 00:00:00 2001 From: "R. Garcia-Dias" Date: Wed, 5 Feb 2025 15:26:32 +0000 Subject: [PATCH 1/6] Move test_image_rw.py --- tests/{ => data}/test_image_rw.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{ => data}/test_image_rw.py (100%) diff --git a/tests/test_image_rw.py b/tests/data/test_image_rw.py similarity index 100% rename from tests/test_image_rw.py rename to tests/data/test_image_rw.py From 09c2cd91ed96557fd568821fb23a6cc794f390a9 Mon Sep 17 00:00:00 2001 From: "R. Garcia-Dias" Date: Sun, 8 Mar 2026 12:10:57 +0000 Subject: [PATCH 2/6] fix(spatial): fall back to PyTorch path on Blackwell (sm_120) GPUs when USE_COMPILED=True monai._C (grid_pull) was not compiled with sm_120 (Blackwell) architecture support, causing spatial_resample to produce incorrect results on RTX 50-series GPUs when USE_COMPILED=True. Add _compiled_unsupported() to detect compute capability major >= 12 at runtime and transparently fall back to the PyTorch-native affine_grid + grid_sample path, which is verified correct on sm_120. Fixes test_flips_inverse_124 in tests.transforms.spatial.test_spatial_resampled on NVIDIA GeForce RTX 5090 (Blackwell, sm_120). --- monai/transforms/spatial/functional.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/monai/transforms/spatial/functional.py b/monai/transforms/spatial/functional.py index b693e7d023..25d70b7b65 100644 --- a/monai/transforms/spatial/functional.py +++ b/monai/transforms/spatial/functional.py @@ -54,6 +54,27 @@ __all__ = ["spatial_resample", "orientation", "flip", "resize", "rotate", "zoom", "rotate90", "affine_func"] +def _compiled_unsupported(device: torch.device) -> bool: + """ + Return True if ``monai._C`` (the compiled C extension providing ``grid_pull``) is not + compiled with support for the given CUDA device's compute capability. 
+ + ``monai._C`` is built at install time against a fixed set of CUDA architectures. + NVIDIA Blackwell GPUs (sm_120, compute capability 12.x) and newer were not included in + the default ``TORCH_CUDA_ARCH_LIST`` when the MONAI slim image was originally built, + so executing ``grid_pull`` on those devices produces incorrect results. Falling back to + the PyTorch-native ``affine_grid`` + ``grid_sample`` path (``USE_COMPILED=False``) gives + correct output on all architectures. + + The threshold (``major >= 12``) matches the first architecture family (Blackwell, sm_120) + that shipped after the highest sm supported in the current default build list (sm_90, + Hopper). Adjust this constant when ``monai._C`` is rebuilt with sm_120+ support. + """ + if device.type != "cuda": + return False + return torch.cuda.get_device_properties(device).major >= 12 + + def _maybe_new_metatensor(img, dtype=None, device=None): """create a metatensor with fresh metadata if track_meta is True otherwise convert img into a torch tensor""" return convert_to_tensor( @@ -158,7 +179,8 @@ def spatial_resample( xform_shape = [-1] + in_sp_size img = img.reshape(xform_shape) img = img.to(dtype_pt) - if isinstance(mode, int) or USE_COMPILED: + _use_compiled = USE_COMPILED and not _compiled_unsupported(img.device) + if isinstance(mode, int) or _use_compiled: dst_xform = create_translate(spatial_rank, [float(d - 1) / 2 for d in spatial_size]) xform = xform @ convert_to_dst_type(dst_xform, xform)[0] affine_xform = monai.transforms.Affine( From 7cd06078ae4acb128d5f1c4ffc4453157c7900d6 Mon Sep 17 00:00:00 2001 From: "R. Garcia-Dias" Date: Sun, 8 Mar 2026 12:28:16 +0000 Subject: [PATCH 3/6] fix(spatial): extend Blackwell fallback to Resample class in array.py The same USE_COMPILED guard that was fixed in spatial_resample (functional.py) was also present in Resample.__call__ (array.py), used by Affine, RandAffine and related transforms. 
Apply the same _compiled_unsupported() check so that grid_pull is not called on sm_120 (Blackwell) devices when monai._C lacks sm_120 support, preventing garbage output in test_affine, test_affined, test_rand_affine and test_rand_affined on RTX 50-series GPUs. --- monai/transforms/spatial/array.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index e4ed196eff..8491c216c7 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -24,6 +24,7 @@ import torch from monai.config import USE_COMPILED, DtypeLike +from monai.transforms.spatial.functional import _compiled_unsupported from monai.config.type_definitions import NdarrayOrTensor from monai.data.box_utils import BoxMode, StandardMode from monai.data.meta_obj import get_track_meta, set_track_meta @@ -2062,14 +2063,15 @@ def __call__( _align_corners = self.align_corners if align_corners is None else align_corners img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype, device=_device) sr = min(len(img_t.peek_pending_shape() if isinstance(img_t, MetaTensor) else img_t.shape[1:]), 3) + _use_compiled = USE_COMPILED and not _compiled_unsupported(img_t.device) backend, _interp_mode, _padding_mode, _ = resolves_modes( self.mode if mode is None else mode, self.padding_mode if padding_mode is None else padding_mode, backend=None, - use_compiled=USE_COMPILED, + use_compiled=_use_compiled, ) - if USE_COMPILED or backend == TransformBackends.NUMPY: + if _use_compiled or backend == TransformBackends.NUMPY: grid_t, *_ = convert_to_dst_type(grid[:sr], img_t, dtype=grid.dtype, wrap_sequence=True) if isinstance(grid, torch.Tensor) and grid_t.data_ptr() == grid.data_ptr(): grid_t = grid_t.clone(memory_format=torch.contiguous_format) @@ -2080,7 +2082,7 @@ def __call__( grid_t[i] = ((_dim - 1) / _dim) * grid_t[i] + t if _align_corners else grid_t[i] + t elif _align_corners: grid_t[i] = ((_dim - 1) / 
_dim) * (grid_t[i] + 0.5) - if USE_COMPILED and backend == TransformBackends.TORCH: # compiled is using torch backend param name + if _use_compiled and backend == TransformBackends.TORCH: # compiled is using torch backend param name grid_t = moveaxis(grid_t, 0, -1) # type: ignore out = grid_pull( img_t.unsqueeze(0), From 3fd7546769064b4c5b4e4e557e9387fb58fedc4b Mon Sep 17 00:00:00 2001 From: "R. Garcia-Dias" Date: Mon, 9 Mar 2026 09:57:23 +0000 Subject: [PATCH 4/6] lint Signed-off-by: R. Garcia-Dias --- monai/transforms/spatial/array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 8491c216c7..540dfc9dba 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -24,7 +24,6 @@ import torch from monai.config import USE_COMPILED, DtypeLike -from monai.transforms.spatial.functional import _compiled_unsupported from monai.config.type_definitions import NdarrayOrTensor from monai.data.box_utils import BoxMode, StandardMode from monai.data.meta_obj import get_track_meta, set_track_meta @@ -35,6 +34,7 @@ from monai.transforms.croppad.array import CenterSpatialCrop, ResizeWithPadOrCrop from monai.transforms.inverse import InvertibleTransform from monai.transforms.spatial.functional import ( + _compiled_unsupported, affine_func, convert_box_to_points, convert_points_to_box, From 4f6df0705387d794aea39fe5efc66e7a746f0ee4 Mon Sep 17 00:00:00 2001 From: "R. Garcia-Dias" Date: Mon, 9 Mar 2026 10:07:34 +0000 Subject: [PATCH 5/6] lint Signed-off-by: R. 
Garcia-Dias --- monai/apps/auto3dseg/bundle_gen.py | 24 +++--- .../detection/networks/retinanet_detector.py | 80 +++++++------------ monai/apps/detection/utils/anchor_utils.py | 16 ++-- monai/apps/detection/utils/detector_utils.py | 10 +-- monai/auto3dseg/analyzer.py | 57 ++++--------- monai/data/wsi_reader.py | 37 +++------ monai/losses/unified_focal_loss.py | 2 +- monai/metrics/meandice.py | 6 +- monai/networks/blocks/patchembedding.py | 2 +- monai/networks/layers/factories.py | 6 +- monai/transforms/croppad/array.py | 26 ++---- monai/transforms/regularization/array.py | 3 +- 12 files changed, 85 insertions(+), 184 deletions(-) diff --git a/monai/apps/auto3dseg/bundle_gen.py b/monai/apps/auto3dseg/bundle_gen.py index 8a54d18be7..d575ba9937 100644 --- a/monai/apps/auto3dseg/bundle_gen.py +++ b/monai/apps/auto3dseg/bundle_gen.py @@ -264,21 +264,16 @@ def _run_cmd(self, cmd: str, devices_info: str = "") -> subprocess.CompletedProc look_up_option(self.device_setting["MN_START_METHOD"], ["bcprun"]) except ValueError as err: raise NotImplementedError( - f"{self.device_setting['MN_START_METHOD']} is not supported yet." - "Try modify BundleAlgo._run_cmd for your cluster." + f"{self.device_setting['MN_START_METHOD']} is not supported yet.Try modify BundleAlgo._run_cmd for your cluster." 
) from err return _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"]) elif int(self.device_setting["n_devices"]) > 1: - return _run_cmd_torchrun( - cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True - ) + return _run_cmd_torchrun(cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True) else: return run_cmd(cmd.split(), run_cmd_verbose=True, env=ps_environ, check=True) - def train( - self, train_params: None | dict = None, device_setting: None | dict = None - ) -> subprocess.CompletedProcess: + def train(self, train_params: None | dict = None, device_setting: None | dict = None) -> subprocess.CompletedProcess: """ Load the run function in the training script of each model. Training parameter is predefined by the algo_config.yaml file, which is pre-filled by the fill_template_config function in the same instance. @@ -369,9 +364,7 @@ def get_output_path(self): # path to download the algo_templates -default_algo_zip = ( - f"https://github.com/Project-MONAI/research-contributions/releases/download/algo_templates/{ALGO_HASH}.tar.gz" -) +default_algo_zip = f"https://github.com/Project-MONAI/research-contributions/releases/download/algo_templates/{ALGO_HASH}.tar.gz" # default algorithms default_algos = { @@ -396,7 +389,7 @@ def _download_algos_url(url: str, at_path: str) -> dict[str, dict[str, str]]: try: download_and_extract(url=url, filepath=algo_compressed_file, output_dir=os.path.dirname(at_path)) except Exception as e: - msg = f"Download and extract of {url} failed, attempt {i+1}/{download_attempts}." + msg = f"Download and extract of {url} failed, attempt {i + 1}/{download_attempts}." 
if i < download_attempts - 1: warnings.warn(msg) time.sleep(i) @@ -660,6 +653,7 @@ def generate( gen_algo.export_to_disk(output_folder, name, fold=f_id) algo_to_pickle(gen_algo, template_path=algo.template_path) - self.history.append( - {AlgoKeys.ID: name, AlgoKeys.ALGO: gen_algo} - ) # track the previous, may create a persistent history + self.history.append({ + AlgoKeys.ID: name, + AlgoKeys.ALGO: gen_algo, + }) # track the previous, may create a persistent history diff --git a/monai/apps/detection/networks/retinanet_detector.py b/monai/apps/detection/networks/retinanet_detector.py index a0573d6cd1..265db1519a 100644 --- a/monai/apps/detection/networks/retinanet_detector.py +++ b/monai/apps/detection/networks/retinanet_detector.py @@ -59,9 +59,7 @@ from monai.networks.nets import resnet from monai.utils import BlendMode, PytorchPadMode, ensure_tuple_rep, optional_import -BalancedPositiveNegativeSampler, _ = optional_import( - "torchvision.models.detection._utils", name="BalancedPositiveNegativeSampler" -) +BalancedPositiveNegativeSampler, _ = optional_import("torchvision.models.detection._utils", name="BalancedPositiveNegativeSampler") Matcher, _ = optional_import("torchvision.models.detection._utils", name="Matcher") @@ -328,9 +326,7 @@ def set_box_regression_loss(self, box_loss: nn.Module, encode_gt: bool, decode_p self.encode_gt = encode_gt self.decode_pred = decode_pred - def set_regular_matcher( - self, fg_iou_thresh: float, bg_iou_thresh: float, allow_low_quality_matches: bool = True - ) -> None: + def set_regular_matcher(self, fg_iou_thresh: float, bg_iou_thresh: float, allow_low_quality_matches: bool = True) -> None: """ Using for training. Set torchvision matcher that matches anchors with ground truth boxes. @@ -342,12 +338,9 @@ def set_regular_matcher( """ if fg_iou_thresh < bg_iou_thresh: raise ValueError( - "Require fg_iou_thresh >= bg_iou_thresh. " - f"Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}." 
+ f"Require fg_iou_thresh >= bg_iou_thresh. Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}." ) - self.proposal_matcher = Matcher( - fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=allow_low_quality_matches - ) + self.proposal_matcher = Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=allow_low_quality_matches) def set_atss_matcher(self, num_candidates: int = 4, center_in_gt: bool = False) -> None: """ @@ -496,9 +489,7 @@ def forward( """ # 1. Check if input arguments are valid if self.training: - targets = check_training_targets( - input_images, targets, self.spatial_dims, self.target_label_key, self.target_box_key - ) + targets = check_training_targets(input_images, targets, self.spatial_dims, self.target_label_key, self.target_box_key) self._check_detector_training_components() # 2. Pad list of images to a single Tensor `images` with spatial size divisible by self.size_divisible. @@ -518,12 +509,8 @@ def forward( ensure_dict_value_to_list_(head_outputs) else: if self.inferer is None: - raise ValueError( - "`self.inferer` is not defined." "Please refer to function self.set_sliding_window_inferer(*)." - ) - head_outputs = predict_with_inferer( - images, self.network, keys=[self.cls_key, self.box_reg_key], inferer=self.inferer - ) + raise ValueError("`self.inferer` is not defined.Please refer to function self.set_sliding_window_inferer(*).") + head_outputs = predict_with_inferer(images, self.network, keys=[self.cls_key, self.box_reg_key], inferer=self.inferer) # 4. Generate anchors and store it in self.anchors: List[Tensor] self.generate_anchors(images, head_outputs) @@ -545,7 +532,10 @@ def forward( # 6(2). 
If during inference, return detection results detections = self.postprocess_detections( - head_outputs, self.anchors, image_sizes, num_anchor_locs_per_level # type: ignore + head_outputs, + self.anchors, + image_sizes, + num_anchor_locs_per_level, # type: ignore ) return detections @@ -554,9 +544,7 @@ def _check_detector_training_components(self): Check if self.proposal_matcher and self.fg_bg_sampler have been set for training. """ if not hasattr(self, "proposal_matcher"): - raise AttributeError( - "Matcher is not set. Please refer to self.set_regular_matcher(*) or self.set_atss_matcher(*)." - ) + raise AttributeError("Matcher is not set. Please refer to self.set_regular_matcher(*) or self.set_atss_matcher(*).") if self.fg_bg_sampler is None and self.debug: warnings.warn( "No balanced sampler is used. Negative samples are likely to " @@ -653,9 +641,7 @@ def postprocess_detections( """ # recover level sizes, HWA or HWDA for each level - num_anchors_per_level = [ - num_anchor_locs * self.num_anchors_per_loc for num_anchor_locs in num_anchor_locs_per_level - ] + num_anchors_per_level = [num_anchor_locs * self.num_anchors_per_loc for num_anchor_locs in num_anchor_locs_per_level] # split outputs per level split_head_outputs: dict[str, list[Tensor]] = {} @@ -672,9 +658,7 @@ def postprocess_detections( detections: list[dict[str, Tensor]] = [] for index in range(num_images): - box_regression_per_image = [ - br[index] for br in box_regression - ] # List[Tensor], each sized (HWA, 2*spatial_dims) + box_regression_per_image = [br[index] for br in box_regression] # List[Tensor], each sized (HWA, 2*spatial_dims) logits_per_image = [cl[index] for cl in class_logits] # List[Tensor], each sized (HWA, self.num_classes) anchors_per_image, img_spatial_size = split_anchors[index], image_sizes[index] # decode box regression into boxes @@ -687,13 +671,11 @@ def postprocess_detections( boxes_per_image, logits_per_image, img_spatial_size ) - detections.append( - { - self.target_box_key: 
selected_boxes, # Tensor, sized (N, 2*spatial_dims) - self.pred_score_key: selected_scores, # Tensor, sized (N, ) - self.target_label_key: selected_labels, # Tensor, sized (N, ) - } - ) + detections.append({ + self.target_box_key: selected_boxes, # Tensor, sized (N, 2*spatial_dims) + self.pred_score_key: selected_scores, # Tensor, sized (N, ) + self.target_label_key: selected_labels, # Tensor, sized (N, ) + }) return detections @@ -722,9 +704,7 @@ def compute_loss( """ matched_idxs = self.compute_anchor_matched_idxs(anchors, targets, num_anchor_locs_per_level) losses_cls = self.compute_cls_loss(head_outputs_reshape[self.cls_key], targets, matched_idxs) - losses_box_regression = self.compute_box_loss( - head_outputs_reshape[self.box_reg_key], targets, anchors, matched_idxs - ) + losses_box_regression = self.compute_box_loss(head_outputs_reshape[self.box_reg_key], targets, anchors, matched_idxs) return {self.cls_key: losses_cls, self.box_reg_key: losses_box_regression} def compute_anchor_matched_idxs( @@ -757,9 +737,7 @@ def compute_anchor_matched_idxs( # anchors_per_image: Tensor, targets_per_image: Dice[str, Tensor] if targets_per_image[self.target_box_key].numel() == 0: # if no GT boxes - matched_idxs.append( - torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device) - ) + matched_idxs.append(torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)) continue # matched_idxs_per_image (Tensor[int64]): Tensor sized (sum(HWA),) or (sum(HWDA),) @@ -787,7 +765,7 @@ def compute_anchor_matched_idxs( ) if self.debug: - print(f"Max box overlap between anchors and gt boxes: {torch.max(match_quality_matrix,dim=1)[0]}.") + print(f"Max box overlap between anchors and gt boxes: {torch.max(match_quality_matrix, dim=1)[0]}.") if torch.max(matched_idxs_per_image) < 0: warnings.warn( @@ -799,9 +777,7 @@ def compute_anchor_matched_idxs( matched_idxs.append(matched_idxs_per_image) return matched_idxs - def 
compute_cls_loss( - self, cls_logits: Tensor, targets: list[dict[str, Tensor]], matched_idxs: list[Tensor] - ) -> Tensor: + def compute_cls_loss(self, cls_logits: Tensor, targets: list[dict[str, Tensor]], matched_idxs: list[Tensor]) -> Tensor: """ Compute classification losses. @@ -919,9 +895,7 @@ def get_cls_train_sample_per_image( gt_classes_target = torch.zeros_like(cls_logits_per_image) # (sum(HW(D)A), self.num_classes) gt_classes_target[ foreground_idxs_per_image, # fg anchor idx in - targets_per_image[self.target_label_key][ - matched_idxs_per_image[foreground_idxs_per_image] - ], # fg class label + targets_per_image[self.target_label_key][matched_idxs_per_image[foreground_idxs_per_image]], # fg class label ] = 1.0 if self.fg_bg_sampler is None: @@ -993,9 +967,9 @@ def get_box_train_sample_per_image( # select only the foreground boxes # matched GT boxes for foreground anchors - matched_gt_boxes_per_image = targets_per_image[self.target_box_key][ - matched_idxs_per_image[foreground_idxs_per_image] - ].to(box_regression_per_image.device) + matched_gt_boxes_per_image = targets_per_image[self.target_box_key][matched_idxs_per_image[foreground_idxs_per_image]].to( + box_regression_per_image.device + ) # predicted box regression for foreground anchors box_regression_per_image = box_regression_per_image[foreground_idxs_per_image, :] # foreground anchors diff --git a/monai/apps/detection/utils/anchor_utils.py b/monai/apps/detection/utils/anchor_utils.py index cbde3ebae9..f846be1850 100644 --- a/monai/apps/detection/utils/anchor_utils.py +++ b/monai/apps/detection/utils/anchor_utils.py @@ -136,9 +136,7 @@ def __init__( self.indexing = look_up_option(indexing, ["ij", "xy"]) self.aspect_ratios = aspect_ratios - self.cell_anchors = [ - self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(self.sizes, aspect_ratios) - ] + self.cell_anchors = [self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(self.sizes, aspect_ratios)] # This comment 
comes from torchvision. # TODO: https://github.com/pytorch/pytorch/issues/26792 @@ -174,13 +172,13 @@ def generate_anchors( if (self.spatial_dims >= 3) and (len(aspect_ratios_t.shape) != 2): raise ValueError( f"In {self.spatial_dims}-D image, aspect_ratios for each level should be \ - {len(aspect_ratios_t.shape)-1}-D. But got aspect_ratios with shape {aspect_ratios_t.shape}." + {len(aspect_ratios_t.shape) - 1}-D. But got aspect_ratios with shape {aspect_ratios_t.shape}." ) if (self.spatial_dims >= 3) and (aspect_ratios_t.shape[1] != self.spatial_dims - 1): raise ValueError( f"In {self.spatial_dims}-D image, aspect_ratios for each level should has \ - shape (_,{self.spatial_dims-1}). But got aspect_ratios with shape {aspect_ratios_t.shape}." + shape (_,{self.spatial_dims - 1}). But got aspect_ratios with shape {aspect_ratios_t.shape}." ) # if 2d, w:h = 1:aspect_ratios @@ -253,8 +251,7 @@ def grid_anchors(self, grid_sizes: list[list[int]], strides: list[list[Tensor]]) # compute anchor centers regarding to the image. # shifts_centers is [x_center, y_center] or [x_center, y_center, z_center] shifts_centers = [ - torch.arange(0, size[axis], dtype=torch.int32, device=device) * stride[axis] - for axis in range(self.spatial_dims) + torch.arange(0, size[axis], dtype=torch.int32, device=device) * stride[axis] for axis in range(self.spatial_dims) ] # to support torchscript, cannot directly use torch.meshgrid(shifts_centers). 
@@ -307,10 +304,7 @@ def forward(self, images: Tensor, feature_maps: list[Tensor]) -> list[Tensor]: batchsize = images.shape[0] dtype, device = feature_maps[0].dtype, feature_maps[0].device strides = [ - [ - torch.tensor(image_size[axis] // g[axis], dtype=torch.int64, device=device) - for axis in range(self.spatial_dims) - ] + [torch.tensor(image_size[axis] // g[axis], dtype=torch.int64, device=device) for axis in range(self.spatial_dims)] for g in grid_sizes ] diff --git a/monai/apps/detection/utils/detector_utils.py b/monai/apps/detection/utils/detector_utils.py index a687476996..dc4103cd23 100644 --- a/monai/apps/detection/utils/detector_utils.py +++ b/monai/apps/detection/utils/detector_utils.py @@ -80,9 +80,7 @@ def check_training_targets( for i in range(len(targets)): target = targets[i] if (target_label_key not in target.keys()) or (target_box_key not in target.keys()): - raise ValueError( - f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}." - ) + raise ValueError(f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}.") boxes = target[target_box_key] if not isinstance(boxes, torch.Tensor): @@ -91,12 +89,10 @@ def check_training_targets( if boxes.numel() == 0: warnings.warn( f"Warning: Given target boxes has shape of {boxes.shape}. " - f"The detector reshaped it with boxes = torch.reshape(boxes, [0, {2* spatial_dims}])." + f"The detector reshaped it with boxes = torch.reshape(boxes, [0, {2 * spatial_dims}])." ) else: - raise ValueError( - f"Expected target boxes to be a tensor of shape [N, {2* spatial_dims}], got {boxes.shape}.)." 
- ) + raise ValueError(f"Expected target boxes to be a tensor of shape [N, {2 * spatial_dims}], got {boxes.shape}.).") if not torch.is_floating_point(boxes): raise ValueError(f"Expected target boxes to be a float tensor, got {boxes.dtype}.") targets[i][target_box_key] = standardize_empty_box(boxes, spatial_dims=spatial_dims) # type: ignore diff --git a/monai/auto3dseg/analyzer.py b/monai/auto3dseg/analyzer.py index e60327b551..e1ae99a0e4 100644 --- a/monai/auto3dseg/analyzer.py +++ b/monai/auto3dseg/analyzer.py @@ -255,13 +255,9 @@ def __call__(self, data): else [1.0] * min(3, data[self.image_key].ndim) ) - report[ImageStatsKeys.SIZEMM] = [ - a * b for a, b in zip(report[ImageStatsKeys.SHAPE][0], report[ImageStatsKeys.SPACING]) - ] + report[ImageStatsKeys.SIZEMM] = [a * b for a, b in zip(report[ImageStatsKeys.SHAPE][0], report[ImageStatsKeys.SPACING])] - report[ImageStatsKeys.INTENSITY] = [ - self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_c) for nda_c in nda_croppeds - ] + report[ImageStatsKeys.INTENSITY] = [self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_c) for nda_c in nda_croppeds] if not verify_report_format(report, self.get_report_format()): raise RuntimeError(f"report generated by {self.__class__} differs from the report format.") @@ -269,7 +265,7 @@ def __call__(self, data): d[self.stats_name] = report torch.set_grad_enabled(restore_grad_state) - logger.debug(f"Get image stats spent {time.time()-start}") + logger.debug(f"Get image stats spent {time.time() - start}") return d @@ -340,9 +336,7 @@ def __call__(self, data: Mapping) -> dict: # perform calculation report = deepcopy(self.get_report_format()) - report[ImageStatsKeys.INTENSITY] = [ - self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_f) for nda_f in nda_foregrounds - ] + report[ImageStatsKeys.INTENSITY] = [self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_f) for nda_f in nda_foregrounds] if not verify_report_format(report, self.get_report_format()): raise RuntimeError(f"report generated by 
{self.__class__} differs from the report format.") @@ -350,7 +344,7 @@ def __call__(self, data: Mapping) -> dict: d[self.stats_name] = report torch.set_grad_enabled(restore_grad_state) - logger.debug(f"Get foreground image stats spent {time.time()-start}") + logger.debug(f"Get foreground image stats spent {time.time() - start}") return d @@ -378,9 +372,7 @@ class LabelStats(Analyzer): """ - def __init__( - self, image_key: str, label_key: str, stats_name: str = DataStatsKeys.LABEL_STATS, do_ccp: bool | None = True - ): + def __init__(self, image_key: str, label_key: str, stats_name: str = DataStatsKeys.LABEL_STATS, do_ccp: bool | None = True): self.image_key = image_key self.label_key = label_key self.do_ccp = do_ccp @@ -392,9 +384,7 @@ def __init__( } if self.do_ccp: - report_format[LabelStatsKeys.LABEL][0].update( - {LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None} - ) + report_format[LabelStatsKeys.LABEL][0].update({LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None}) super().__init__(stats_name, report_format) self.update_ops(LabelStatsKeys.IMAGE_INTST, SampleOperations()) @@ -483,9 +473,7 @@ def __call__(self, data: Mapping[Hashable, MetaTensor]) -> dict[Hashable, MetaTe mask_index = ndas_label == index nda_masks = [nda[mask_index] for nda in ndas] - label_dict[LabelStatsKeys.IMAGE_INTST] = [ - self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_m) for nda_m in nda_masks - ] + label_dict[LabelStatsKeys.IMAGE_INTST] = [self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_m) for nda_m in nda_masks] pixel_count = sum(mask_index) pixel_arr.append(pixel_count) @@ -508,9 +496,7 @@ def __call__(self, data: Mapping[Hashable, MetaTensor]) -> dict[Hashable, MetaTe report = deepcopy(self.get_report_format()) report[LabelStatsKeys.LABEL_UID] = unique_label - report[LabelStatsKeys.IMAGE_INTST] = [ - self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_f) for nda_f in nda_foregrounds - ] + report[LabelStatsKeys.IMAGE_INTST] = 
[self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_f) for nda_f in nda_foregrounds] report[LabelStatsKeys.LABEL] = label_substats if not verify_report_format(report, self.get_report_format()): @@ -519,7 +505,7 @@ def __call__(self, data: Mapping[Hashable, MetaTensor]) -> dict[Hashable, MetaTe d[self.stats_name] = report # type: ignore[assignment] torch.set_grad_enabled(restore_grad_state) - logger.debug(f"Get label stats spent {time.time()-start}") + logger.debug(f"Get label stats spent {time.time() - start}") return d # type: ignore[return-value] @@ -689,9 +675,7 @@ class LabelStatsSumm(Analyzer): """ - def __init__( - self, stats_name: str = DataStatsKeys.LABEL_STATS, average: bool | None = True, do_ccp: bool | None = True - ): + def __init__(self, stats_name: str = DataStatsKeys.LABEL_STATS, average: bool | None = True, do_ccp: bool | None = True): self.summary_average = average self.do_ccp = do_ccp @@ -701,9 +685,7 @@ def __init__( LabelStatsKeys.LABEL: [{LabelStatsKeys.PIXEL_PCT: None, LabelStatsKeys.IMAGE_INTST: None}], } if self.do_ccp: - report_format[LabelStatsKeys.LABEL][0].update( - {LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None} - ) + report_format[LabelStatsKeys.LABEL][0].update({LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None}) super().__init__(stats_name, report_format) self.update_ops(LabelStatsKeys.IMAGE_INTST, SummaryOperations()) @@ -794,9 +776,7 @@ def __call__(self, data: list[dict]) -> dict: intst_fixed_keys = [self.stats_name, label_str, label_id, intst_str] op_keys = report[label_str][0][intst_str].keys() intst_dict = concat_multikeys_to_dict(data, intst_fixed_keys, op_keys, allow_missing=True) - stats[intst_str] = self.ops[label_str][0][intst_str].evaluate( - intst_dict, dim=None if self.summary_average else 0 - ) + stats[intst_str] = self.ops[label_str][0][intst_str].evaluate(intst_dict, dim=None if self.summary_average else 0) detailed_label_list.append(stats) @@ -876,9 +856,7 @@ def __init__( 
self.image_key = image_key # set defaults - self.hist_bins: list[int] = ( - [100] if hist_bins is None else hist_bins if isinstance(hist_bins, list) else [hist_bins] - ) + self.hist_bins: list[int] = [100] if hist_bins is None else hist_bins if isinstance(hist_bins, list) else [hist_bins] self.hist_range: list = [-500, 500] if hist_range is None else hist_range report_format = {"counts": None, "bin_edges": None} @@ -897,9 +875,9 @@ def __init__( for i, hist_params in enumerate(zip(self.hist_bins, self.hist_range)): _hist_bins, _hist_range = hist_params if not isinstance(_hist_bins, int) or _hist_bins < 0: - raise ValueError(f"Expected {i+1}. hist_bins value to be positive integer but got {_hist_bins}") + raise ValueError(f"Expected {i + 1}. hist_bins value to be positive integer but got {_hist_bins}") if not isinstance(_hist_range, list) or len(_hist_range) != 2: - raise ValueError(f"Expected {i+1}. hist_range values to be list of length 2 but received {_hist_range}") + raise ValueError(f"Expected {i + 1}. hist_range values to be list of length 2 but received {_hist_range}") def __call__(self, data: dict) -> dict: """ @@ -934,8 +912,7 @@ def __call__(self, data: dict) -> dict: self.hist_range = nr_channels * self.hist_range if len(self.hist_range) != nr_channels: raise ValueError( - f"There is a mismatch between the number of channels ({nr_channels}) " - f"and histogram ranges ({len(self.hist_range)})." + f"There is a mismatch between the number of channels ({nr_channels}) and histogram ranges ({len(self.hist_range)})." 
) # perform calculation diff --git a/monai/data/wsi_reader.py b/monai/data/wsi_reader.py index 2a4fe9f7a8..9ff9d83236 100644 --- a/monai/data/wsi_reader.py +++ b/monai/data/wsi_reader.py @@ -144,9 +144,7 @@ def get_size(self, wsi, level: int) -> tuple[int, int]: """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") - def _find_closest_level( - self, name: str, value: tuple, value_at_levels: Sequence[tuple], atol: float, rtol: float - ) -> int: + def _find_closest_level(self, name: str, value: tuple, value_at_levels: Sequence[tuple], atol: float, rtol: float) -> int: """Find the level corresponding to the value of the quantity in the list of values at each level. Args: name: the name of the requested quantity @@ -170,9 +168,7 @@ def _find_closest_level( ) return value_at_levels.index(closest_value) - def get_valid_level( - self, wsi, level: int | None, mpp: float | tuple[float, float] | None, power: int | None - ) -> int: + def get_valid_level(self, wsi, level: int | None, mpp: float | tuple[float, float] | None, power: int | None) -> int: """ Returns the level associated to the resolution parameters in the whole slide image. @@ -210,7 +206,7 @@ def get_valid_level( # Set the default value if no resolution parameter is provided. 
level = 0 if level >= n_levels: - raise ValueError(f"The maximum level of this image is {n_levels-1} while level={level} is requested)!") + raise ValueError(f"The maximum level of this image is {n_levels - 1} while level={level} is requested)!") return level @@ -285,9 +281,7 @@ def _get_patch( """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") - def _get_metadata( - self, wsi, patch: NdarrayOrTensor, location: tuple[int, int], size: tuple[int, int], level: int - ) -> dict: + def _get_metadata(self, wsi, patch: NdarrayOrTensor, location: tuple[int, int], size: tuple[int, int], level: int) -> dict: """ Returns metadata of the extracted patch from the whole slide image. @@ -301,9 +295,7 @@ def _get_metadata( """ if self.channel_dim >= len(patch.shape) or self.channel_dim < -len(patch.shape): - raise ValueError( - f"The desired channel_dim ({self.channel_dim}) is out of bound for image shape: {patch.shape}" - ) + raise ValueError(f"The desired channel_dim ({self.channel_dim}) is out of bound for image shape: {patch.shape}") channel_dim: int = self.channel_dim + (len(patch.shape) if self.channel_dim < 0 else 0) metadata: dict = { "backend": self.backend, @@ -385,13 +377,9 @@ def get_data( patch = self._get_patch(each_wsi, location=location, size=size, level=level, dtype=dtype_np, mode=mode) # Convert the patch to torch.Tensor if dtype is torch - if isinstance(self.dtype, torch.dtype) or ( - self.device is not None and torch.device(self.device).type == "cuda" - ): + if isinstance(self.dtype, torch.dtype) or (self.device is not None and torch.device(self.device).type == "cuda"): # Ensure dtype is torch.dtype if the device is not "cpu" - dtype_torch = ( - dtype_numpy_to_torch(self.dtype) if not isinstance(self.dtype, torch.dtype) else self.dtype - ) + dtype_torch = dtype_numpy_to_torch(self.dtype) if not isinstance(self.dtype, torch.dtype) else self.dtype # Copy the numpy array if it is not writable if patch.flags["WRITEABLE"]: 
patch = torch.as_tensor(patch, dtype=dtype_torch, device=self.device) @@ -414,8 +402,7 @@ def get_data( # Check if there are three color channels for RGB elif mode in "RGB" and patch.shape[self.channel_dim] != 3: raise ValueError( - f"The image is expected to have three color channels in '{mode}' mode but has " - f"{patch.shape[self.channel_dim]}. " + f"The image is expected to have three color channels in '{mode}' mode but has {patch.shape[self.channel_dim]}. " ) # Get patch-related metadata metadata: dict = self._get_metadata(wsi=each_wsi, patch=patch, location=location, size=size, level=level) @@ -538,9 +525,7 @@ def __init__( **kwargs, ) else: - raise ValueError( - f"The supported backends are cucim, openslide, and tifffile but '{self.backend}' was given." - ) + raise ValueError(f"The supported backends are cucim, openslide, and tifffile but '{self.backend}' was given.") self.supported_suffixes = self.reader.supported_suffixes self.level = self.reader.level self.mpp_rtol = self.reader.mpp_rtol @@ -807,9 +792,7 @@ def _get_patch( """ # Extract a patch or the entire image # (reverse the order of location and size to become WxH for cuCIM) - patch: np.ndarray = wsi.read_region( - location=location[::-1], size=size[::-1], level=level, num_workers=self.num_workers - ) + patch: np.ndarray = wsi.read_region(location=location[::-1], size=size[::-1], level=level, num_workers=self.num_workers) # Convert to numpy patch = np.asarray(patch, dtype=dtype) diff --git a/monai/losses/unified_focal_loss.py b/monai/losses/unified_focal_loss.py index 8484eb67ed..06704c0104 100644 --- a/monai/losses/unified_focal_loss.py +++ b/monai/losses/unified_focal_loss.py @@ -217,7 +217,7 @@ def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor: y_true = one_hot(y_true, num_classes=self.num_classes) if torch.max(y_true) != self.num_classes - 1: - raise ValueError(f"Please make sure the number of classes is {self.num_classes-1}") + raise ValueError(f"Please make sure the 
number of classes is {self.num_classes - 1}") n_pred_ch = y_pred.shape[1] if self.to_onehot_y: diff --git a/monai/metrics/meandice.py b/monai/metrics/meandice.py index f21040d58e..2f4d3790be 100644 --- a/monai/metrics/meandice.py +++ b/monai/metrics/meandice.py @@ -100,9 +100,7 @@ def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor # compute dice (BxC) for each channel for each batch return self.dice_helper(y_pred=y_pred, y=y) # type: ignore - def aggregate( - self, reduction: MetricReduction | str | None = None - ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: + def aggregate(self, reduction: MetricReduction | str | None = None) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: """ Execute reduction and aggregation logic for the output of `compute_dice`. @@ -122,7 +120,7 @@ def aggregate( _f = {} if isinstance(self.return_with_label, bool): for i, v in enumerate(f): - _label_key = f"label_{i+1}" if not self.include_background else f"label_{i}" + _label_key = f"label_{i + 1}" if not self.include_background else f"label_{i}" _f[_label_key] = round(v.item(), 4) else: for key, v in zip(self.return_with_label, f): diff --git a/monai/networks/blocks/patchembedding.py b/monai/networks/blocks/patchembedding.py index fca566591a..bdd749bdf8 100644 --- a/monai/networks/blocks/patchembedding.py +++ b/monai/networks/blocks/patchembedding.py @@ -98,7 +98,7 @@ def __init__( chars = (("h", "p1"), ("w", "p2"), ("d", "p3"))[:spatial_dims] from_chars = "b c " + " ".join(f"({k} {v})" for k, v in chars) to_chars = f"b ({' '.join([c[0] for c in chars])}) ({' '.join([c[1] for c in chars])} c)" - axes_len = {f"p{i+1}": p for i, p in enumerate(patch_size)} + axes_len = {f"p{i + 1}": p for i, p in enumerate(patch_size)} self.patch_embeddings = nn.Sequential( Rearrange(f"{from_chars} -> {to_chars}", **axes_len), nn.Linear(self.patch_dim, hidden_size) ) diff --git a/monai/networks/layers/factories.py b/monai/networks/layers/factories.py index 
29b72a4f37..c3987fbd2d 100644 --- a/monai/networks/layers/factories.py +++ b/monai/networks/layers/factories.py @@ -95,7 +95,7 @@ def add_factory_callable(self, name: str, func: Callable, desc: str | None = Non self.add(name.upper(), description, func) # append name to the docstring assert self.__doc__ is not None - self.__doc__ += f"{', ' if len(self.names)>1 else ' '}``{name}``" + self.__doc__ += f"{', ' if len(self.names) > 1 else ' '}``{name}``" def add_factory_class(self, name: str, cls: type, desc: str | None = None) -> None: """ @@ -276,9 +276,7 @@ def instance_nvfuser_factory(dim): return types[dim - 1] if not has_nvfuser_instance_norm(): - warnings.warn( - "`apex.normalization.InstanceNorm3dNVFuser` is not installed properly, use nn.InstanceNorm3d instead." - ) + warnings.warn("`apex.normalization.InstanceNorm3dNVFuser` is not installed properly, use nn.InstanceNorm3d instead.") return nn.InstanceNorm3d return optional_import("apex.normalization", name="InstanceNorm3dNVFuser")[0] diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index d5ca876e98..6b13dab885 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -285,13 +285,11 @@ def compute_pad_width(self, spatial_shape: Sequence[int]) -> tuple[tuple[int, in elif len(spatial_border) == len(spatial_shape): data_pad_width = [(int(sp), int(sp)) for sp in spatial_border[: len(spatial_shape)]] elif len(spatial_border) == len(spatial_shape) * 2: - data_pad_width = [ - (int(spatial_border[2 * i]), int(spatial_border[2 * i + 1])) for i in range(len(spatial_shape)) - ] + data_pad_width = [(int(spatial_border[2 * i]), int(spatial_border[2 * i + 1])) for i in range(len(spatial_shape))] else: raise ValueError( f"Unsupported spatial_border length: {len(spatial_border)}, available options are " - f"[1, len(spatial_shape)={len(spatial_shape)}, 2*len(spatial_shape)={2*len(spatial_shape)}]." 
+ f"[1, len(spatial_shape)={len(spatial_shape)}, 2*len(spatial_shape)={2 * len(spatial_shape)}]." ) return tuple([(0, 0)] + data_pad_width) # type: ignore @@ -662,9 +660,7 @@ def __init__( random_size: bool = False, lazy: bool = False, ) -> None: - super().__init__( - roi_size=-1, max_roi_size=None, random_center=random_center, random_size=random_size, lazy=lazy - ) + super().__init__(roi_size=-1, max_roi_size=None, random_center=random_center, random_size=random_size, lazy=lazy) self.roi_scale = roi_scale self.max_roi_scale = max_roi_scale @@ -743,9 +739,7 @@ def __init__( self.num_samples = num_samples self.cropper = RandSpatialCrop(roi_size, max_roi_size, random_center, random_size, lazy) - def set_random_state( - self, seed: int | None = None, state: np.random.RandomState | None = None - ) -> RandSpatialCropSamples: + def set_random_state(self, seed: int | None = None, state: np.random.RandomState | None = None) -> RandSpatialCropSamples: super().set_random_state(seed, state) self.cropper.set_random_state(seed, state) return self @@ -898,9 +892,7 @@ def crop_pad( slices = self.compute_slices(roi_start=box_start, roi_end=box_end) cropped = super().__call__(img=img, slices=slices, lazy=lazy) pad_to_start = np.maximum(-box_start, 0) - pad_to_end = np.maximum( - box_end - np.asarray(img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:]), 0 - ) + pad_to_end = np.maximum(box_end - np.asarray(img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:]), 0) pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) pad_width = BorderPad(spatial_border=pad).compute_pad_width( cropped.peek_pending_shape() if isinstance(cropped, MetaTensor) else cropped.shape[1:] @@ -1329,9 +1321,7 @@ def randomize( if indices_ is None: if label is None: raise ValueError("label must not be None.") - indices_ = map_classes_to_indices( - label, self.num_classes, image, self.image_threshold, self.max_samples_per_class - ) + indices_ = 
map_classes_to_indices(label, self.num_classes, image, self.image_threshold, self.max_samples_per_class) _shape = None if label is not None: _shape = label.peek_pending_shape() if isinstance(label, MetaTensor) else label.shape[1:] @@ -1469,9 +1459,7 @@ def __call__( # type: ignore[override] pad_info = ret_.applied_operations.pop() crop_info = ret_.applied_operations.pop() orig_size = crop_info.get(TraceKeys.ORIG_SIZE) - self.push_transform( - ret_, orig_size=orig_size, extra_info={"pad_info": pad_info, "crop_info": crop_info}, lazy=lazy_ - ) + self.push_transform(ret_, orig_size=orig_size, extra_info={"pad_info": pad_info, "crop_info": crop_info}, lazy=lazy_) else: pad_info = ret_.pending_operations.pop() crop_info = ret_.pending_operations.pop() diff --git a/monai/transforms/regularization/array.py b/monai/transforms/regularization/array.py index 66a5116c1a..e7bce8f11a 100644 --- a/monai/transforms/regularization/array.py +++ b/monai/transforms/regularization/array.py @@ -25,7 +25,6 @@ class Mixer(RandomizableTransform): - def __init__(self, batch_size: int, alpha: float = 1.0) -> None: """ Mixer is a base class providing the basic logic for the mixup-class of @@ -41,7 +40,7 @@ def __init__(self, batch_size: int, alpha: float = 1.0) -> None: """ super().__init__() if alpha <= 0: - raise ValueError(f"Expected positive number, but got {alpha = }") + raise ValueError(f"Expected positive number, but got {alpha=}") self.alpha = alpha self.batch_size = batch_size From 36e2623a3be2efafe706f647b2364dfa404abb62 Mon Sep 17 00:00:00 2001 From: "R. Garcia-Dias" Date: Mon, 9 Mar 2026 10:10:04 +0000 Subject: [PATCH 6/6] lint Signed-off-by: R. 
Garcia-Dias --- monai/apps/auto3dseg/bundle_gen.py | 19 +++-- .../detection/networks/retinanet_detector.py | 75 ++++++++++++------- monai/apps/detection/utils/anchor_utils.py | 12 ++- monai/apps/detection/utils/detector_utils.py | 8 +- monai/auto3dseg/analyzer.py | 48 +++++++++--- monai/data/wsi_reader.py | 36 ++++++--- monai/metrics/meandice.py | 4 +- monai/networks/layers/factories.py | 4 +- monai/transforms/croppad/array.py | 24 ++++-- 9 files changed, 164 insertions(+), 66 deletions(-) diff --git a/monai/apps/auto3dseg/bundle_gen.py b/monai/apps/auto3dseg/bundle_gen.py index d575ba9937..227782cf7d 100644 --- a/monai/apps/auto3dseg/bundle_gen.py +++ b/monai/apps/auto3dseg/bundle_gen.py @@ -269,11 +269,15 @@ def _run_cmd(self, cmd: str, devices_info: str = "") -> subprocess.CompletedProc return _run_cmd_bcprun(cmd, n=self.device_setting["NUM_NODES"], p=self.device_setting["n_devices"]) elif int(self.device_setting["n_devices"]) > 1: - return _run_cmd_torchrun(cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True) + return _run_cmd_torchrun( + cmd, nnodes=1, nproc_per_node=self.device_setting["n_devices"], env=ps_environ, check=True + ) else: return run_cmd(cmd.split(), run_cmd_verbose=True, env=ps_environ, check=True) - def train(self, train_params: None | dict = None, device_setting: None | dict = None) -> subprocess.CompletedProcess: + def train( + self, train_params: None | dict = None, device_setting: None | dict = None + ) -> subprocess.CompletedProcess: """ Load the run function in the training script of each model. Training parameter is predefined by the algo_config.yaml file, which is pre-filled by the fill_template_config function in the same instance. 
@@ -364,7 +368,9 @@ def get_output_path(self): # path to download the algo_templates -default_algo_zip = f"https://github.com/Project-MONAI/research-contributions/releases/download/algo_templates/{ALGO_HASH}.tar.gz" +default_algo_zip = ( + f"https://github.com/Project-MONAI/research-contributions/releases/download/algo_templates/{ALGO_HASH}.tar.gz" +) # default algorithms default_algos = { @@ -653,7 +659,6 @@ def generate( gen_algo.export_to_disk(output_folder, name, fold=f_id) algo_to_pickle(gen_algo, template_path=algo.template_path) - self.history.append({ - AlgoKeys.ID: name, - AlgoKeys.ALGO: gen_algo, - }) # track the previous, may create a persistent history + self.history.append( + {AlgoKeys.ID: name, AlgoKeys.ALGO: gen_algo} + ) # track the previous, may create a persistent history diff --git a/monai/apps/detection/networks/retinanet_detector.py b/monai/apps/detection/networks/retinanet_detector.py index 265db1519a..321b5bcd7c 100644 --- a/monai/apps/detection/networks/retinanet_detector.py +++ b/monai/apps/detection/networks/retinanet_detector.py @@ -59,7 +59,9 @@ from monai.networks.nets import resnet from monai.utils import BlendMode, PytorchPadMode, ensure_tuple_rep, optional_import -BalancedPositiveNegativeSampler, _ = optional_import("torchvision.models.detection._utils", name="BalancedPositiveNegativeSampler") +BalancedPositiveNegativeSampler, _ = optional_import( + "torchvision.models.detection._utils", name="BalancedPositiveNegativeSampler" +) Matcher, _ = optional_import("torchvision.models.detection._utils", name="Matcher") @@ -326,7 +328,9 @@ def set_box_regression_loss(self, box_loss: nn.Module, encode_gt: bool, decode_p self.encode_gt = encode_gt self.decode_pred = decode_pred - def set_regular_matcher(self, fg_iou_thresh: float, bg_iou_thresh: float, allow_low_quality_matches: bool = True) -> None: + def set_regular_matcher( + self, fg_iou_thresh: float, bg_iou_thresh: float, allow_low_quality_matches: bool = True + ) -> None: """ Using for 
training. Set torchvision matcher that matches anchors with ground truth boxes. @@ -340,7 +344,9 @@ def set_regular_matcher(self, fg_iou_thresh: float, bg_iou_thresh: float, allow_ raise ValueError( f"Require fg_iou_thresh >= bg_iou_thresh. Got fg_iou_thresh={fg_iou_thresh}, bg_iou_thresh={bg_iou_thresh}." ) - self.proposal_matcher = Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=allow_low_quality_matches) + self.proposal_matcher = Matcher( + fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=allow_low_quality_matches + ) def set_atss_matcher(self, num_candidates: int = 4, center_in_gt: bool = False) -> None: """ @@ -489,7 +495,9 @@ def forward( """ # 1. Check if input arguments are valid if self.training: - targets = check_training_targets(input_images, targets, self.spatial_dims, self.target_label_key, self.target_box_key) + targets = check_training_targets( + input_images, targets, self.spatial_dims, self.target_label_key, self.target_box_key + ) self._check_detector_training_components() # 2. Pad list of images to a single Tensor `images` with spatial size divisible by self.size_divisible. @@ -509,8 +517,12 @@ def forward( ensure_dict_value_to_list_(head_outputs) else: if self.inferer is None: - raise ValueError("`self.inferer` is not defined.Please refer to function self.set_sliding_window_inferer(*).") - head_outputs = predict_with_inferer(images, self.network, keys=[self.cls_key, self.box_reg_key], inferer=self.inferer) + raise ValueError( + "`self.inferer` is not defined.Please refer to function self.set_sliding_window_inferer(*)." + ) + head_outputs = predict_with_inferer( + images, self.network, keys=[self.cls_key, self.box_reg_key], inferer=self.inferer + ) # 4. Generate anchors and store it in self.anchors: List[Tensor] self.generate_anchors(images, head_outputs) @@ -532,10 +544,7 @@ def forward( # 6(2). 
If during inference, return detection results detections = self.postprocess_detections( - head_outputs, - self.anchors, - image_sizes, - num_anchor_locs_per_level, # type: ignore + head_outputs, self.anchors, image_sizes, num_anchor_locs_per_level # type: ignore ) return detections @@ -544,7 +553,9 @@ def _check_detector_training_components(self): Check if self.proposal_matcher and self.fg_bg_sampler have been set for training. """ if not hasattr(self, "proposal_matcher"): - raise AttributeError("Matcher is not set. Please refer to self.set_regular_matcher(*) or self.set_atss_matcher(*).") + raise AttributeError( + "Matcher is not set. Please refer to self.set_regular_matcher(*) or self.set_atss_matcher(*)." + ) if self.fg_bg_sampler is None and self.debug: warnings.warn( "No balanced sampler is used. Negative samples are likely to " @@ -641,7 +652,9 @@ def postprocess_detections( """ # recover level sizes, HWA or HWDA for each level - num_anchors_per_level = [num_anchor_locs * self.num_anchors_per_loc for num_anchor_locs in num_anchor_locs_per_level] + num_anchors_per_level = [ + num_anchor_locs * self.num_anchors_per_loc for num_anchor_locs in num_anchor_locs_per_level + ] # split outputs per level split_head_outputs: dict[str, list[Tensor]] = {} @@ -658,7 +671,9 @@ def postprocess_detections( detections: list[dict[str, Tensor]] = [] for index in range(num_images): - box_regression_per_image = [br[index] for br in box_regression] # List[Tensor], each sized (HWA, 2*spatial_dims) + box_regression_per_image = [ + br[index] for br in box_regression + ] # List[Tensor], each sized (HWA, 2*spatial_dims) logits_per_image = [cl[index] for cl in class_logits] # List[Tensor], each sized (HWA, self.num_classes) anchors_per_image, img_spatial_size = split_anchors[index], image_sizes[index] # decode box regression into boxes @@ -671,11 +686,13 @@ def postprocess_detections( boxes_per_image, logits_per_image, img_spatial_size ) - detections.append({ - self.target_box_key: 
selected_boxes, # Tensor, sized (N, 2*spatial_dims) - self.pred_score_key: selected_scores, # Tensor, sized (N, ) - self.target_label_key: selected_labels, # Tensor, sized (N, ) - }) + detections.append( + { + self.target_box_key: selected_boxes, # Tensor, sized (N, 2*spatial_dims) + self.pred_score_key: selected_scores, # Tensor, sized (N, ) + self.target_label_key: selected_labels, # Tensor, sized (N, ) + } + ) return detections @@ -704,7 +721,9 @@ def compute_loss( """ matched_idxs = self.compute_anchor_matched_idxs(anchors, targets, num_anchor_locs_per_level) losses_cls = self.compute_cls_loss(head_outputs_reshape[self.cls_key], targets, matched_idxs) - losses_box_regression = self.compute_box_loss(head_outputs_reshape[self.box_reg_key], targets, anchors, matched_idxs) + losses_box_regression = self.compute_box_loss( + head_outputs_reshape[self.box_reg_key], targets, anchors, matched_idxs + ) return {self.cls_key: losses_cls, self.box_reg_key: losses_box_regression} def compute_anchor_matched_idxs( @@ -737,7 +756,9 @@ def compute_anchor_matched_idxs( # anchors_per_image: Tensor, targets_per_image: Dice[str, Tensor] if targets_per_image[self.target_box_key].numel() == 0: # if no GT boxes - matched_idxs.append(torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)) + matched_idxs.append( + torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device) + ) continue # matched_idxs_per_image (Tensor[int64]): Tensor sized (sum(HWA),) or (sum(HWDA),) @@ -777,7 +798,9 @@ def compute_anchor_matched_idxs( matched_idxs.append(matched_idxs_per_image) return matched_idxs - def compute_cls_loss(self, cls_logits: Tensor, targets: list[dict[str, Tensor]], matched_idxs: list[Tensor]) -> Tensor: + def compute_cls_loss( + self, cls_logits: Tensor, targets: list[dict[str, Tensor]], matched_idxs: list[Tensor] + ) -> Tensor: """ Compute classification losses. 
@@ -895,7 +918,9 @@ def get_cls_train_sample_per_image( gt_classes_target = torch.zeros_like(cls_logits_per_image) # (sum(HW(D)A), self.num_classes) gt_classes_target[ foreground_idxs_per_image, # fg anchor idx in - targets_per_image[self.target_label_key][matched_idxs_per_image[foreground_idxs_per_image]], # fg class label + targets_per_image[self.target_label_key][ + matched_idxs_per_image[foreground_idxs_per_image] + ], # fg class label ] = 1.0 if self.fg_bg_sampler is None: @@ -967,9 +992,9 @@ def get_box_train_sample_per_image( # select only the foreground boxes # matched GT boxes for foreground anchors - matched_gt_boxes_per_image = targets_per_image[self.target_box_key][matched_idxs_per_image[foreground_idxs_per_image]].to( - box_regression_per_image.device - ) + matched_gt_boxes_per_image = targets_per_image[self.target_box_key][ + matched_idxs_per_image[foreground_idxs_per_image] + ].to(box_regression_per_image.device) # predicted box regression for foreground anchors box_regression_per_image = box_regression_per_image[foreground_idxs_per_image, :] # foreground anchors diff --git a/monai/apps/detection/utils/anchor_utils.py b/monai/apps/detection/utils/anchor_utils.py index f846be1850..c1d723c89f 100644 --- a/monai/apps/detection/utils/anchor_utils.py +++ b/monai/apps/detection/utils/anchor_utils.py @@ -136,7 +136,9 @@ def __init__( self.indexing = look_up_option(indexing, ["ij", "xy"]) self.aspect_ratios = aspect_ratios - self.cell_anchors = [self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(self.sizes, aspect_ratios)] + self.cell_anchors = [ + self.generate_anchors(size, aspect_ratio) for size, aspect_ratio in zip(self.sizes, aspect_ratios) + ] # This comment comes from torchvision. # TODO: https://github.com/pytorch/pytorch/issues/26792 @@ -251,7 +253,8 @@ def grid_anchors(self, grid_sizes: list[list[int]], strides: list[list[Tensor]]) # compute anchor centers regarding to the image. 
# shifts_centers is [x_center, y_center] or [x_center, y_center, z_center] shifts_centers = [ - torch.arange(0, size[axis], dtype=torch.int32, device=device) * stride[axis] for axis in range(self.spatial_dims) + torch.arange(0, size[axis], dtype=torch.int32, device=device) * stride[axis] + for axis in range(self.spatial_dims) ] # to support torchscript, cannot directly use torch.meshgrid(shifts_centers). @@ -304,7 +307,10 @@ def forward(self, images: Tensor, feature_maps: list[Tensor]) -> list[Tensor]: batchsize = images.shape[0] dtype, device = feature_maps[0].dtype, feature_maps[0].device strides = [ - [torch.tensor(image_size[axis] // g[axis], dtype=torch.int64, device=device) for axis in range(self.spatial_dims)] + [ + torch.tensor(image_size[axis] // g[axis], dtype=torch.int64, device=device) + for axis in range(self.spatial_dims) + ] for g in grid_sizes ] diff --git a/monai/apps/detection/utils/detector_utils.py b/monai/apps/detection/utils/detector_utils.py index dc4103cd23..c22df38be1 100644 --- a/monai/apps/detection/utils/detector_utils.py +++ b/monai/apps/detection/utils/detector_utils.py @@ -80,7 +80,9 @@ def check_training_targets( for i in range(len(targets)): target = targets[i] if (target_label_key not in target.keys()) or (target_box_key not in target.keys()): - raise ValueError(f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}.") + raise ValueError( + f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}." + ) boxes = target[target_box_key] if not isinstance(boxes, torch.Tensor): @@ -92,7 +94,9 @@ def check_training_targets( f"The detector reshaped it with boxes = torch.reshape(boxes, [0, {2 * spatial_dims}])." ) else: - raise ValueError(f"Expected target boxes to be a tensor of shape [N, {2 * spatial_dims}], got {boxes.shape}.).") + raise ValueError( + f"Expected target boxes to be a tensor of shape [N, {2 * spatial_dims}], got {boxes.shape}.)." 
+ ) if not torch.is_floating_point(boxes): raise ValueError(f"Expected target boxes to be a float tensor, got {boxes.dtype}.") targets[i][target_box_key] = standardize_empty_box(boxes, spatial_dims=spatial_dims) # type: ignore diff --git a/monai/auto3dseg/analyzer.py b/monai/auto3dseg/analyzer.py index e1ae99a0e4..0a18983d31 100644 --- a/monai/auto3dseg/analyzer.py +++ b/monai/auto3dseg/analyzer.py @@ -255,9 +255,13 @@ def __call__(self, data): else [1.0] * min(3, data[self.image_key].ndim) ) - report[ImageStatsKeys.SIZEMM] = [a * b for a, b in zip(report[ImageStatsKeys.SHAPE][0], report[ImageStatsKeys.SPACING])] + report[ImageStatsKeys.SIZEMM] = [ + a * b for a, b in zip(report[ImageStatsKeys.SHAPE][0], report[ImageStatsKeys.SPACING]) + ] - report[ImageStatsKeys.INTENSITY] = [self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_c) for nda_c in nda_croppeds] + report[ImageStatsKeys.INTENSITY] = [ + self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_c) for nda_c in nda_croppeds + ] if not verify_report_format(report, self.get_report_format()): raise RuntimeError(f"report generated by {self.__class__} differs from the report format.") @@ -336,7 +340,9 @@ def __call__(self, data: Mapping) -> dict: # perform calculation report = deepcopy(self.get_report_format()) - report[ImageStatsKeys.INTENSITY] = [self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_f) for nda_f in nda_foregrounds] + report[ImageStatsKeys.INTENSITY] = [ + self.ops[ImageStatsKeys.INTENSITY].evaluate(nda_f) for nda_f in nda_foregrounds + ] if not verify_report_format(report, self.get_report_format()): raise RuntimeError(f"report generated by {self.__class__} differs from the report format.") @@ -372,7 +378,9 @@ class LabelStats(Analyzer): """ - def __init__(self, image_key: str, label_key: str, stats_name: str = DataStatsKeys.LABEL_STATS, do_ccp: bool | None = True): + def __init__( + self, image_key: str, label_key: str, stats_name: str = DataStatsKeys.LABEL_STATS, do_ccp: bool | None = True + ): self.image_key = 
image_key self.label_key = label_key self.do_ccp = do_ccp @@ -384,7 +392,9 @@ def __init__(self, image_key: str, label_key: str, stats_name: str = DataStatsKe } if self.do_ccp: - report_format[LabelStatsKeys.LABEL][0].update({LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None}) + report_format[LabelStatsKeys.LABEL][0].update( + {LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None} + ) super().__init__(stats_name, report_format) self.update_ops(LabelStatsKeys.IMAGE_INTST, SampleOperations()) @@ -473,7 +483,9 @@ def __call__(self, data: Mapping[Hashable, MetaTensor]) -> dict[Hashable, MetaTe mask_index = ndas_label == index nda_masks = [nda[mask_index] for nda in ndas] - label_dict[LabelStatsKeys.IMAGE_INTST] = [self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_m) for nda_m in nda_masks] + label_dict[LabelStatsKeys.IMAGE_INTST] = [ + self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_m) for nda_m in nda_masks + ] pixel_count = sum(mask_index) pixel_arr.append(pixel_count) @@ -496,7 +508,9 @@ def __call__(self, data: Mapping[Hashable, MetaTensor]) -> dict[Hashable, MetaTe report = deepcopy(self.get_report_format()) report[LabelStatsKeys.LABEL_UID] = unique_label - report[LabelStatsKeys.IMAGE_INTST] = [self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_f) for nda_f in nda_foregrounds] + report[LabelStatsKeys.IMAGE_INTST] = [ + self.ops[LabelStatsKeys.IMAGE_INTST].evaluate(nda_f) for nda_f in nda_foregrounds + ] report[LabelStatsKeys.LABEL] = label_substats if not verify_report_format(report, self.get_report_format()): @@ -675,7 +689,9 @@ class LabelStatsSumm(Analyzer): """ - def __init__(self, stats_name: str = DataStatsKeys.LABEL_STATS, average: bool | None = True, do_ccp: bool | None = True): + def __init__( + self, stats_name: str = DataStatsKeys.LABEL_STATS, average: bool | None = True, do_ccp: bool | None = True + ): self.summary_average = average self.do_ccp = do_ccp @@ -685,7 +701,9 @@ def __init__(self, stats_name: str = 
DataStatsKeys.LABEL_STATS, average: bool | LabelStatsKeys.LABEL: [{LabelStatsKeys.PIXEL_PCT: None, LabelStatsKeys.IMAGE_INTST: None}], } if self.do_ccp: - report_format[LabelStatsKeys.LABEL][0].update({LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None}) + report_format[LabelStatsKeys.LABEL][0].update( + {LabelStatsKeys.LABEL_SHAPE: None, LabelStatsKeys.LABEL_NCOMP: None} + ) super().__init__(stats_name, report_format) self.update_ops(LabelStatsKeys.IMAGE_INTST, SummaryOperations()) @@ -776,7 +794,9 @@ def __call__(self, data: list[dict]) -> dict: intst_fixed_keys = [self.stats_name, label_str, label_id, intst_str] op_keys = report[label_str][0][intst_str].keys() intst_dict = concat_multikeys_to_dict(data, intst_fixed_keys, op_keys, allow_missing=True) - stats[intst_str] = self.ops[label_str][0][intst_str].evaluate(intst_dict, dim=None if self.summary_average else 0) + stats[intst_str] = self.ops[label_str][0][intst_str].evaluate( + intst_dict, dim=None if self.summary_average else 0 + ) detailed_label_list.append(stats) @@ -856,7 +876,9 @@ def __init__( self.image_key = image_key # set defaults - self.hist_bins: list[int] = [100] if hist_bins is None else hist_bins if isinstance(hist_bins, list) else [hist_bins] + self.hist_bins: list[int] = ( + [100] if hist_bins is None else hist_bins if isinstance(hist_bins, list) else [hist_bins] + ) self.hist_range: list = [-500, 500] if hist_range is None else hist_range report_format = {"counts": None, "bin_edges": None} @@ -877,7 +899,9 @@ def __init__( if not isinstance(_hist_bins, int) or _hist_bins < 0: raise ValueError(f"Expected {i + 1}. hist_bins value to be positive integer but got {_hist_bins}") if not isinstance(_hist_range, list) or len(_hist_range) != 2: - raise ValueError(f"Expected {i + 1}. hist_range values to be list of length 2 but received {_hist_range}") + raise ValueError( + f"Expected {i + 1}. 
hist_range values to be list of length 2 but received {_hist_range}" + ) def __call__(self, data: dict) -> dict: """ diff --git a/monai/data/wsi_reader.py b/monai/data/wsi_reader.py index 9ff9d83236..a9f7fab1c2 100644 --- a/monai/data/wsi_reader.py +++ b/monai/data/wsi_reader.py @@ -144,7 +144,9 @@ def get_size(self, wsi, level: int) -> tuple[int, int]: """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") - def _find_closest_level(self, name: str, value: tuple, value_at_levels: Sequence[tuple], atol: float, rtol: float) -> int: + def _find_closest_level( + self, name: str, value: tuple, value_at_levels: Sequence[tuple], atol: float, rtol: float + ) -> int: """Find the level corresponding to the value of the quantity in the list of values at each level. Args: name: the name of the requested quantity @@ -168,7 +170,9 @@ def _find_closest_level(self, name: str, value: tuple, value_at_levels: Sequence ) return value_at_levels.index(closest_value) - def get_valid_level(self, wsi, level: int | None, mpp: float | tuple[float, float] | None, power: int | None) -> int: + def get_valid_level( + self, wsi, level: int | None, mpp: float | tuple[float, float] | None, power: int | None + ) -> int: """ Returns the level associated to the resolution parameters in the whole slide image. @@ -206,7 +210,9 @@ def get_valid_level(self, wsi, level: int | None, mpp: float | tuple[float, floa # Set the default value if no resolution parameter is provided. level = 0 if level >= n_levels: - raise ValueError(f"The maximum level of this image is {n_levels - 1} while level={level} is requested)!") + raise ValueError( + f"The maximum level of this image is {n_levels - 1} while level={level} is requested)!" 
+ ) return level @@ -281,7 +287,9 @@ def _get_patch( """ raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.") - def _get_metadata(self, wsi, patch: NdarrayOrTensor, location: tuple[int, int], size: tuple[int, int], level: int) -> dict: + def _get_metadata( + self, wsi, patch: NdarrayOrTensor, location: tuple[int, int], size: tuple[int, int], level: int + ) -> dict: """ Returns metadata of the extracted patch from the whole slide image. @@ -295,7 +303,9 @@ def _get_metadata(self, wsi, patch: NdarrayOrTensor, location: tuple[int, int], """ if self.channel_dim >= len(patch.shape) or self.channel_dim < -len(patch.shape): - raise ValueError(f"The desired channel_dim ({self.channel_dim}) is out of bound for image shape: {patch.shape}") + raise ValueError( + f"The desired channel_dim ({self.channel_dim}) is out of bound for image shape: {patch.shape}" + ) channel_dim: int = self.channel_dim + (len(patch.shape) if self.channel_dim < 0 else 0) metadata: dict = { "backend": self.backend, @@ -377,9 +387,13 @@ def get_data( patch = self._get_patch(each_wsi, location=location, size=size, level=level, dtype=dtype_np, mode=mode) # Convert the patch to torch.Tensor if dtype is torch - if isinstance(self.dtype, torch.dtype) or (self.device is not None and torch.device(self.device).type == "cuda"): + if isinstance(self.dtype, torch.dtype) or ( + self.device is not None and torch.device(self.device).type == "cuda" + ): # Ensure dtype is torch.dtype if the device is not "cpu" - dtype_torch = dtype_numpy_to_torch(self.dtype) if not isinstance(self.dtype, torch.dtype) else self.dtype + dtype_torch = ( + dtype_numpy_to_torch(self.dtype) if not isinstance(self.dtype, torch.dtype) else self.dtype + ) # Copy the numpy array if it is not writable if patch.flags["WRITEABLE"]: patch = torch.as_tensor(patch, dtype=dtype_torch, device=self.device) @@ -525,7 +539,9 @@ def __init__( **kwargs, ) else: - raise ValueError(f"The supported backends are cucim, 
openslide, and tifffile but '{self.backend}' was given.") + raise ValueError( + f"The supported backends are cucim, openslide, and tifffile but '{self.backend}' was given." + ) self.supported_suffixes = self.reader.supported_suffixes self.level = self.reader.level self.mpp_rtol = self.reader.mpp_rtol @@ -792,7 +808,9 @@ def _get_patch( """ # Extract a patch or the entire image # (reverse the order of location and size to become WxH for cuCIM) - patch: np.ndarray = wsi.read_region(location=location[::-1], size=size[::-1], level=level, num_workers=self.num_workers) + patch: np.ndarray = wsi.read_region( + location=location[::-1], size=size[::-1], level=level, num_workers=self.num_workers + ) # Convert to numpy patch = np.asarray(patch, dtype=dtype) diff --git a/monai/metrics/meandice.py b/monai/metrics/meandice.py index 2f4d3790be..c0f6ff73f2 100644 --- a/monai/metrics/meandice.py +++ b/monai/metrics/meandice.py @@ -100,7 +100,9 @@ def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor # compute dice (BxC) for each channel for each batch return self.dice_helper(y_pred=y_pred, y=y) # type: ignore - def aggregate(self, reduction: MetricReduction | str | None = None) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: + def aggregate( + self, reduction: MetricReduction | str | None = None + ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]: """ Execute reduction and aggregation logic for the output of `compute_dice`. 
diff --git a/monai/networks/layers/factories.py b/monai/networks/layers/factories.py index c3987fbd2d..9ea181974a 100644 --- a/monai/networks/layers/factories.py +++ b/monai/networks/layers/factories.py @@ -276,7 +276,9 @@ def instance_nvfuser_factory(dim): return types[dim - 1] if not has_nvfuser_instance_norm(): - warnings.warn("`apex.normalization.InstanceNorm3dNVFuser` is not installed properly, use nn.InstanceNorm3d instead.") + warnings.warn( + "`apex.normalization.InstanceNorm3dNVFuser` is not installed properly, use nn.InstanceNorm3d instead." + ) return nn.InstanceNorm3d return optional_import("apex.normalization", name="InstanceNorm3dNVFuser")[0] diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py index 6b13dab885..982e353e1a 100644 --- a/monai/transforms/croppad/array.py +++ b/monai/transforms/croppad/array.py @@ -285,7 +285,9 @@ def compute_pad_width(self, spatial_shape: Sequence[int]) -> tuple[tuple[int, in elif len(spatial_border) == len(spatial_shape): data_pad_width = [(int(sp), int(sp)) for sp in spatial_border[: len(spatial_shape)]] elif len(spatial_border) == len(spatial_shape) * 2: - data_pad_width = [(int(spatial_border[2 * i]), int(spatial_border[2 * i + 1])) for i in range(len(spatial_shape))] + data_pad_width = [ + (int(spatial_border[2 * i]), int(spatial_border[2 * i + 1])) for i in range(len(spatial_shape)) + ] else: raise ValueError( f"Unsupported spatial_border length: {len(spatial_border)}, available options are " @@ -660,7 +662,9 @@ def __init__( random_size: bool = False, lazy: bool = False, ) -> None: - super().__init__(roi_size=-1, max_roi_size=None, random_center=random_center, random_size=random_size, lazy=lazy) + super().__init__( + roi_size=-1, max_roi_size=None, random_center=random_center, random_size=random_size, lazy=lazy + ) self.roi_scale = roi_scale self.max_roi_scale = max_roi_scale @@ -739,7 +743,9 @@ def __init__( self.num_samples = num_samples self.cropper = RandSpatialCrop(roi_size, 
max_roi_size, random_center, random_size, lazy) - def set_random_state(self, seed: int | None = None, state: np.random.RandomState | None = None) -> RandSpatialCropSamples: + def set_random_state( + self, seed: int | None = None, state: np.random.RandomState | None = None + ) -> RandSpatialCropSamples: super().set_random_state(seed, state) self.cropper.set_random_state(seed, state) return self @@ -892,7 +898,9 @@ def crop_pad( slices = self.compute_slices(roi_start=box_start, roi_end=box_end) cropped = super().__call__(img=img, slices=slices, lazy=lazy) pad_to_start = np.maximum(-box_start, 0) - pad_to_end = np.maximum(box_end - np.asarray(img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:]), 0) + pad_to_end = np.maximum( + box_end - np.asarray(img.peek_pending_shape() if isinstance(img, MetaTensor) else img.shape[1:]), 0 + ) pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist()))) pad_width = BorderPad(spatial_border=pad).compute_pad_width( cropped.peek_pending_shape() if isinstance(cropped, MetaTensor) else cropped.shape[1:] @@ -1321,7 +1329,9 @@ def randomize( if indices_ is None: if label is None: raise ValueError("label must not be None.") - indices_ = map_classes_to_indices(label, self.num_classes, image, self.image_threshold, self.max_samples_per_class) + indices_ = map_classes_to_indices( + label, self.num_classes, image, self.image_threshold, self.max_samples_per_class + ) _shape = None if label is not None: _shape = label.peek_pending_shape() if isinstance(label, MetaTensor) else label.shape[1:] @@ -1459,7 +1469,9 @@ def __call__( # type: ignore[override] pad_info = ret_.applied_operations.pop() crop_info = ret_.applied_operations.pop() orig_size = crop_info.get(TraceKeys.ORIG_SIZE) - self.push_transform(ret_, orig_size=orig_size, extra_info={"pad_info": pad_info, "crop_info": crop_info}, lazy=lazy_) + self.push_transform( + ret_, orig_size=orig_size, extra_info={"pad_info": pad_info, "crop_info": crop_info}, lazy=lazy_ + 
) else: pad_info = ret_.pending_operations.pop() crop_info = ret_.pending_operations.pop()