diff --git a/examples/acrobat_2023/valis/feature_detectors.py b/examples/acrobat_2023/valis/feature_detectors.py index 45ab9c2f..25cba7fa 100644 --- a/examples/acrobat_2023/valis/feature_detectors.py +++ b/examples/acrobat_2023/valis/feature_detectors.py @@ -471,7 +471,7 @@ def compute(self, img, kp_pos_xy): descriptors = [superpoint.sample_descriptors(k[None], d[None], 8)[0] for k, d in zip([torch.from_numpy(kp_pos_xy.astype(np.float32))], descriptors)] - descriptors = descriptors[0].detach().numpy().T + descriptors = descriptors[0].detach().cpu().numpy().T else: kp = cv2.KeyPoint_convert(kp_pos_xy.tolist()) kp, descriptors = self.kp_descriptor.compute(img, kp) @@ -487,8 +487,8 @@ def detect_and_compute_sg(self, img): superpoint_obj = superpoint.SuperPoint(self.config.get('superpoint', {})) pred = superpoint_obj({'image': inp}) pred = {**pred, **{k+'0': v for k, v in pred.items()}} - kp_pos_xy = pred['keypoints'][0].detach().numpy() - desc = pred['descriptors'][0].detach().numpy().T + kp_pos_xy = pred['keypoints'][0].detach().cpu().numpy() + desc = pred['descriptors'][0].detach().cpu().numpy().T return kp_pos_xy, desc diff --git a/examples/acrobat_2023/valis/feature_matcher.py b/examples/acrobat_2023/valis/feature_matcher.py index f22ad469..2bd281ab 100644 --- a/examples/acrobat_2023/valis/feature_matcher.py +++ b/examples/acrobat_2023/valis/feature_matcher.py @@ -1071,7 +1071,7 @@ def _match_images(self, img1, img2, matcher_obj=None, additional_filtering_kwarg sg_matching = matching.Matching(self.config).eval().to(self.device) sg_pred = sg_matching({'image0': inp1, 'image1': inp2}) - sg_pred = {k: v[0].detach().numpy() for k, v in sg_pred.items()} + sg_pred = {k: v[0].detach().cpu().numpy() for k, v in sg_pred.items()} matches, conf = sg_pred['matches0'], sg_pred['matching_scores0'] @@ -1333,7 +1333,7 @@ def _match_images(self, img1=None, desc1=None, kp1_xy=None, img2=None, desc2=Non sg_pred = sg(data) - sg_pred = {k: v[0].detach().numpy() for k, v in 
sg_pred.items()} + sg_pred = {k: v[0].detach().cpu().numpy() for k, v in sg_pred.items()} sg_pred.update(data) # Keep the matching keypoints and descriptors diff --git a/valis/feature_detectors.py b/valis/feature_detectors.py index c7492694..6ac1f50e 100644 --- a/valis/feature_detectors.py +++ b/valis/feature_detectors.py @@ -513,7 +513,7 @@ def compute(self, img, kp_pos_xy): descriptors = [superpoint.sample_descriptors(k[None], d[None], 8)[0] for k, d in zip([torch.from_numpy(kp_pos_xy.astype(np.float32))], descriptors)] - descriptors = descriptors[0].detach().numpy().T + descriptors = descriptors[0].detach().cpu().numpy().T else: kp = cv2.KeyPoint_convert(kp_pos_xy.tolist()) kp, descriptors = self.kp_descriptor.compute(img, kp) @@ -529,8 +529,8 @@ def detect_and_compute_sg(self, img): superpoint_obj = superpoint.SuperPoint(self.config.get('superpoint', {})) pred = superpoint_obj({'image': inp}) pred = {**pred, **{k+'0': v for k, v in pred.items()}} - kp_pos_xy = pred['keypoints'][0].detach().numpy() - desc = pred['descriptors'][0].detach().numpy().T + kp_pos_xy = pred['keypoints'][0].detach().cpu().numpy() + desc = pred['descriptors'][0].detach().cpu().numpy().T return kp_pos_xy, desc @@ -605,8 +605,8 @@ def _detect_and_compute(self, image, *args, **kwargs): tensor_img = preprocessing.img_to_tensor(image) with torch.inference_mode(): res = self.disk(tensor_img.to(self.device).float(), n=self.num_features, pad_if_not_divisible=True)[0] - kp_pos_xy = res.keypoints.detach().numpy() - desc = res.descriptors.detach().numpy() + kp_pos_xy = res.keypoints.detach().cpu().numpy() + desc = res.descriptors.detach().cpu().numpy() return kp_pos_xy, desc @@ -661,7 +661,7 @@ def _detect_and_compute(self, image, *args, **kwargs): with torch.inference_mode(): res = self.dedode(tensor_img.to(self.device).float(), n=self.num_features, pad_if_not_divisible=True) - kp_pos_xy = res[0].detach().squeeze(0).numpy() + kp_pos_xy = res[0].detach().squeeze(0).cpu().numpy() - scores = res[1].detach().numpy() + scores = res[1].detach().cpu().numpy() 
- desc = res[2].detach().squeeze(0).numpy() + desc = res[2].detach().squeeze(0).cpu().numpy() return kp_pos_xy, desc diff --git a/valis/feature_matcher.py b/valis/feature_matcher.py index 5a239c85..00eaacd9 100644 --- a/valis/feature_matcher.py +++ b/valis/feature_matcher.py @@ -1180,7 +1180,7 @@ def match_images(self, img1=None, desc1=None, kp1_xy=None, img2=None, desc2=None sg_pred = self.sg_matcher(data) - sg_pred = {k: v[0].detach().numpy() for k, v in sg_pred.items()} + sg_pred = {k: v[0].detach().cpu().numpy() for k, v in sg_pred.items()} sg_pred.update(data) # Keep the matching keypoints and descriptors @@ -1455,8 +1455,8 @@ def match_images(self, img1, img2, desc1=None, kp1_xy=None, desc2=None, kp2_xy=N lafs2 = kornia.feature.laf_from_center_scale_ori(t_kp2[None], torch.ones(1, len(t_kp2), 1, 1, device=self.device)) match_distances, idxs = self.lg_matcher(t_desc1, t_desc2, lafs1, lafs2, hw1=hw1, hw2=r_hw2) - match_distances = match_distances.detach().numpy() - idxs = idxs.detach().numpy() + match_distances = match_distances.detach().cpu().numpy() + idxs = idxs.detach().cpu().numpy() desc1_match_idx = idxs[:, 0] matched_desc1 = desc1[desc1_match_idx, :] diff --git a/valis/non_rigid_registrars.py b/valis/non_rigid_registrars.py index 33e20d9c..02435757 100644 --- a/valis/non_rigid_registrars.py +++ b/valis/non_rigid_registrars.py @@ -1112,7 +1112,7 @@ def calc(self, moving_img, fixed_img, *args, **kwargs): transformed_moving_img, transformed_fixed_img = self.weights.transforms()(transformed_moving_img, transformed_fixed_img) list_of_flows = self.model(transformed_fixed_img.to(self.device), transformed_moving_img.to(self.device)) - dxdy = list_of_flows[-1].squeeze(0).detach().numpy() + dxdy = list_of_flows[-1].squeeze(0).detach().cpu().numpy() if self.transform_method == "pad" and len(moving_transform) == 4: # Remove padding