diff --git a/data/base_dataset.py b/data/base_dataset.py
index b6b9c08..1bd57d0 100644
--- a/data/base_dataset.py
+++ b/data/base_dataset.py
@@ -95,8 +95,8 @@ def get_affine_mat(opt, size):
     affine_inv = np.linalg.inv(affine)
     return affine, affine_inv, flip
 
-def apply_img_affine(img, affine_inv, method=Image.Resampling.BICUBIC):
-    return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=Image.Resampling.BICUBIC)
+def apply_img_affine(img, affine_inv, method=Image.BICUBIC):
+    return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=Image.BICUBIC)
 
 def apply_lm_affine(landmark, affine, flip, size):
     _, h = size
diff --git a/util/preprocess.py b/util/preprocess.py
index 4d9fc61..a6de6ea 100644
--- a/util/preprocess.py
+++ b/util/preprocess.py
@@ -142,11 +142,11 @@ def resize_n_crop_img(img, lm, t, s, target_size=224., mask=None):
     up = (h/2 - target_size/2 + float((h0/2 - t[1])*s)).astype(np.int32)
     below = up + target_size
 
-    img = img.resize((w, h), resample=Image.Resampling.BICUBIC)
+    img = img.resize((w, h), resample=Image.BICUBIC)
     img = img.crop((left, up, right, below))
     if mask is not None:
-        mask = mask.resize((w, h), resample=Image.Resampling.BICUBIC)
+        mask = mask.resize((w, h), resample=Image.BICUBIC)
         mask = mask.crop((left, up, right, below))
 
     lm = np.stack([lm[:, 0] - t[0] + w0/2, lm[:, 1] -
diff --git a/util/util.py b/util/util.py
index aa2fb10..0d689ca 100644
--- a/util/util.py
+++ b/util/util.py
@@ -107,9 +107,9 @@ def save_image(image_numpy, image_path, aspect_ratio=1.0):
     if aspect_ratio is None:
         pass
     elif aspect_ratio > 1.0:
-        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.Resampling.BICUBIC)
+        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
     elif aspect_ratio < 1.0:
-        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.Resampling.BICUBIC)
+        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
     image_pil.save(image_path)
 
 
@@ -166,13 +166,13 @@ def correct_resize_label(t, size):
     return torch.stack(resized, dim=0).to(device)
 
 
-def correct_resize(t, size, mode=Image.Resampling.BICUBIC):
+def correct_resize(t, size, mode=Image.BICUBIC):
     device = t.device
     t = t.detach().cpu()
     resized = []
     for i in range(t.size(0)):
         one_t = t[i:i + 1]
-        one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.Resampling.BICUBIC)
+        one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC)
         resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0
         resized.append(resized_t)
     return torch.stack(resized, dim=0).to(device)