Skip to content

Commit f221678

Browse files
authored
Merge pull request #134 from elenacliu/master
fix: replace Image.BICUBIC with Image.Resampling.BICUBIC
2 parents 6f28de2 + 8796cbf commit f221678

File tree

3 files changed: +8 −8 lines changed

data/base_dataset.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -95,8 +95,8 @@ def get_affine_mat(opt, size):
     affine_inv = np.linalg.inv(affine)
     return affine, affine_inv, flip

-def apply_img_affine(img, affine_inv, method=Image.BICUBIC):
-    return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=Image.BICUBIC)
+def apply_img_affine(img, affine_inv, method=Image.Resampling.BICUBIC):
+    return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=Image.Resampling.BICUBIC)

 def apply_lm_affine(landmark, affine, flip, size):
     _, h = size

util/preprocess.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -142,11 +142,11 @@ def resize_n_crop_img(img, lm, t, s, target_size=224., mask=None):
     up = (h/2 - target_size/2 + float((h0/2 - t[1])*s)).astype(np.int32)
     below = up + target_size

-    img = img.resize((w, h), resample=Image.BICUBIC)
+    img = img.resize((w, h), resample=Image.Resampling.BICUBIC)
     img = img.crop((left, up, right, below))

     if mask is not None:
-        mask = mask.resize((w, h), resample=Image.BICUBIC)
+        mask = mask.resize((w, h), resample=Image.Resampling.BICUBIC)
         mask = mask.crop((left, up, right, below))

     lm = np.stack([lm[:, 0] - t[0] + w0/2, lm[:, 1] -

util/util.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -107,9 +107,9 @@ def save_image(image_numpy, image_path, aspect_ratio=1.0):
     if aspect_ratio is None:
         pass
     elif aspect_ratio > 1.0:
-        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
+        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.Resampling.BICUBIC)
     elif aspect_ratio < 1.0:
-        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
+        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.Resampling.BICUBIC)
     image_pil.save(image_path)

@@ -166,13 +166,13 @@ def correct_resize_label(t, size):
     return torch.stack(resized, dim=0).to(device)


-def correct_resize(t, size, mode=Image.BICUBIC):
+def correct_resize(t, size, mode=Image.Resampling.BICUBIC):
     device = t.device
     t = t.detach().cpu()
     resized = []
     for i in range(t.size(0)):
         one_t = t[i:i + 1]
-        one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC)
+        one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.Resampling.BICUBIC)
         resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0
         resized.append(resized_t)
     return torch.stack(resized, dim=0).to(device)

0 commit comments

Comments (0)