Skip to content

fix: add try-except block when import resampling from PIL.Image #136

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Apr 12, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions data/base_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,12 @@
import numpy as np
import torch.utils.data as data
from PIL import Image
try:
from PIL.Image import Resampling
RESAMPLING_METHOD = Resampling.BICUBIC
except ImportError:
from PIL.Image import BICUBIC
RESAMPLING_METHOD = BICUBIC
import torchvision.transforms as transforms
from abc import ABC, abstractmethod

Expand Down Expand Up @@ -95,8 +101,8 @@ def get_affine_mat(opt, size):
affine_inv = np.linalg.inv(affine)
return affine, affine_inv, flip

def apply_img_affine(img, affine_inv, method=RESAMPLING_METHOD):
    """Warp a PIL image with the inverse affine matrix.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image; the output keeps the same size.
    affine_inv : numpy.ndarray
        Inverse affine matrix; its first six flattened entries form the
        (a, b, c, d, e, f) coefficients expected by ``Image.transform``.
    method : PIL resampling filter, optional
        Resampling filter to use. Defaults to bicubic via the
        version-compatible ``RESAMPLING_METHOD`` shim.

    Returns
    -------
    PIL.Image.Image
        The affine-transformed image.
    """
    # Bug fix: the original ignored `method` and always resampled with the
    # module-level default; honor the caller's choice instead.
    return img.transform(img.size, Image.AFFINE,
                         data=affine_inv.flatten()[:6], resample=method)

def apply_lm_affine(landmark, affine, flip, size):
_, h = size
Expand Down
13 changes: 10 additions & 3 deletions util/preprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,14 @@

import numpy as np
from scipy.io import loadmat
from PIL import Image

try:
from PIL.Image import Resampling
RESAMPLING_METHOD = Resampling.BICUBIC
except ImportError:
from PIL.Image import BICUBIC
RESAMPLING_METHOD = BICUBIC

import cv2
import os
from skimage import transform as trans
Expand Down Expand Up @@ -142,11 +149,11 @@ def resize_n_crop_img(img, lm, t, s, target_size=224., mask=None):
up = (h/2 - target_size/2 + float((h0/2 - t[1])*s)).astype(np.int32)
below = up + target_size

img = img.resize((w, h), resample=Image.BICUBIC)
img = img.resize((w, h), resample=RESAMPLING_METHOD)
img = img.crop((left, up, right, below))

if mask is not None:
mask = mask.resize((w, h), resample=Image.BICUBIC)
mask = mask.resize((w, h), resample=RESAMPLING_METHOD)
mask = mask.crop((left, up, right, below))

lm = np.stack([lm[:, 0] - t[0] + w0/2, lm[:, 1] -
Expand Down
14 changes: 10 additions & 4 deletions util/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@
import numpy as np
import torch
from PIL import Image
try:
from PIL.Image import Resampling
RESAMPLING_METHOD = Resampling.BICUBIC
except ImportError:
from PIL.Image import BICUBIC
RESAMPLING_METHOD = BICUBIC
import os
import importlib
import argparse
Expand Down Expand Up @@ -107,9 +113,9 @@ def save_image(image_numpy, image_path, aspect_ratio=1.0):
if aspect_ratio is None:
pass
elif aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
image_pil = image_pil.resize((h, int(w * aspect_ratio)), RESAMPLING_METHOD)
elif aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil = image_pil.resize((int(h / aspect_ratio), w), RESAMPLING_METHOD)
image_pil.save(image_path)


Expand Down Expand Up @@ -166,13 +172,13 @@ def correct_resize_label(t, size):
return torch.stack(resized, dim=0).to(device)


def correct_resize(t, size, mode=RESAMPLING_METHOD):
    """Resize a batch of image tensors by round-tripping through PIL.

    Each image is converted to a PIL image (via ``tensor2im``), resized
    with the requested resampling filter, and converted back to a tensor
    rescaled into [-1, 1] (the ``* 2 - 1.0`` below establishes the output
    range; the expected input range depends on ``tensor2im`` — presumably
    [-1, 1] as well, TODO confirm).

    Parameters
    ----------
    t : torch.Tensor
        Batch of images with shape (N, ...) accepted by ``tensor2im``.
    size : tuple
        Target size passed straight to ``PIL.Image.resize``.
    mode : PIL resampling filter, optional
        Resampling filter. Defaults to bicubic via the version-compatible
        ``RESAMPLING_METHOD`` shim.

    Returns
    -------
    torch.Tensor
        Resized batch, stacked along dim 0, moved back to ``t``'s device.
    """
    device = t.device
    t = t.detach().cpu()
    resized = []
    for i in range(t.size(0)):
        one_t = t[i:i + 1]
        # Bug fix: honor the `mode` argument instead of always using the
        # module-level default resampling filter.
        one_image = Image.fromarray(tensor2im(one_t)).resize(size, mode)
        resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0
        resized.append(resized_t)
    return torch.stack(resized, dim=0).to(device)
Expand Down