
Commit bbb9016

Merge branch 'main' into main
2 parents 8d89128 + bb83e5b commit bbb9016


61 files changed: +1253 -1800 lines

.circleci/config.yml

Lines changed: 1 addition & 593 deletions
Diff not rendered (generated file).

.circleci/config.yml.in

Lines changed: 1 addition & 1 deletion
@@ -848,7 +848,7 @@ jobs:
     executor:
       name: windows-gpu
     environment:
-      CUDA_VERSION: "11.1"
+      CUDA_VERSION: "11.3"
       PYTHON_VERSION: << parameters.python_version >>
     steps:
       - checkout

.circleci/regenerate.py

Lines changed: 2 additions & 2 deletions
@@ -32,8 +32,8 @@ def build_workflows(prefix="", filter_branch=None, upload=False, indentation=6,
     for os_type in ["linux", "macos", "win"]:
         python_versions = PYTHON_VERSIONS
         cu_versions_dict = {
-            "linux": ["cpu", "cu102", "cu111", "cu113", "cu115", "rocm4.3.1", "rocm4.5.2"],
-            "win": ["cpu", "cu111", "cu113", "cu115"],
+            "linux": ["cpu", "cu102", "cu113", "cu115", "rocm4.3.1", "rocm4.5.2"],
+            "win": ["cpu", "cu113", "cu115"],
             "macos": ["cpu"],
         }
         cu_versions = cu_versions_dict[os_type]

.circleci/unittest/windows/scripts/environment.yml

Lines changed: 2 additions & 1 deletion
@@ -9,10 +9,11 @@ dependencies:
   - libpng
   - jpeg
   - ca-certificates
-  - h5py
+  - hdf5
   - pip:
       - future
       - pillow >=5.3.0, !=8.3.*
       - scipy
       - av
       - dataclasses
+      - h5py

.circleci/unittest/windows/scripts/install.sh

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ else
 fi

 printf "Installing PyTorch with %s\n" "${cudatoolkit}"
-conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c conda-forge "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}" pytest
+conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}" pytest

 torch_cuda=$(python -c "import torch; print(torch.cuda.is_available())")
 echo torch.cuda.is_available is $torch_cuda

docs/source/ops.rst

Lines changed: 2 additions & 1 deletion
@@ -47,7 +47,8 @@ Operators
     FeaturePyramidNetwork
     StochasticDepth
     FrozenBatchNorm2d
-    ConvNormActivation
+    Conv2dNormActivation
+    Conv3dNormActivation
     SqueezeExcitation
     DropBlock2d
     DropBlock3d
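The ops docs now list explicit 2D and 3D variants in place of the old ConvNormActivation entry. A minimal usage sketch, assuming the new classes keep the familiar ConvNormActivation-style signature (in_channels, out_channels, kernel_size, stride):

    import torch
    from torchvision.ops import Conv2dNormActivation

    # Conv2d -> BatchNorm2d -> ReLU fused into one block (default norm/activation assumed)
    block = Conv2dNormActivation(in_channels=3, out_channels=16, kernel_size=3, stride=2)
    out = block(torch.rand(1, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([1, 16, 16, 16])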

references/classification/utils.py

Lines changed: 1 addition & 0 deletions
@@ -274,6 +274,7 @@ def init_distributed_mode(args):
     torch.distributed.init_process_group(
         backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
     )
+    torch.distributed.barrier()
     setup_for_distributed(args.rank == 0)
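The same one-line addition appears in all four reference training scripts in this commit (classification above; optical flow, segmentation, and video classification below): a torch.distributed.barrier() right after init_process_group, so no rank continues before the whole process group is up. A minimal sketch of the pattern, using a hypothetical helper rather than the exact reference code:

    import torch.distributed as dist

    def init_distributed(rank: int, world_size: int, dist_url: str = "env://", backend: str = "nccl"):
        # hypothetical minimal helper illustrating the pattern used in the references
        dist.init_process_group(backend=backend, init_method=dist_url,
                                world_size=world_size, rank=rank)
        # synchronize: every process blocks here until all ranks have joined the group
        dist.barrier()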

references/optical_flow/utils.py

Lines changed: 1 addition & 0 deletions
@@ -267,6 +267,7 @@ def setup_ddp(args):
         world_size=args.world_size,
         init_method=args.dist_url,
     )
+    torch.distributed.barrier()


 def reduce_across_processes(val):

references/segmentation/utils.py

Lines changed: 1 addition & 0 deletions
@@ -291,4 +291,5 @@ def init_distributed_mode(args):
     torch.distributed.init_process_group(
         backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
     )
+    torch.distributed.barrier()
     setup_for_distributed(args.rank == 0)

references/video_classification/utils.py

Lines changed: 1 addition & 0 deletions
@@ -250,4 +250,5 @@ def init_distributed_mode(args):
     torch.distributed.init_process_group(
         backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
     )
+    torch.distributed.barrier()
     setup_for_distributed(args.rank == 0)

scripts/release_notes/classify_prs.py

Lines changed: 19 additions & 2 deletions
@@ -1,13 +1,18 @@
 # In[1]:

-
+# imports and set configuration
 import pandas as pd
+from retrieve_prs_data import run

+exclude_prototype = True
+data_filename = "10.0_to_11.0-rc2.json"
+previous_release = "v10.0"
+current_release = "v11.0-rc2"

 # In[2]:


-df = pd.read_json("10.0_to_11.0-rc2.json").T
+df = pd.read_json(data_filename).T
 df.tail()


@@ -76,6 +81,8 @@
 def format_prs(mod_df):
     out = []
     for idx, row in mod_df.iterrows():
+        if exclude_prototype and row["prototype"]:
+            continue
         modules = idx
         # Put "documentation" and "tests" first for sorting to be dece
         for last_module in ("documentation", "tests"):
@@ -119,3 +126,13 @@ def format_prs(mod_df):
 # Missing PRs are these ones... classify them manually
 missing_prs = pd.concat([mod_df, included_prs]).drop_duplicates(subset="pr_number", keep=False)
 print(format_prs(missing_prs))
+
+# In[12]:
+
+# Generate list of contributors
+print()
+print("## Contributors")
+
+command_to_run = f"{{ git shortlog -s {previous_release}..{current_release} | cut -f2- & git log -s {previous_release}..{current_release} | grep Co-authored | cut -f2- -d: | cut -f1 -d\\< | sed 's/^ *//;s/ *$//' ; }} | sort --ignore-case | uniq | tr '\\n' ';' | sed 's/;/, /g;s/, $//' | fold -s"
+rc, output, err = run(command_to_run)
+print(output)

test/test_prototype_transforms.py

Lines changed: 15 additions & 48 deletions
@@ -1,9 +1,8 @@
 import itertools

-import PIL.Image
 import pytest
 import torch
-from test_prototype_transforms_kernels import make_images, make_bounding_boxes, make_one_hot_labels
+from test_prototype_transforms_functional import make_images, make_bounding_boxes, make_one_hot_labels
 from torchvision.prototype import transforms, features
 from torchvision.transforms.functional import to_pil_image

@@ -25,15 +24,6 @@ def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
         yield bounding_box.data


-INPUT_CREATIONS_FNS = {
-    features.Image: make_images,
-    features.BoundingBox: make_bounding_boxes,
-    features.OneHotLabel: make_one_hot_labels,
-    torch.Tensor: make_vanilla_tensor_images,
-    PIL.Image.Image: make_pil_images,
-}
-
-
 def parametrize(transforms_with_inputs):
     return pytest.mark.parametrize(
         ("transform", "input"),
@@ -52,15 +42,21 @@ def parametrize(transforms_with_inputs):
 def parametrize_from_transforms(*transforms):
     transforms_with_inputs = []
     for transform in transforms:
-        dispatcher = transform._DISPATCHER
-        if dispatcher is None:
-            continue
-
-        for type_ in dispatcher._kernels:
+        for creation_fn in [
+            make_images,
+            make_bounding_boxes,
+            make_one_hot_labels,
+            make_vanilla_tensor_images,
+            make_pil_images,
+        ]:
+            inputs = list(creation_fn())
             try:
-                inputs = INPUT_CREATIONS_FNS[type_]()
-            except KeyError:
+                output = transform(inputs[0])
+            except Exception:
                 continue
+            else:
+                if output is inputs[0]:
+                    continue

             transforms_with_inputs.append((transform, inputs))

@@ -69,7 +65,7 @@ def parametrize_from_transforms(*transforms):


 class TestSmoke:
     @parametrize_from_transforms(
-        transforms.RandomErasing(),
+        transforms.RandomErasing(p=1.0),
         transforms.HorizontalFlip(),
         transforms.Resize([16, 16]),
         transforms.CenterCrop([16, 16]),
@@ -141,35 +137,6 @@ def test_auto_augment(self, transform, input):
     def test_normalize(self, transform, input):
         transform(input)

-    @parametrize(
-        [
-            (
-                transforms.ConvertColorSpace("grayscale"),
-                itertools.chain(
-                    make_images(),
-                    make_vanilla_tensor_images(color_spaces=["rgb"]),
-                    make_pil_images(color_spaces=["rgb"]),
-                ),
-            )
-        ]
-    )
-    def test_convert_bounding_color_space(self, transform, input):
-        transform(input)
-
-    @parametrize(
-        [
-            (
-                transforms.ConvertBoundingBoxFormat("xyxy", old_format="xywh"),
-                itertools.chain(
-                    make_bounding_boxes(),
-                    make_vanilla_tensor_bounding_boxes(formats=["xywh"]),
-                ),
-            )
-        ]
-    )
-    def test_convert_bounding_box_format(self, transform, input):
-        transform(input)
-
     @parametrize(
         [
             (
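In short, the smoke test no longer consults a transform's _DISPATCHER to decide which input types to generate. It now simply tries every input-creation function against each transform, keeps the combinations the transform accepts, and skips the cases where the transform returns its input unchanged (the passthrough path).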

test/test_prototype_transforms_kernels.py renamed to test/test_prototype_transforms_functional.py

Lines changed: 21 additions & 21 deletions
@@ -3,7 +3,7 @@

 import pytest
 import torch.testing
-import torchvision.prototype.transforms.kernels as K
+import torchvision.prototype.transforms.functional as F
 from torch import jit
 from torch.nn.functional import one_hot
 from torchvision.prototype import features
@@ -134,10 +134,10 @@ def __init__(self, *args, **kwargs):
         self.kwargs = kwargs


-class KernelInfo:
+class FunctionalInfo:
     def __init__(self, name, *, sample_inputs_fn):
         self.name = name
-        self.kernel = getattr(K, name)
+        self.functional = getattr(F, name)
         self._sample_inputs_fn = sample_inputs_fn

     def sample_inputs(self):
@@ -146,21 +146,21 @@ def sample_inputs(self):
     def __call__(self, *args, **kwargs):
         if len(args) == 1 and not kwargs and isinstance(args[0], SampleInput):
             sample_input = args[0]
-            return self.kernel(*sample_input.args, **sample_input.kwargs)
+            return self.functional(*sample_input.args, **sample_input.kwargs)

-        return self.kernel(*args, **kwargs)
+        return self.functional(*args, **kwargs)


-KERNEL_INFOS = []
+FUNCTIONAL_INFOS = []


 def register_kernel_info_from_sample_inputs_fn(sample_inputs_fn):
-    KERNEL_INFOS.append(KernelInfo(sample_inputs_fn.__name__, sample_inputs_fn=sample_inputs_fn))
+    FUNCTIONAL_INFOS.append(FunctionalInfo(sample_inputs_fn.__name__, sample_inputs_fn=sample_inputs_fn))
     return sample_inputs_fn


 @register_kernel_info_from_sample_inputs_fn
-def horizontal_flip_image():
+def horizontal_flip_image_tensor():
     for image in make_images():
         yield SampleInput(image)

@@ -172,12 +172,12 @@ def horizontal_flip_bounding_box():


 @register_kernel_info_from_sample_inputs_fn
-def resize_image():
+def resize_image_tensor():
     for image, interpolation in itertools.product(
         make_images(),
         [
-            K.InterpolationMode.BILINEAR,
-            K.InterpolationMode.NEAREST,
+            F.InterpolationMode.BILINEAR,
+            F.InterpolationMode.NEAREST,
         ],
     ):
         height, width = image.shape[-2:]
@@ -200,20 +200,20 @@ def resize_bounding_box():


 class TestKernelsCommon:
-    @pytest.mark.parametrize("kernel_info", KERNEL_INFOS, ids=lambda kernel_info: kernel_info.name)
-    def test_scriptable(self, kernel_info):
-        jit.script(kernel_info.kernel)
+    @pytest.mark.parametrize("functional_info", FUNCTIONAL_INFOS, ids=lambda functional_info: functional_info.name)
+    def test_scriptable(self, functional_info):
+        jit.script(functional_info.functional)

     @pytest.mark.parametrize(
-        ("kernel_info", "sample_input"),
+        ("functional_info", "sample_input"),
         [
-            pytest.param(kernel_info, sample_input, id=f"{kernel_info.name}-{idx}")
-            for kernel_info in KERNEL_INFOS
-            for idx, sample_input in enumerate(kernel_info.sample_inputs())
+            pytest.param(functional_info, sample_input, id=f"{functional_info.name}-{idx}")
+            for functional_info in FUNCTIONAL_INFOS
+            for idx, sample_input in enumerate(functional_info.sample_inputs())
         ],
     )
-    def test_eager_vs_scripted(self, kernel_info, sample_input):
-        eager = kernel_info(sample_input)
-        scripted = jit.script(kernel_info.kernel)(*sample_input.args, **sample_input.kwargs)
+    def test_eager_vs_scripted(self, functional_info, sample_input):
+        eager = functional_info(sample_input)
+        scripted = jit.script(functional_info.functional)(*sample_input.args, **sample_input.kwargs)

         torch.testing.assert_close(eager, scripted)
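The registry pattern carries over unchanged under the new names: decorating a generator of SampleInputs adds it to FUNCTIONAL_INFOS, and both common tests (scriptability and eager-vs-scripted) pick it up automatically. A hypothetical example for a new functional, not part of this commit:

    # hypothetical: register sample inputs for a vertical_flip_image_tensor functional
    @register_kernel_info_from_sample_inputs_fn
    def vertical_flip_image_tensor():
        for image in make_images():
            yield SampleInput(image)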

test/test_transforms_tensor.py

Lines changed: 12 additions & 0 deletions
@@ -1,4 +1,5 @@
 import os
+import sys

 import numpy as np
 import pytest
@@ -958,6 +959,17 @@ def test_random_apply(device):
 )
 @pytest.mark.parametrize("channels", [1, 3])
 def test_gaussian_blur(device, channels, meth_kwargs):
+    if all(
+        [
+            device == "cuda",
+            channels == 1,
+            meth_kwargs["kernel_size"] in [23, [23]],
+            torch.version.cuda == "11.3",
+            sys.platform in ("win32", "cygwin"),
+        ]
+    ):
+        pytest.skip("Fails on Windows, see https://github.com/pytorch/vision/issues/5464")
+
     tol = 1.0 + 1e-10
     torch.manual_seed(12)
     _test_class_op(

torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp

Lines changed: 21 additions & 0 deletions
@@ -47,6 +47,27 @@ torch::Tensor decode_jpeg_cuda(

   TORCH_CHECK(device.is_cuda(), "Expected a cuda device")

+  int major_version;
+  int minor_version;
+  nvjpegStatus_t get_major_property_status =
+      nvjpegGetProperty(MAJOR_VERSION, &major_version);
+  nvjpegStatus_t get_minor_property_status =
+      nvjpegGetProperty(MINOR_VERSION, &minor_version);
+
+  TORCH_CHECK(
+      get_major_property_status == NVJPEG_STATUS_SUCCESS,
+      "nvjpegGetProperty failed: ",
+      get_major_property_status);
+  TORCH_CHECK(
+      get_minor_property_status == NVJPEG_STATUS_SUCCESS,
+      "nvjpegGetProperty failed: ",
+      get_minor_property_status);
+  if ((major_version < 11) || ((major_version == 11) && (minor_version < 6))) {
+    TORCH_WARN_ONCE(
+        "There is a memory leak issue in the nvjpeg library for CUDA versions < 11.6. "
+        "Make sure to rely on CUDA 11.6 or above before using decode_jpeg(..., device='cuda').");
+  }
+
   at::cuda::CUDAGuard device_guard(device);

   // Create global nvJPEG handle

torchvision/io/image.py

Lines changed: 4 additions & 0 deletions
@@ -145,6 +145,10 @@ def decode_jpeg(
             with `nvjpeg <https://developer.nvidia.com/nvjpeg>`_. This is only
             supported for CUDA version >= 10.1

+    .. warning::
+        There is a memory leak in the nvjpeg library for CUDA versions < 11.6.
+        Make sure to rely on CUDA 11.6 or above before using ``device="cuda"``.
+
     Returns:
         output (Tensor[image_channels, image_height, image_width])
     """
