Skip to content

Commit be6764d

Browse files
authored
Merge branch 'pytorch:main' into replace
2 parents 110b8a9 + 39772ec commit be6764d

File tree

81 files changed

+1652
-375
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

81 files changed

+1652
-375
lines changed

.circleci/config.yml

Lines changed: 26 additions & 10 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

.circleci/config.yml.in

Lines changed: 26 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -174,6 +174,26 @@ commands:
174174
- store_test_results:
175175
path: test-results
176176

177+
download_model_weights:
178+
parameters:
179+
extract_roots:
180+
type: string
181+
default: "torchvision/models"
182+
background:
183+
type: boolean
184+
default: true
185+
steps:
186+
- apt_install:
187+
args: parallel wget
188+
descr: Install download utilities
189+
- run:
190+
name: Download model weights
191+
background: << parameters.background >>
192+
command: |
193+
mkdir -p ~/.cache/torch/hub/checkpoints
194+
python scripts/collect_model_urls.py << parameters.extract_roots >> \
195+
| parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci'
196+
177197
binary_common: &binary_common
178198
parameters:
179199
# Edit these defaults to do a release
@@ -340,14 +360,8 @@ jobs:
340360
resource_class: xlarge
341361
steps:
342362
- checkout
343-
- run:
344-
name: Download model weights
345-
background: true
346-
command: |
347-
sudo apt update -qy && sudo apt install -qy parallel wget
348-
mkdir -p ~/.cache/torch/hub/checkpoints
349-
python scripts/collect_model_urls.py torchvision/prototype/models \
350-
| parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci'
363+
- download_model_weights:
364+
extract_roots: torchvision/prototype/models
351365
- install_torchvision
352366
- install_prototype_dependencies
353367
- pip_install:
@@ -1011,12 +1025,13 @@ jobs:
10111025
build_docs:
10121026
<<: *binary_common
10131027
docker:
1014-
- image: "pytorch/manylinux-cuda100"
1028+
- image: circleci/python:3.7
10151029
resource_class: 2xlarge+
10161030
steps:
10171031
- attach_workspace:
10181032
at: ~/workspace
10191033
- checkout
1034+
- download_model_weights
10201035
- run:
10211036
name: Setup
10221037
command: .circleci/unittest/linux/scripts/setup_env.sh
@@ -1069,7 +1084,8 @@ jobs:
10691084
# Don't use "checkout" step since it uses ssh, which cannot git push
10701085
# https://circleci.com/docs/2.0/configuration-reference/#checkout
10711086
set -ex
1072-
tag=${CIRCLE_TAG:1:5}
1087+
# Change v1.12.1rc1 into 1.12 (only major.minor)
1088+
tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9]*\.[0-9]*\).*/\1/')
10731089
target=${tag:-main}
10741090
~/workspace/.circleci/build_docs/commit_docs.sh ~/workspace $target
10751091

.circleci/unittest/linux/scripts/run_test.sh

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,5 @@ set -e
55
eval "$(./conda/bin/conda shell.bash hook)"
66
conda activate ./env
77

8-
export PYTORCH_TEST_WITH_SLOW='1'
98
python -m torch.utils.collect_env
109
pytest --junitxml=test-results/junit.xml -v --durations 20

.circleci/unittest/windows/scripts/run_test.sh

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,5 @@ conda activate ./env
88
this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
99
source "$this_dir/set_cuda_envs.sh"
1010

11-
export PYTORCH_TEST_WITH_SLOW='1'
1211
python -m torch.utils.collect_env
1312
pytest --junitxml=test-results/junit.xml -v --durations 20

README.rst

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,9 @@ supported Python versions.
2121
+--------------------------+--------------------------+---------------------------------+
2222
| ``torch`` | ``torchvision`` | ``python`` |
2323
+==========================+==========================+=================================+
24-
| ``main`` / ``nightly`` | ``main`` / ``nightly`` | ``>=3.7``, ``<=3.9`` |
24+
| ``main`` / ``nightly`` | ``main`` / ``nightly`` | ``>=3.7``, ``<=3.10`` |
25+
+--------------------------+--------------------------+---------------------------------+
26+
| ``1.11.0`` | ``0.12.0`` | ``>=3.7``, ``<=3.10`` |
2527
+--------------------------+--------------------------+---------------------------------+
2628
| ``1.10.2`` | ``0.11.3`` | ``>=3.6``, ``<=3.9`` |
2729
+--------------------------+--------------------------+---------------------------------+

docs/source/conf.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@
2020
# import sys
2121
# sys.path.insert(0, os.path.abspath('.'))
2222

23+
import os
24+
2325
import pytorch_sphinx_theme
2426
import torchvision
2527

@@ -80,11 +82,16 @@
8082
# built documents.
8183
#
8284
# The short X.Y version.
83-
# TODO: change to [:2] at v1.0
8485
version = "main (" + torchvision.__version__ + " )"
8586
# The full version, including alpha/beta/rc tags.
86-
# TODO: verify this works as expected
8787
release = "main"
88+
VERSION = os.environ.get("VERSION", None)
89+
if VERSION:
90+
# Turn 1.11.0aHASH into 1.11 (major.minor only)
91+
version = ".".join(version.split(".")[:2])
92+
html_title = " ".join((project, version, "documentation"))
93+
release = version
94+
8895

8996
# The language for content autogenerated by Sphinx. Refer to documentation
9097
# for a list of supported languages.

gallery/plot_visualization_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
def show(imgs):
2323
if not isinstance(imgs, list):
2424
imgs = [imgs]
25-
fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
25+
fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)
2626
for i, img in enumerate(imgs):
2727
img = img.detach()
2828
img = F.to_pil_image(img)

ios/LibTorchvision.podspec

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1-
pytorch_version = '1.10.0'
1+
pytorch_version = '1.11.0'
22

33
Pod::Spec.new do |s|
44
s.name = 'LibTorchvision'
5-
s.version = '0.11.1'
5+
s.version = '0.12.0'
66
s.authors = 'PyTorch Team'
77
s.license = { :type => 'BSD' }
88
s.homepage = 'https://github.com/pytorch/vision'

references/classification/transforms.py

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,14 @@ class RandomMixup(torch.nn.Module):
2121

2222
def __init__(self, num_classes: int, p: float = 0.5, alpha: float = 1.0, inplace: bool = False) -> None:
2323
super().__init__()
24-
assert num_classes > 0, "Please provide a valid positive value for the num_classes."
25-
assert alpha > 0, "Alpha param can't be zero."
24+
25+
if num_classes < 1:
26+
raise ValueError(
27+
f"Please provide a valid positive value for the num_classes. Got num_classes={num_classes}"
28+
)
29+
30+
if alpha <= 0:
31+
raise ValueError("Alpha param can't be zero.")
2632

2733
self.num_classes = num_classes
2834
self.p = p
@@ -99,8 +105,10 @@ class RandomCutmix(torch.nn.Module):
99105

100106
def __init__(self, num_classes: int, p: float = 0.5, alpha: float = 1.0, inplace: bool = False) -> None:
101107
super().__init__()
102-
assert num_classes > 0, "Please provide a valid positive value for the num_classes."
103-
assert alpha > 0, "Alpha param can't be zero."
108+
if num_classes < 1:
109+
raise ValueError("Please provide a valid positive value for the num_classes.")
110+
if alpha <= 0:
111+
raise ValueError("Alpha param can't be zero.")
104112

105113
self.num_classes = num_classes
106114
self.p = p

references/detection/coco_eval.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,8 @@
1212

1313
class CocoEvaluator:
1414
def __init__(self, coco_gt, iou_types):
15-
assert isinstance(iou_types, (list, tuple))
15+
if not isinstance(iou_types, (list, tuple)):
16+
raise TypeError(f"This constructor expects iou_types of type list or tuple, instead got {type(iou_types)}")
1617
coco_gt = copy.deepcopy(coco_gt)
1718
self.coco_gt = coco_gt
1819

references/detection/coco_utils.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,10 @@ def _has_valid_annotation(anno):
126126
return True
127127
return False
128128

129-
assert isinstance(dataset, torchvision.datasets.CocoDetection)
129+
if not isinstance(dataset, torchvision.datasets.CocoDetection):
130+
raise TypeError(
131+
f"This function expects dataset of type torchvision.datasets.CocoDetection, instead got {type(dataset)}"
132+
)
130133
ids = []
131134
for ds_idx, img_id in enumerate(dataset.ids):
132135
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)

references/detection/transforms.py

Lines changed: 112 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import List, Tuple, Dict, Optional
1+
from typing import List, Tuple, Dict, Optional, Union
22

33
import torch
44
import torchvision
@@ -326,3 +326,114 @@ def forward(
326326
)
327327

328328
return image, target
329+
330+
331+
class FixedSizeCrop(nn.Module):
332+
def __init__(self, size, fill=0, padding_mode="constant"):
333+
super().__init__()
334+
size = tuple(T._setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
335+
self.crop_height = size[0]
336+
self.crop_width = size[1]
337+
self.fill = fill # TODO: Fill is currently respected only on PIL. Apply tensor patch.
338+
self.padding_mode = padding_mode
339+
340+
def _pad(self, img, target, padding):
341+
# Taken from the functional_tensor.py pad
342+
if isinstance(padding, int):
343+
pad_left = pad_right = pad_top = pad_bottom = padding
344+
elif len(padding) == 1:
345+
pad_left = pad_right = pad_top = pad_bottom = padding[0]
346+
elif len(padding) == 2:
347+
pad_left = pad_right = padding[0]
348+
pad_top = pad_bottom = padding[1]
349+
else:
350+
pad_left = padding[0]
351+
pad_top = padding[1]
352+
pad_right = padding[2]
353+
pad_bottom = padding[3]
354+
355+
padding = [pad_left, pad_top, pad_right, pad_bottom]
356+
img = F.pad(img, padding, self.fill, self.padding_mode)
357+
if target is not None:
358+
target["boxes"][:, 0::2] += pad_left
359+
target["boxes"][:, 1::2] += pad_top
360+
if "masks" in target:
361+
target["masks"] = F.pad(target["masks"], padding, 0, "constant")
362+
363+
return img, target
364+
365+
def _crop(self, img, target, top, left, height, width):
366+
img = F.crop(img, top, left, height, width)
367+
if target is not None:
368+
boxes = target["boxes"]
369+
boxes[:, 0::2] -= left
370+
boxes[:, 1::2] -= top
371+
boxes[:, 0::2].clamp_(min=0, max=width)
372+
boxes[:, 1::2].clamp_(min=0, max=height)
373+
374+
is_valid = (boxes[:, 0] < boxes[:, 2]) & (boxes[:, 1] < boxes[:, 3])
375+
376+
target["boxes"] = boxes[is_valid]
377+
target["labels"] = target["labels"][is_valid]
378+
if "masks" in target:
379+
target["masks"] = F.crop(target["masks"][is_valid], top, left, height, width)
380+
381+
return img, target
382+
383+
def forward(self, img, target=None):
384+
_, height, width = F.get_dimensions(img)
385+
new_height = min(height, self.crop_height)
386+
new_width = min(width, self.crop_width)
387+
388+
if new_height != height or new_width != width:
389+
offset_height = max(height - self.crop_height, 0)
390+
offset_width = max(width - self.crop_width, 0)
391+
392+
r = torch.rand(1)
393+
top = int(offset_height * r)
394+
left = int(offset_width * r)
395+
396+
img, target = self._crop(img, target, top, left, new_height, new_width)
397+
398+
pad_bottom = max(self.crop_height - new_height, 0)
399+
pad_right = max(self.crop_width - new_width, 0)
400+
if pad_bottom != 0 or pad_right != 0:
401+
img, target = self._pad(img, target, [0, 0, pad_right, pad_bottom])
402+
403+
return img, target
404+
405+
406+
class RandomShortestSize(nn.Module):
407+
def __init__(
408+
self,
409+
min_size: Union[List[int], Tuple[int], int],
410+
max_size: int,
411+
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
412+
):
413+
super().__init__()
414+
self.min_size = [min_size] if isinstance(min_size, int) else list(min_size)
415+
self.max_size = max_size
416+
self.interpolation = interpolation
417+
418+
def forward(
419+
self, image: Tensor, target: Optional[Dict[str, Tensor]] = None
420+
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
421+
_, orig_height, orig_width = F.get_dimensions(image)
422+
423+
min_size = self.min_size[torch.randint(len(self.min_size), (1,)).item()]
424+
r = min(min_size / min(orig_height, orig_width), self.max_size / max(orig_height, orig_width))
425+
426+
new_width = int(orig_width * r)
427+
new_height = int(orig_height * r)
428+
429+
image = F.resize(image, [new_height, new_width], interpolation=self.interpolation)
430+
431+
if target is not None:
432+
target["boxes"][:, 0::2] *= new_width / orig_width
433+
target["boxes"][:, 1::2] *= new_height / orig_height
434+
if "masks" in target:
435+
target["masks"] = F.resize(
436+
target["masks"], [new_height, new_width], interpolation=InterpolationMode.NEAREST
437+
)
438+
439+
return image, target

0 commit comments

Comments
 (0)