
Commit 309483c

datumbox authored and facebook-github-bot committed
[fbsync] Adding resnext101 64x4d model (#5935)
Summary:
* Add resnext101_64x4d model definition
* Add test for resnext101_64x4d
* Add resnext101_64x4d weights
* Update checkpoint to use EMA weights
* Add quantization model signature for resnext101_64x4d
* Fix class name and update accuracy using 1 GPU and batch_size=1
* Apply ufmt
* Update the quantized weights and accuracy so that we still keep the training log
* Add quantized expect file
* Update docs and fix acc1
* Add recipe for quantized model to PR
* Update models.rst

Reviewed By: YosuaMichael

Differential Revision: D36281598

fbshipit-source-id: 300bd36343b8ad8b185a246b794e078bdf67f5c8
1 parent 680a15b commit 309483c
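
The summary notes that the released checkpoint was switched to the EMA (exponential moving average) weights from training. The training loop itself is not part of this diff; the following is only a minimal sketch of how EMA weights are commonly tracked with torch.optim.swa_utils.AveragedModel, with the decay value chosen purely for illustration:

import torch
from torch.optim.swa_utils import AveragedModel

decay = 0.9999  # illustrative decay, not taken from this PR's recipe

def ema_avg(avg_param, param, num_averaged):
    # standard EMA update: new_avg = decay * old_avg + (1 - decay) * current
    return decay * avg_param + (1.0 - decay) * param

model = torch.nn.Linear(16, 16)  # stand-in for the real resnext101_64x4d
ema_model = AveragedModel(model, avg_fn=ema_avg)

# inside the training loop, after every optimizer.step():
ema_model.update_parameters(model)

# at export time, the averaged copy is what becomes the published checkpoint
torch.save(ema_model.module.state_dict(), "resnext101_64x4d_ema.pth")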

File tree

6 files changed, 101 insertions(+), 0 deletions(-)


docs/source/models.rst

Lines changed: 8 additions & 0 deletions
@@ -61,6 +61,8 @@ You can construct a model with random weights by calling its constructor:
     mobilenet_v3_large = models.mobilenet_v3_large()
     mobilenet_v3_small = models.mobilenet_v3_small()
     resnext50_32x4d = models.resnext50_32x4d()
+    resnext101_32x8d = models.resnext101_32x8d()
+    resnext101_64x4d = models.resnext101_64x4d()
     wide_resnet50_2 = models.wide_resnet50_2()
     mnasnet = models.mnasnet1_0()
     efficientnet_b0 = models.efficientnet_b0()
@@ -185,6 +187,7 @@ MobileNet V3 Large 74.042 91.340
 MobileNet V3 Small 67.668 87.402
 ResNeXt-50-32x4d 77.618 93.698
 ResNeXt-101-32x8d 79.312 94.526
+ResNeXt-101-64x4d 83.246 96.454
 Wide ResNet-50-2 78.468 94.086
 Wide ResNet-101-2 78.848 94.284
 MNASNet 1.0 73.456 91.510
@@ -366,6 +369,7 @@ ResNext
 
     resnext50_32x4d
     resnext101_32x8d
+    resnext101_64x4d
 
 Wide ResNet
 -----------
@@ -481,8 +485,11 @@ a model with random weights by calling its constructor:
     resnet18 = models.quantization.resnet18()
     resnet50 = models.quantization.resnet50()
     resnext101_32x8d = models.quantization.resnext101_32x8d()
+    resnext101_64x4d = models.quantization.resnext101_64x4d()
     shufflenet_v2_x0_5 = models.quantization.shufflenet_v2_x0_5()
     shufflenet_v2_x1_0 = models.quantization.shufflenet_v2_x1_0()
+    shufflenet_v2_x1_5 = models.quantization.shufflenet_v2_x1_5()
+    shufflenet_v2_x2_0 = models.quantization.shufflenet_v2_x2_0()
 
 Obtaining a pre-trained quantized model can be done with a few lines of code:
 
@@ -508,6 +515,7 @@ ShuffleNet V2 x2.0 75.354 92.488
 ResNet 18 69.494 88.882
 ResNet 50 75.920 92.814
 ResNext 101 32x8d 78.986 94.480
+ResNext 101 64x4d 82.898 96.326
 Inception V3 77.176 93.354
 GoogleNet 69.826 89.404
 ================================ ============= =============
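
To complement the docs change above, here is a hedged usage sketch of the new unquantized builder with the weight enum introduced in this commit (torchvision 0.13-style weights API; import paths assumed from the diff):

import torch
from torchvision import models

# random weights, as in the constructor list added to models.rst
model = models.resnext101_64x4d()

# pretrained ImageNet weights added by this commit
weights = models.ResNeXt101_64X4D_Weights.IMAGENET1K_V1
model = models.resnext101_64x4d(weights=weights)
model.eval()

# the weight entry carries its own preprocessing (crop_size=224, resize_size=232)
preprocess = weights.transforms()
batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)
with torch.no_grad():
    logits = model(batch)
print(logits.shape)  # torch.Size([1, 1000])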
2 binary files changed (not shown)

test/test_models.py

Lines changed: 1 addition & 0 deletions
@@ -315,6 +315,7 @@ def _check_input_backprop(model, inputs):
     "convnext_base",
     "convnext_large",
     "resnext101_32x8d",
+    "resnext101_64x4d",
     "wide_resnet101_2",
     "efficientnet_b6",
     "efficientnet_b7",

torchvision/models/quantization/resnet.py

Lines changed: 44 additions & 0 deletions
@@ -11,6 +11,7 @@
     ResNet18_Weights,
     ResNet50_Weights,
     ResNeXt101_32X8D_Weights,
+    ResNeXt101_64X4D_Weights,
 )
 
 from ...transforms._presets import ImageClassification
@@ -25,9 +26,11 @@
     "ResNet18_QuantizedWeights",
     "ResNet50_QuantizedWeights",
     "ResNeXt101_32X8D_QuantizedWeights",
+    "ResNeXt101_64X4D_QuantizedWeights",
     "resnet18",
     "resnet50",
     "resnext101_32x8d",
+    "resnext101_64x4d",
 ]
 
 
@@ -231,6 +234,24 @@ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
     DEFAULT = IMAGENET1K_FBGEMM_V2
 
 
+class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
+    IMAGENET1K_FBGEMM_V1 = Weights(
+        url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
+        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
+        meta={
+            **_COMMON_META,
+            "num_params": 83455272,
+            "recipe": "https://github.com/pytorch/vision/pull/5935",
+            "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
+            "metrics": {
+                "acc@1": 82.898,
+                "acc@5": 96.326,
+            },
+        },
+    )
+    DEFAULT = IMAGENET1K_FBGEMM_V1
+
+
 @handle_legacy_interface(
     weights=(
         "pretrained",
@@ -318,3 +339,26 @@ def resnext101_32x8d(
     _ovewrite_named_param(kwargs, "groups", 32)
     _ovewrite_named_param(kwargs, "width_per_group", 8)
     return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
+
+
+def resnext101_64x4d(
+    *,
+    weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
+    progress: bool = True,
+    quantize: bool = False,
+    **kwargs: Any,
+) -> QuantizableResNet:
+    r"""ResNeXt-101 64x4d model from
+    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
+
+    Args:
+        weights (ResNeXt101_64X4D_QuantizedWeights or ResNeXt101_64X4D_Weights, optional): The pretrained
+            weights for the model
+        progress (bool): If True, displays a progress bar of the download to stderr
+        quantize (bool): If True, return a quantized version of the model
+    """
+    weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)
+
+    _ovewrite_named_param(kwargs, "groups", 64)
+    _ovewrite_named_param(kwargs, "width_per_group", 4)
+    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
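
Mirroring the "Obtaining a pre-trained quantized model" snippet in the docs, a sketch of how the quantized builder added here would typically be used (assumes an x86 machine where the fbgemm backend targeted by these weights is available):

import torch
from torchvision import models
from torchvision.models.quantization import ResNeXt101_64X4D_QuantizedWeights

weights = ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
# quantize=True returns the int8 model loaded with the fbgemm checkpoint from this PR
model = models.quantization.resnext101_64x4d(weights=weights, quantize=True)
model.eval()

preprocess = weights.transforms()  # crop_size=224, resize_size=232, as in the diff
batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)
with torch.no_grad():
    probs = model(batch).softmax(dim=1)
print(probs.argmax(dim=1))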

torchvision/models/resnet.py

Lines changed: 48 additions & 0 deletions
@@ -21,6 +21,7 @@
     "ResNet152_Weights",
     "ResNeXt50_32X4D_Weights",
     "ResNeXt101_32X8D_Weights",
+    "ResNeXt101_64X4D_Weights",
     "Wide_ResNet50_2_Weights",
     "Wide_ResNet101_2_Weights",
     "resnet18",
@@ -30,6 +31,7 @@
     "resnet152",
     "resnext50_32x4d",
     "resnext101_32x8d",
+    "resnext101_64x4d",
     "wide_resnet50_2",
     "wide_resnet101_2",
 ]
@@ -491,6 +493,24 @@ class ResNeXt101_32X8D_Weights(WeightsEnum):
     DEFAULT = IMAGENET1K_V2
 
 
+class ResNeXt101_64X4D_Weights(WeightsEnum):
+    IMAGENET1K_V1 = Weights(
+        url="https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth",
+        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
+        meta={
+            **_COMMON_META,
+            "num_params": 83455272,
+            "recipe": "https://github.com/pytorch/vision/pull/5935",
+            "metrics": {
+                # Mock
+                "acc@1": 83.246,
+                "acc@5": 96.454,
+            },
+        },
+    )
+    DEFAULT = IMAGENET1K_V1
+
+
 class Wide_ResNet50_2_Weights(WeightsEnum):
     IMAGENET1K_V1 = Weights(
         url="https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
@@ -734,6 +754,34 @@ def resnext101_32x8d(
     return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
 
 
+def resnext101_64x4d(
+    *, weights: Optional[ResNeXt101_64X4D_Weights] = None, progress: bool = True, **kwargs: Any
+) -> ResNet:
+    """ResNeXt-101 64x4d model from
+    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.
+
+    Args:
+        weights (:class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
+            pretrained weights to use. See
+            :class:`~torchvision.models.ResNeXt101_64X4D_Weights` below for
+            more details, and possible values. By default, no pre-trained
+            weights are used.
+        progress (bool, optional): If True, displays a progress bar of the
+            download to stderr. Default is True.
+        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
+            base class. Please refer to the `source code
+            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
+            for more details about this class.
+    .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
+        :members:
+    """
+    weights = ResNeXt101_64X4D_Weights.verify(weights)
+
+    _ovewrite_named_param(kwargs, "groups", 64)
+    _ovewrite_named_param(kwargs, "width_per_group", 4)
+    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
+
+
 @handle_legacy_interface(weights=("pretrained", Wide_ResNet50_2_Weights.IMAGENET1K_V1))
 def wide_resnet50_2(
     *, weights: Optional[Wide_ResNet50_2_Weights] = None, progress: bool = True, **kwargs: Any
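
Both new builders pin the ResNeXt-defining hyperparameters (groups=64, width_per_group=4) via _ovewrite_named_param before delegating to _resnet. That helper lives in torchvision/models/_utils.py and is not shown in this diff; the sketch below reflects the behavior the builders rely on, namely inject the default but reject an explicit conflicting value:

from typing import Any, Dict, TypeVar

V = TypeVar("V")

def _ovewrite_named_param(kwargs: Dict[str, Any], param: str, new_value: V) -> None:
    # Sketch of the helper used by resnext101_64x4d: architecture-defining kwargs
    # are forced to the builder's value; a conflicting caller value is an error.
    if param in kwargs:
        if kwargs[param] != new_value:
            raise ValueError(f"The parameter '{param}' expected value {new_value} but got {kwargs[param]} instead.")
    else:
        kwargs[param] = new_value

# resnext101_64x4d(groups=32) would therefore raise, while a plain
# resnext101_64x4d() call silently receives groups=64, width_per_group=4.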
