3 files changed, +33 −0 (all under src/anomalib/models/image)
File 1: DRAEM model

 import torch
 from lightning.pytorch.utilities.types import STEP_OUTPUT
 from torch import nn
+from torchvision.transforms.v2 import Compose, Resize, Transform

 from anomalib import LearningType
 from anomalib.data.utils import Augmenter
@@ -150,3 +151,13 @@ def learning_type(self) -> LearningType:
             LearningType: Learning type of the model.
         """
         return LearningType.ONE_CLASS
+
+    @staticmethod
+    def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform:
+        """Default transform for DRAEM. Normalization is not needed as the images are scaled to [0, 1] in Dataset."""
+        image_size = image_size or (256, 256)
+        return Compose(
+            [
+                Resize(image_size, antialias=True),
+            ],
+        )
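For reference, a minimal standalone sketch of what the new DRAEM default does: it only resizes, with no normalization step, since the Dataset already scales pixel values to [0, 1]. The dummy input shape below is an assumption, not part of the change.

    import torch
    from torchvision.transforms.v2 import Compose, Resize

    # Rebuild the transform exactly as configure_transforms() returns it for DRAEM.
    transform = Compose([Resize((256, 256), antialias=True)])

    # Hypothetical input: one RGB image already scaled to [0, 1], arbitrary spatial size.
    image = torch.rand(3, 700, 900)
    resized = transform(image)
    print(resized.shape)  # torch.Size([3, 256, 256]); no normalization is applied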
File 2: DSR model

 import torch
 from lightning.pytorch.utilities.types import STEP_OUTPUT, OptimizerLRScheduler
 from torch import Tensor
+from torchvision.transforms.v2 import Compose, Resize, Transform

 from anomalib import LearningType
 from anomalib.data.utils import DownloadInfo, download_and_extract
@@ -191,3 +192,13 @@ def learning_type(self) -> LearningType:
             LearningType: Learning type of the model.
         """
         return LearningType.ONE_CLASS
+
+    @staticmethod
+    def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform:
+        """Default transform for DSR. Normalization is not needed as the images are scaled to [0, 1] in Dataset."""
+        image_size = image_size or (256, 256)
+        return Compose(
+            [
+                Resize(image_size, antialias=True),
+            ],
+        )
File 3: RKDE model

 import torch
 from lightning.pytorch.utilities.types import STEP_OUTPUT
+from torchvision.transforms.v2 import Compose, Resize, Transform

 from anomalib import LearningType
 from anomalib.models.components import AnomalyModule, MemoryBankMixin
@@ -143,3 +144,13 @@ def learning_type(self) -> LearningType:
             LearningType: Learning type of the model.
         """
         return LearningType.ONE_CLASS
+
+    @staticmethod
+    def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform:
+        """Default transform for RKDE."""
+        image_size = image_size or (240, 360)
+        return Compose(
+            [
+                Resize(image_size, antialias=True),
+            ],
+        )
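All three additions are staticmethods, so the default transform can be obtained from the class itself, without instantiating the model. A hedged sketch, assuming the DRAEM module is exported as Draem from anomalib.models (the class name does not appear in this diff):

    from anomalib.models import Draem  # assumed export; not shown in the diff

    default_tf = Draem.configure_transforms()           # Resize((256, 256), antialias=True)
    custom_tf = Draem.configure_transforms((512, 512))  # caller-supplied size overrides the default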