8 changes: 4 additions & 4 deletions src/anomalib/data/utils/path.py
@@ -146,15 +146,15 @@ def validate_path(
path: str | Path,
base_dir: str | Path | None = None,
should_exist: bool = True,
accepted_extensions: tuple[str, ...] | None = None,
extensions: tuple[str, ...] | None = None,
) -> Path:
"""Validate the path.

Args:
path (str | Path): Path to validate.
base_dir (str | Path): Base directory to restrict file access.
should_exist (bool): If True, do not raise an exception if the path does not exist.
accepted_extensions (tuple[str, ...] | None): Accepted extensions for the path. An exception is raised if the
extensions (tuple[str, ...] | None): Accepted extensions for the path. An exception is raised if the
path does not have one of the accepted extensions. If None, no check is performed. Defaults to None.

Returns:
@@ -221,8 +221,8 @@ def validate_path(
raise PermissionError(msg)

# Check if the path has one of the accepted extensions
if accepted_extensions is not None and path.suffix not in accepted_extensions:
msg = f"Path extension is not accepted. Accepted extensions: {accepted_extensions}. Path: {path}"
if extensions is not None and path.suffix not in extensions:
msg = f"Path extension is not accepted. Accepted extensions: {extensions}. Path: {path}"
raise ValueError(msg)

return path
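For reference, a minimal usage sketch of the renamed keyword (the file path and extension tuple below are illustrative, not taken from the PR):

from anomalib.data.utils.path import validate_path

# Call sites that previously passed `accepted_extensions=...` now pass `extensions=...`.
# The path below is illustrative; with the default `should_exist=True` it must exist on disk.
image_path = validate_path("images/000.png", extensions=(".png", ".jpg"))

# A path whose suffix is not in `extensions` raises ValueError, per the check shown above.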
6 changes: 3 additions & 3 deletions src/anomalib/metrics/__init__.py
@@ -11,7 +11,7 @@
import torchmetrics
from omegaconf import DictConfig, ListConfig

from . import per_image
from . import pimo
from .anomaly_score_distribution import AnomalyScoreDistribution
from .aupr import AUPR
from .aupro import AUPRO
@@ -20,7 +20,7 @@
from .f1_max import F1Max
from .f1_score import F1Score
from .min_max import MinMax
from .per_image import AUPIMO, PIMO
from .pimo import AUPIMO, PIMO
from .precision_recall_curve import BinaryPrecisionRecallCurve
from .pro import PRO
from .threshold import F1AdaptiveThreshold, ManualThreshold
@@ -37,7 +37,7 @@
"ManualThreshold",
"MinMax",
"PRO",
"per_image",
"pimo",
"PIMO",
"AUPIMO",
]
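In downstream code, the rename only changes the submodule name, not the metric classes. A minimal sketch, assuming the public `anomalib.metrics` package shown above:

from anomalib.metrics import AUPIMO, PIMO, pimo  # new submodule name

# The previous spelling is no longer exported:
# from anomalib.metrics import per_image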
483 changes: 0 additions & 483 deletions src/anomalib/metrics/per_image/utils.py

This file was deleted.

Another file's diff follows (file name not shown in this extract):
@@ -9,12 +9,6 @@

from .enums import StatsOutliersPolicy, StatsRepeatedPolicy, ThresholdMethod
from .pimo import AUPIMO, PIMO, AUPIMOResult, PIMOResult
from .utils import (
compare_models_pairwise_ttest_rel,
compare_models_pairwise_wilcoxon,
format_pairwise_tests_results,
per_image_scores_stats,
)

__all__ = [
# constants
@@ -27,9 +21,4 @@
# torchmetrics interfaces
"PIMO",
"AUPIMO",
# utils
"compare_models_pairwise_ttest_rel",
"compare_models_pairwise_wilcoxon",
"format_pairwise_tests_results",
"per_image_scores_stats",
]
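A sketch of what this subpackage still exports after the removal above, using only the names visible in the imports and `__all__`; the `anomalib.metrics.pimo` path is an assumption based on the rename:

from anomalib.metrics.pimo import (  # assumed subpackage path
    AUPIMO,
    AUPIMOResult,
    PIMO,
    PIMOResult,
    StatsOutliersPolicy,
    StatsRepeatedPolicy,
    ThresholdMethod,
)

# The pairwise-comparison helpers (compare_models_pairwise_ttest_rel,
# compare_models_pairwise_wilcoxon, format_pairwise_tests_results,
# per_image_scores_stats) are removed along with utils.py and are no longer importable.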
Another file's diff follows (file name not shown in this extract):
@@ -12,17 +12,12 @@
# SPDX-License-Identifier: Apache-2.0

import logging
from collections import OrderedDict
from typing import TYPE_CHECKING

import torch
from torch import Tensor

from .utils import images_classes_from_masks

if TYPE_CHECKING:
from .pimo import AUPIMOResult

logger = logging.getLogger(__name__)


@@ -139,10 +134,6 @@ def is_thresh_bounds(thresh_bounds: tuple[float, float]) -> None:


def is_anomaly_maps(anomaly_maps: Tensor) -> None:
if not isinstance(anomaly_maps, Tensor):
msg = f"Expected anomaly maps to be an Tensor, but got {type(anomaly_maps)}"
raise TypeError(msg)

if anomaly_maps.ndim != 3:
msg = f"Expected anomaly maps have 3 dimensions (N, H, W), but got {anomaly_maps.ndim} dimensions"
raise ValueError(msg)
@@ -156,10 +147,6 @@ def is_anomaly_maps(anomaly_maps: Tensor) -> None:


def is_masks(masks: Tensor) -> None:
if not isinstance(masks, Tensor):
msg = f"Expected masks to be an Tensor, but got {type(masks)}"
raise TypeError(msg)

if masks.ndim != 3:
msg = f"Expected masks have 3 dimensions (N, H, W), but got {masks.ndim} dimensions"
raise ValueError(msg)
@@ -185,10 +172,6 @@ def is_masks(masks: Tensor) -> None:


def is_binclf_curves(binclf_curves: Tensor, valid_threshs: Tensor | None) -> None:
if not isinstance(binclf_curves, Tensor):
msg = f"Expected binclf curves to be an Tensor, but got {type(binclf_curves)}"
raise TypeError(msg)

if binclf_curves.ndim != 4:
msg = f"Expected binclf curves to be 4D, but got {binclf_curves.ndim}D"
raise ValueError(msg)
@@ -229,10 +212,6 @@ def is_binclf_curves(binclf_curves: Tensor, valid_threshs: Tensor | None) -> None:


def is_images_classes(images_classes: Tensor) -> None:
if not isinstance(images_classes, Tensor):
msg = f"Expected image classes to be an Tensor, but got {type(images_classes)}."
raise TypeError(msg)

if images_classes.ndim != 1:
msg = f"Expected image classes to be 1D, but got {images_classes.ndim}D."
raise ValueError(msg)
@@ -258,10 +237,6 @@ def is_images_classes(images_classes: Tensor) -> None:


def is_rates(rates: Tensor, nan_allowed: bool) -> None:
if not isinstance(rates, Tensor):
msg = f"Expected rates to be an Tensor, but got {type(rates)}."
raise TypeError(msg)

if rates.ndim != 1:
msg = f"Expected rates to be 1D, but got {rates.ndim}D."
raise ValueError(msg)
@@ -307,10 +282,6 @@ def is_rate_curve(rate_curve: Tensor, nan_allowed: bool, decreasing: bool) -> None:


def is_per_image_rate_curves(rate_curves: Tensor, nan_allowed: bool, decreasing: bool | None) -> None:
if not isinstance(rate_curves, Tensor):
msg = f"Expected per-image rate curves to be an Tensor, but got {type(rate_curves)}."
raise TypeError(msg)

if rate_curves.ndim != 2:
msg = f"Expected per-image rate curves to be 2D, but got {rate_curves.ndim}D."
raise ValueError(msg)
@@ -454,177 +425,3 @@ def is_image_class(image_class: int) -> None:
if image_class not in {0, 1}:
msg = f"Expected image class to be either 0 for 'normal' or 1 for 'anomalous', but got {image_class}."
raise ValueError(msg)


def is_models_ordered(models_ordered: tuple[str, ...]) -> None:
if not isinstance(models_ordered, tuple):
msg = f"Expected models ordered to be a tuple, but got {type(models_ordered)}."
raise TypeError(msg)

if len(models_ordered) < 2:
msg = f"Expected models ordered to have at least 2 models, but got {len(models_ordered)}."
raise ValueError(msg)

for model_name in models_ordered:
if not isinstance(model_name, str):
msg = f"Expected model name to be a string, but got {type(model_name)} for model {model_name}."
raise TypeError(msg)

if model_name == "":
msg = "Expected model name to be non-empty, but got empty string."
raise ValueError(msg)

num_redundant_models = len(models_ordered) - len(set(models_ordered))
if num_redundant_models > 0:
msg = f"Expected models ordered to have unique models, but got {num_redundant_models} redundant models."
raise ValueError(msg)


def is_confidences(confidences: dict[tuple[str, str], float]) -> None:
if not isinstance(confidences, dict):
msg = f"Expected confidences to be a dict, but got {type(confidences)}."
raise TypeError(msg)

for (model1, model2), confidence in confidences.items():
if not isinstance(model1, str):
msg = f"Expected model name to be a string, but got {type(model1)} for model {model1}."
raise TypeError(msg)

if not isinstance(model2, str):
msg = f"Expected model name to be a string, but got {type(model2)} for model {model2}."
raise TypeError(msg)

if not isinstance(confidence, float):
msg = f"Expected confidence to be a float, but got {type(confidence)} for models {model1} and {model2}."
raise TypeError(msg)

if not (0 <= confidence <= 1):
msg = f"Expected confidence to be between 0 and 1, but got {confidence} for models {model1} and {model2}."
raise ValueError(msg)


def joint_validate_models_ordered_and_confidences(
models_ordered: tuple[str, ...],
confidences: dict[tuple[str, str], float],
) -> None:
num_models = len(models_ordered)
expected_num_pairs = num_models * (num_models - 1)

if len(confidences) != expected_num_pairs:
msg = f"Expected {expected_num_pairs} pairs of models, but got {len(confidences)} pairs of models."
raise ValueError(msg)

models_in_confidences = {model for pair_models in confidences for model in pair_models}

diff = set(models_ordered).symmetric_difference(models_in_confidences)
if len(diff) > 0:
msg = (
"Expected models in confidences to be the same as models ordered, but got models missing in one"
f"of them: {diff}."
)
raise ValueError(msg)


def is_scores_per_model_tensor(scores_per_model: dict[str, Tensor] | OrderedDict[str, Tensor]) -> None:
first_key_value = None

for model_name, scores in scores_per_model.items():
if scores.ndim != 1:
msg = f"Expected scores to be 1D, but got {scores.ndim}D for model {model_name}."
raise ValueError(msg)

num_valid_scores = scores[~torch.isnan(scores)].numel()

if num_valid_scores < 1:
msg = f"Expected at least 1 non-nan score, but got {num_valid_scores} for model {model_name}."
raise ValueError(msg)

if first_key_value is None:
first_key_value = (model_name, scores)
continue

first_model_name, first_scores = first_key_value

# same shape
if scores.shape[0] != first_scores.shape[0]:
msg = (
"Expected scores to have the same number of scores, "
f"but got ({model_name}) {scores.shape[0]} != {first_scores.shape[0]} ({first_model_name})."
)
raise ValueError(msg)

# `nan` at the same indices
if (torch.isnan(scores) != torch.isnan(first_scores)).any():
msg = (
"Expected `nan` values, if any, to be at the same indices, "
f"but there are differences between models {model_name} and {first_model_name}."
)
raise ValueError(msg)


def is_scores_per_model_aupimoresult(
scores_per_model: dict[str, "AUPIMOResult"] | OrderedDict[str, "AUPIMOResult"],
) -> None:
first_key_value = None

for model_name, aupimoresult in scores_per_model.items():
if first_key_value is None:
first_key_value = (model_name, aupimoresult)
continue

first_model_name, first_aupimoresult = first_key_value

if aupimoresult.fpr_bounds != first_aupimoresult.fpr_bounds:
msg = (
"Expected AUPIMOResult objects in scores per model to have the same FPR bounds, "
f"but got ({model_name}) {aupimoresult.fpr_bounds} != "
f"{first_aupimoresult.fpr_bounds} ({first_model_name})."
)
raise ValueError(msg)


def is_scores_per_model(
scores_per_model: dict[str, Tensor]
| OrderedDict[str, Tensor]
| dict[str, "AUPIMOResult"]
| OrderedDict[str, "AUPIMOResult"],
) -> None:
# it has to be imported here to avoid circular imports
from .pimo import AUPIMOResult

if not isinstance(scores_per_model, dict | OrderedDict):
msg = f"Expected scores per model to be a dictionary or ordered dictionary, but got {type(scores_per_model)}."
raise TypeError(msg)

if len(scores_per_model) < 2:
msg = f"Expected scores per model to have at least 2 models, but got {len(scores_per_model)}."
raise ValueError(msg)

if not all(isinstance(model_name, str) for model_name in scores_per_model):
msg = "Expected scores per model to have model names (strings) as keys."
raise TypeError(msg)

first_instance = next(iter(scores_per_model.values()))

if (
isinstance(first_instance, Tensor)
and any(not isinstance(scores, Tensor) for scores in scores_per_model.values())
) or (
isinstance(first_instance, AUPIMOResult)
and any(not isinstance(scores, AUPIMOResult) for scores in scores_per_model.values())
):
msg = (
"Values in the scores per model dict must have the same type for values (Tensor or AUPIMOResult), "
"but more than one type was found."
)
raise TypeError(msg)

if isinstance(first_instance, Tensor):
is_scores_per_model_tensor(scores_per_model)
return

is_scores_per_model_tensor(
{model_name: scores.aupimos for model_name, scores in scores_per_model.items()},
)

is_scores_per_model_aupimoresult(scores_per_model)
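A brief, hypothetical sketch of how the trimmed validators behave after this change; the module path in the import is an assumption, since the file name is not visible in this extract:

import torch

from anomalib.metrics.pimo import _validate  # assumed module path

anomaly_maps = torch.rand(8, 256, 256)   # (N, H, W) tensor
_validate.is_anomaly_maps(anomaly_maps)  # passes the remaining dimensionality check

# With the isinstance(..., Tensor) guards removed, a non-tensor argument is no longer
# rejected with an explicit TypeError; it would instead fail on the first tensor
# attribute access (for example `.ndim`).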