From d7ce3a00b8349142607ce837d5d14563b2b142c4 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 22 Oct 2024 11:23:44 -0700
Subject: [PATCH 1/4] Use SerialRunner if only one CUDA device is available.

Signed-off-by: Weilin Xu
---
 src/anomalib/pipelines/benchmark/pipeline.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/anomalib/pipelines/benchmark/pipeline.py b/src/anomalib/pipelines/benchmark/pipeline.py
index 730b3ecccc..61777ccd7a 100644
--- a/src/anomalib/pipelines/benchmark/pipeline.py
+++ b/src/anomalib/pipelines/benchmark/pipeline.py
@@ -20,11 +20,12 @@ def _setup_runners(args: dict) -> list[Runner]:
     accelerators = args["accelerator"] if isinstance(args["accelerator"], list) else [args["accelerator"]]
     runners: list[Runner] = []
     for accelerator in accelerators:
-        if accelerator == "cpu":
-            runners.append(SerialRunner(BenchmarkJobGenerator("cpu")))
-        elif accelerator == "cuda":
-            runners.append(ParallelRunner(BenchmarkJobGenerator("cuda"), n_jobs=torch.cuda.device_count()))
-        else:
+        if accelerator not in ["cpu", "cuda"]:
             msg = f"Unsupported accelerator: {accelerator}"
             raise ValueError(msg)
+        device_count = torch.cuda.device_count()
+        if device_count <= 1:
+            runners.append(SerialRunner(BenchmarkJobGenerator(accelerator)))
+        else:
+            runners.append(ParallelRunner(BenchmarkJobGenerator(accelerator), n_jobs=device_count))
     return runners

From d77e972822f002473457b34df9f91fb222da976d Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 22 Oct 2024 11:35:55 -0700
Subject: [PATCH 2/4] Resolve PLR6201.

Signed-off-by: Weilin Xu
---
 src/anomalib/pipelines/benchmark/pipeline.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/anomalib/pipelines/benchmark/pipeline.py b/src/anomalib/pipelines/benchmark/pipeline.py
index 61777ccd7a..f68ee5e2a1 100644
--- a/src/anomalib/pipelines/benchmark/pipeline.py
+++ b/src/anomalib/pipelines/benchmark/pipeline.py
@@ -20,7 +20,7 @@ def _setup_runners(args: dict) -> list[Runner]:
     accelerators = args["accelerator"] if isinstance(args["accelerator"], list) else [args["accelerator"]]
     runners: list[Runner] = []
     for accelerator in accelerators:
-        if accelerator not in ["cpu", "cuda"]:
+        if accelerator not in {"cpu", "cuda"}:
             msg = f"Unsupported accelerator: {accelerator}"
             raise ValueError(msg)
         device_count = torch.cuda.device_count()

From 1003a304678a7ee835ef1c23aa745087f54f7658 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 22 Oct 2024 11:49:18 -0700
Subject: [PATCH 3/4] Update CHANGELOG.

Signed-off-by: Weilin Xu
---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b50bf09ecb..8b9025d055 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
 ### Fixed
 
+- Make single GPU benchmarking 5x more efficient by [mzweilin](https://github.com/mzweilin) in https://github.com/openvinotoolkit/anomalib/pull/2390
+
 ### New Contributors
 
 **Full Changelog**:

From aaffba877519e0bbccb7f92929d5b17939798da7 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Wed, 23 Oct 2024 09:22:22 -0700
Subject: [PATCH 4/4] Keep the same logging level in benchmarking.

Signed-off-by: Weilin Xu
---
 src/anomalib/utils/logging.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/anomalib/utils/logging.py b/src/anomalib/utils/logging.py
index 21f7994fbf..d73ef440c4 100644
--- a/src/anomalib/utils/logging.py
+++ b/src/anomalib/utils/logging.py
@@ -74,10 +74,8 @@ def redirect_logs(log_file: str) -> None:
     """
     Path(log_file).parent.mkdir(exist_ok=True, parents=True)
     logger_file_handler = logging.FileHandler(log_file)
-    root_logger = logging.getLogger()
-    root_logger.setLevel(logging.DEBUG)
     format_string = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-    logging.basicConfig(format=format_string, level=logging.DEBUG, handlers=[logger_file_handler])
+    logging.basicConfig(format=format_string, handlers=[logger_file_handler])
     logging.captureWarnings(capture=True)
     # remove other handlers from all loggers
     loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
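
The net effect of patches 1 and 2 on runner selection can be checked without a GPU. Below is a minimal sketch, not part of the series: setup_runners_sketch is a hypothetical stand-in for anomalib's _setup_runners, the SerialRunner/ParallelRunner/BenchmarkJobGenerator classes are reduced to plain strings, and torch.cuda.device_count() is replaced by an explicit device_count argument so both branches can be exercised on any machine.

def setup_runners_sketch(accelerators: list[str], device_count: int) -> list[str]:
    """Mirror the branch structure of _setup_runners after patches 1 and 2."""
    runners: list[str] = []
    for accelerator in accelerators:
        # Unchanged validation path: unknown accelerators still raise.
        if accelerator not in {"cpu", "cuda"}:
            msg = f"Unsupported accelerator: {accelerator}"
            raise ValueError(msg)
        # New behavior: serial execution whenever at most one CUDA device exists,
        # avoiding the process-pool overhead of a one-job ParallelRunner.
        if device_count <= 1:
            runners.append(f"SerialRunner({accelerator})")
        else:
            runners.append(f"ParallelRunner({accelerator}, n_jobs={device_count})")
    return runners


# Single GPU (or CPU-only): no process pool is spawned.
assert setup_runners_sketch(["cuda"], device_count=1) == ["SerialRunner(cuda)"]
# Multi-GPU: one job per device, as before the series.
assert setup_runners_sketch(["cpu", "cuda"], device_count=4) == [
    "ParallelRunner(cpu, n_jobs=4)",
    "ParallelRunner(cuda, n_jobs=4)",
]

One consequence visible in the second assertion: after the change, the CUDA device count also governs the "cpu" accelerator, so a multi-GPU host runs CPU benchmark jobs through a ParallelRunner as well.

Patch 4 leans on a stdlib detail: logging.basicConfig only changes the root logger's level when level= is passed, so dropping level=logging.DEBUG leaves the root logger at its pre-existing level (WARNING by default) while still attaching the file handler. A sketch, assuming a fresh interpreter where no handlers have been configured yet:

import logging

# Configure format and handlers but pass no level=, as redirect_logs
# does after PATCH 4/4; the root logger's level is left untouched.
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
assert logging.getLogger().getEffectiveLevel() == logging.WARNING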