222 changes: 222 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-2.6.0-foss-2024a.eb
@@ -0,0 +1,222 @@
name = 'PyTorch'
version = '2.6.0'

homepage = 'https://pytorch.org/'
description = """Tensors and Dynamic neural networks in Python with strong GPU acceleration.
PyTorch is a deep learning framework that puts Python first."""

toolchain = {'name': 'foss', 'version': '2024a'}

source_urls = [GITHUB_RELEASE]
sources = ['%(namelower)s-v%(version)s.tar.gz']
patches = [
'PyTorch-1.7.0_disable-dev-shm-test.patch',
'PyTorch-1.12.1_add-hypothesis-suppression.patch',
'PyTorch-1.12.1_fix-TestTorch.test_to.patch',
'PyTorch-1.13.1_fix-gcc-12-warning-in-fbgemm.patch',
'PyTorch-1.13.1_skip-failing-singular-grad-test.patch',
'PyTorch-2.0.1_avoid-test_quantization-failures.patch',
'PyTorch-2.0.1_skip-failing-gradtest.patch',
'PyTorch-2.0.1_skip-test_shuffle_reproducibility.patch',
'PyTorch-2.0.1_skip-tests-skipped-in-subprocess.patch',
'PyTorch-2.1.0_remove-test-requiring-online-access.patch',
'PyTorch-2.1.0_skip-dynamo-test_predispatch.patch',
'PyTorch-2.1.2_workaround_dynamo_failure_without_nnpack.patch',
'PyTorch-2.3.0_disable_test_linear_package_if_no_half_types_are_available.patch',
'PyTorch-2.3.0_fix-mkldnn-avx512-f32-bias.patch',
'PyTorch-2.3.0_skip_test_var_mean_differentiable.patch',
'PyTorch-2.6.0_add-checkfunctionexists-include.patch',
'PyTorch-2.6.0_allow-sympy-1.13.3.patch',
'PyTorch-2.6.0_avoid_caffe2_test_cpp_jit.patch',
'PyTorch-2.6.0_disable_DataType_dependent_test_if_tensorboard_is_not_available.patch',
'PyTorch-2.6.0_disable_tests_which_need_network_download.patch',
'PyTorch-2.6.0_disable-gcc12-warnings.patch',
'PyTorch-2.6.0_fix-accuracy-issues-in-linalg_solve.patch',
'PyTorch-2.6.0_fix-cpuinfo-bug-with-smt.patch',
'PyTorch-2.6.0_fix-distributed-tests-without-gpus.patch',
'PyTorch-2.6.0_fix-edge-case-causing-test_trigger_bisect_on_error-failure.patch',
'PyTorch-2.6.0_fix-ExcTests.test_trigger_on_error.patch',
'PyTorch-2.6.0_fix-flaky-test_aot_export_with_torch_cond.patch',
'PyTorch-2.6.0_fix-inductor-device-interface.patch',
'PyTorch-2.6.0_fix-server-in-test_control_plane.patch',
'PyTorch-2.6.0_fix-skip-decorators.patch',
'PyTorch-2.6.0_fix-sympy-1.13-compat.patch',
'PyTorch-2.6.0_fix-test_autograd_cpp_node_saved_float.patch',
'PyTorch-2.6.0_fix-test_linear_with_embedding.patch',
'PyTorch-2.6.0_fix-test_linear_with_in_out_buffer-without-mkl.patch',
'PyTorch-2.6.0_fix-test_public_bindings.patch',
'PyTorch-2.6.0_fix-test_unbacked_bindings_for_divisible_u_symint.patch',
'PyTorch-2.6.0_fix-vsx-vector-shift-functions.patch',
'PyTorch-2.6.0_fix-xnnpack-float16-convert.patch',
'PyTorch-2.6.0_increase-tolerance-test_aotdispatch-matmul.patch',
'PyTorch-2.6.0_increase-tolerance-test_quick-baddbmm.patch',
'PyTorch-2.6.0_increase-tolerance-test_vmap_autograd_grad.patch',
'PyTorch-2.6.0_remove-test_slice_with_floordiv.patch',
'PyTorch-2.6.0_show-test-duration.patch',
'PyTorch-2.6.0_skip-diff-test-on-ppc.patch',
'PyTorch-2.6.0_skip-test_checkpoint_wrapper_parity-on-cpu.patch',
'PyTorch-2.6.0_skip-test_init_from_local_shards.patch',
'PyTorch-2.6.0_skip-test_jvp_linalg_det_singular.patch',
'PyTorch-2.6.0_skip-test-requiring-MKL.patch',
'PyTorch-2.6.0_skip-test_segfault.patch',
'PyTorch-2.6.0_skip-tests-without-fbgemm.patch',
]
checksums = [
{'pytorch-v2.6.0.tar.gz': '3005690eb7b083c443a38c7657938af63902f524ad87a6c83f1aca38c77e3b57'},
{'PyTorch-1.7.0_disable-dev-shm-test.patch': '622cb1eaeadc06e13128a862d9946bcc1f1edd3d02b259c56a9aecc4d5406b8a'},
{'PyTorch-1.12.1_add-hypothesis-suppression.patch':
'e71ffb94ebe69f580fa70e0de84017058325fdff944866d6bd03463626edc32c'},
{'PyTorch-1.12.1_fix-TestTorch.test_to.patch': '75f27987c3f25c501e719bd2b1c70a029ae0ee28514a97fe447516aee02b1535'},
{'PyTorch-1.13.1_fix-gcc-12-warning-in-fbgemm.patch':
'5c7be91a6096083a0b1315efe0001537499c600f1f569953c6a2c7f4cc1d0910'},
{'PyTorch-1.13.1_skip-failing-singular-grad-test.patch':
'72688a57b2bb617665ad1a1d5e362c5111ae912c10936bb38a089c0204729f48'},
{'PyTorch-2.0.1_avoid-test_quantization-failures.patch':
'02e3f47e4ed1d7d6077e26f1ae50073dc2b20426269930b505f4aefe5d2f33cd'},
{'PyTorch-2.0.1_skip-failing-gradtest.patch': '8030bdec6ba49b057ab232d19a7f1a5e542e47e2ec340653a246ec9ed59f8bc1'},
{'PyTorch-2.0.1_skip-test_shuffle_reproducibility.patch':
'7047862abc1abaff62954da59700f36d4f39fcf83167a638183b1b7f8fec78ae'},
{'PyTorch-2.0.1_skip-tests-skipped-in-subprocess.patch':
'166c134573a95230e39b9ea09ece3ad8072f39d370c9a88fb2a1e24f6aaac2b5'},
{'PyTorch-2.1.0_remove-test-requiring-online-access.patch':
'35184b8c5a1b10f79e511cc25db3b8a5585a5d58b5d1aa25dd3d250200b14fd7'},
{'PyTorch-2.1.0_skip-dynamo-test_predispatch.patch':
'6298daf9ddaa8542850eee9ea005f28594ab65b1f87af43d8aeca1579a8c4354'},
{'PyTorch-2.1.2_workaround_dynamo_failure_without_nnpack.patch':
'fb96eefabf394617bbb3fbd3a7a7c1aa5991b3836edc2e5d2a30e708bfe49ba1'},
{'PyTorch-2.3.0_disable_test_linear_package_if_no_half_types_are_available.patch':
'23416f2d9d5226695ec3fbea0671e3650c655c19deefd3f0f8ddab5afa50f485'},
{'PyTorch-2.3.0_fix-mkldnn-avx512-f32-bias.patch':
'ee07d21c3ac7aeb0bd0e39507b18a417b9125284a529102929c4b5c6727c2976'},
{'PyTorch-2.3.0_skip_test_var_mean_differentiable.patch':
'9703fd0f1fca8916f6d79d83e9a7efe8e3f717362a5fdaa8f5d9da90d0c75018'},
{'PyTorch-2.6.0_add-checkfunctionexists-include.patch':
'93579e35e946fb06025a50c42f3625ed8b8ac9f503a963cc23767e2c8869f0ea'},
{'PyTorch-2.6.0_allow-sympy-1.13.3.patch': 'd17f5c528f64fe5e905c9154e90654e8ed2b7f0c16418ffd84ed3913aeb57eea'},
{'PyTorch-2.6.0_avoid_caffe2_test_cpp_jit.patch':
'88d03d90359bc1fe3cfa3562624d4fbfd4c6654c9199c556ca912ac55289ce55'},
{'PyTorch-2.6.0_disable_DataType_dependent_test_if_tensorboard_is_not_available.patch':
'74db866787f1e666ed3b35db5204f05a0ba8d989fb23057a72dd07928388dc46'},
{'PyTorch-2.6.0_disable_tests_which_need_network_download.patch':
'fe76129811e4eb24d0e12c397335a4c7971b0c4e48ce9cdb9169f3ef9de7aac4'},
{'PyTorch-2.6.0_disable-gcc12-warnings.patch': '892643650788b743106ebe4e70c68be42a756eba797f0f79e31708d6e008a620'},
{'PyTorch-2.6.0_fix-accuracy-issues-in-linalg_solve.patch':
'a6b1cfe8f03ad5b17437e04e6a0369a25fcc79eed939ce6912ceca1c0ab0f444'},
{'PyTorch-2.6.0_fix-cpuinfo-bug-with-smt.patch':
'2ecb182802e795ed79b7a5f2ce9459780290b4097e981a737a98d4b47d3e2555'},
{'PyTorch-2.6.0_fix-distributed-tests-without-gpus.patch':
'011cffc098b6818eb160b6bec2e671dec46cb2a8457ce32144ea01cc9ed4290a'},
{'PyTorch-2.6.0_fix-edge-case-causing-test_trigger_bisect_on_error-failure.patch':
'fd918fa510bf04c95f3bcc2f4abea417632a0fefb278154ec95207ca0d1719ed'},
{'PyTorch-2.6.0_fix-ExcTests.test_trigger_on_error.patch':
'445472d43a61523b2ed169023f5f6db197bc2df8408f59e6254e55f5cb1d3a11'},
{'PyTorch-2.6.0_fix-flaky-test_aot_export_with_torch_cond.patch':
'79cf77a795e06c4c3206a998ce8f4a92072f79736803008ede65e5ec2f204bfc'},
{'PyTorch-2.6.0_fix-inductor-device-interface.patch':
'e8e6af1ea5f01568c23127d4f83aacb482ec9005ba558b68763748a581bcc5bc'},
{'PyTorch-2.6.0_fix-server-in-test_control_plane.patch':
'1337689ff28ecaa8d1d0edf60d322bcdd7846fec040925325d357b19eb6e4342'},
{'PyTorch-2.6.0_fix-skip-decorators.patch': 'ec1ba1ef2a2b2c6753a0b35d10c6af0457fc90fe98e2f77979745d9f79d79c86'},
{'PyTorch-2.6.0_fix-sympy-1.13-compat.patch': 'b801690a5b79ba6e4916ac6f719c36682b2a197582aee5e6f385e808f776920e'},
{'PyTorch-2.6.0_fix-test_autograd_cpp_node_saved_float.patch':
'928c4b1dc16f3d4a7bec29d8749b89ebd41488845938e2514c7fa8c048950e33'},
{'PyTorch-2.6.0_fix-test_linear_with_embedding.patch':
'56c053de7cfaa2f9898c3b036a185b499f5d44a7b4cd0442c45a8c94928322bf'},
{'PyTorch-2.6.0_fix-test_linear_with_in_out_buffer-without-mkl.patch':
'8cf9e5d434eb8d3b81400622ca23714c7002a0b835e7e08b384b84408c7ed085'},
{'PyTorch-2.6.0_fix-test_public_bindings.patch':
'066d88acd8156ed3f91b6a8e924de57f8aef944aa1bf67dc453b830ee1c26094'},
{'PyTorch-2.6.0_fix-test_unbacked_bindings_for_divisible_u_symint.patch':
'5f5ce1e275888cd6a057a0769fffaa9e49dde003ba191fd70b0265d8c6259a9b'},
{'PyTorch-2.6.0_fix-vsx-vector-shift-functions.patch':
'82ce0b48e3b7c3dfd3a2ba915f4675d5c3a6d149646e1e0d6a29eedbbaecc8bd'},
{'PyTorch-2.6.0_fix-xnnpack-float16-convert.patch':
'a6fcb475040c6fed2c0ec8b3f9c1e9fb964220413e84c8f2ee4092770ee6ac7d'},
{'PyTorch-2.6.0_increase-tolerance-test_aotdispatch-matmul.patch':
'c1c6ea41504e4479d258225ecefc7e9c5726934601610904ae555501a11e9109'},
{'PyTorch-2.6.0_increase-tolerance-test_quick-baddbmm.patch':
'9850facdfb5d98451249570788217ede07466cae9ba52cd03afd3ec803ba33c9'},
{'PyTorch-2.6.0_increase-tolerance-test_vmap_autograd_grad.patch':
'8d5eb53bb0a1456af333ae646c860033d6dd037bd9152601a200ca5c10ebf3cb'},
{'PyTorch-2.6.0_remove-test_slice_with_floordiv.patch':
'1b7ff59a595b9ebbc042d8ff53e3f6c72a1d3b04fb82228f4433473f28623f9b'},
{'PyTorch-2.6.0_show-test-duration.patch': '5508f2f9619204d9f3c356dbd4000a00d58f452ab2d64ae920eb8bc8b5484d75'},
{'PyTorch-2.6.0_skip-diff-test-on-ppc.patch': '6f2f87cad1b0ab8c5a0c7b3f7fbc14e4bdfbe61da26a3934ded9dda7fe368c74'},
{'PyTorch-2.6.0_skip-test_checkpoint_wrapper_parity-on-cpu.patch':
'600f74de167b6fea4d849229de6d653dc616093b456962729222d6bfa767a8e8'},
{'PyTorch-2.6.0_skip-test_init_from_local_shards.patch':
'222383195f6a3b7c545ffeadb4dd469b9f3361b42c0866de3d3f0f91f8fbe777'},
{'PyTorch-2.6.0_skip-test_jvp_linalg_det_singular.patch':
'3bbe8e585765d6db2a77ed0f751eadf924fbbedc95bbd88f447538ceede273fd'},
{'PyTorch-2.6.0_skip-test-requiring-MKL.patch':
'f1c9b1c77b09d59317fd52d390e7d948a147325b927ad6373c1fa1d1d6ea1ea8'},
{'PyTorch-2.6.0_skip-test_segfault.patch': '26806bd62e6b61b56ebaa52d68ca44c415a28124f684bd2fb373557ada68ef52'},
{'PyTorch-2.6.0_skip-tests-without-fbgemm.patch':
'ed35099de94a14322a879066da048ec9bc565dc81287b4adc4fec46f9afe90cf'},
]

osdependencies = [OS_PKG_IBVERBS_DEV]

builddependencies = [
('CMake', '3.29.3'),
('hypothesis', '6.103.1'),
# For tests
('parameterized', '0.9.0'),
('pytest-flakefinder', '1.1.0'),
('pytest-rerunfailures', '15.0'),
('pytest-shard', '0.1.2'),
('pytest-subtests', '0.13.1'),
('tlparse', '0.3.37'),
('optree', '0.14.1'),
('unittest-xml-reporting', '3.1.0'),
]

dependencies = [
('Ninja', '1.12.1'), # Required for JIT compilation of C++ extensions
('Python', '3.12.3'),
('Python-bundle-PyPI', '2024.06'),
('protobuf', '28.0'),
('protobuf-python', '5.28.0'),
('pybind11', '2.12.0'),
('PuLP', '2.8.0'),
('SciPy-bundle', '2024.05'),
('PyYAML', '6.0.2'),
('MPFR', '4.2.1'),
('GMP', '6.3.0'),
('numactl', '2.0.18'),
('FFmpeg', '7.0.2'),
('Pillow', '10.4.0'),
('expecttest', '0.2.1'),
('networkx', '3.4.2'),
('sympy', '1.13.3'),
('Z3', '4.13.0',),
]

buildcmd = '%(python)s setup.py build' # Run the (long) build in the build step

excluded_tests = {
'': [
# This test seems to take too long on NVIDIA Ampere at least.
'distributed/test_distributed_spawn',
# no xdoctest
'doctests',
# intermittent failures on various systems
# See https://github.com/easybuilders/easybuild-easyconfigs/issues/17712
'distributed/rpc/test_tensorpipe_agent',
# This test is expected to fail when run in PyTorch's own CI, but won't fail in our case:
# it just checks for a "CI" environment variable
'test_ci_sanity_check_fail',
]
}

local_test_opts = '--continue-through-error --pipe-logs --verbose %(excluded_tests)s'
runtest = 'cd test && PYTHONUNBUFFERED=1 %(python)s run_test.py ' + local_test_opts
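
# For illustration only (a sketch, not generated output): assuming the PyTorch easyblock
# expands %(python)s to the Python command and %(excluded_tests)s to an '--exclude' option
# listing the tests above, the test step runs roughly:
#   cd test && PYTHONUNBUFFERED=1 python run_test.py --continue-through-error --pipe-logs \
#       --verbose --exclude distributed/test_distributed_spawn doctests ...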

# In particular, test_quantization has a few corner cases that are triggered by the random input values
# and cannot easily be avoided, see https://github.com/pytorch/pytorch/issues/107030
# So allow a small number of tests to fail, as the tests "usually" succeed
max_failed_tests = 16

tests = ['PyTorch-check-cpp-extension.py']

moduleclass = 'ai'
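
The sanity-check step above runs PyTorch-check-cpp-extension.py, which is not included in this diff. As a rough, hypothetical sketch of such a check (the actual script may differ), a minimal test that JIT-compiles a trivial C++ operator via torch.utils.cpp_extension — the reason Ninja is listed as a runtime dependency — could look like this:

# Hypothetical sketch of a C++-extension smoke test; the real
# PyTorch-check-cpp-extension.py shipped alongside this easyconfig may differ.
import torch
from torch.utils.cpp_extension import load_inline

cpp_source = "torch::Tensor add_one(torch::Tensor x) { return x + 1; }"

# JIT-compile the extension (uses Ninja and the toolchain compilers at runtime).
ext = load_inline(name="check_cpp_extension", cpp_sources=cpp_source, functions=["add_one"])

assert torch.equal(ext.add_one(torch.zeros(3)), torch.ones(3))
print("C++ extension compiled and ran successfully")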
25 changes: 25 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-2.6.0_add-checkfunctionexists-include.patch
@@ -0,0 +1,25 @@
From 8d91bfd9654589c41b3bbb589bcb0bf95443c53e Mon Sep 17 00:00:00 2001
From: Nikita Shulga <[email protected]>
Date: Tue, 28 Jan 2025 08:40:31 -0800
Subject: [PATCH] [BE] Include CheckFunctionExists in `FindBLAS.cmake`
(#145849)

It's used in the script, so it must be included
Pull Request resolved: https://github.com/pytorch/pytorch/pull/145849
Approved by: https://github.com/Skylion007
---
cmake/Modules/FindBLAS.cmake | 1 +
1 file changed, 1 insertion(+)

diff --git a/cmake/Modules/FindBLAS.cmake b/cmake/Modules/FindBLAS.cmake
index 5ce875f529206..8e54eedb2aa8f 100644
--- a/cmake/Modules/FindBLAS.cmake
+++ b/cmake/Modules/FindBLAS.cmake
@@ -25,6 +25,7 @@ SET(WITH_BLAS "" CACHE STRING "Blas type [accelerate/acml/atlas/blis/generic/got
# Old FindBlas
INCLUDE(CheckCSourceRuns)
INCLUDE(CheckFortranFunctionExists)
+INCLUDE(CheckFunctionExists)

MACRO(Check_Fortran_Libraries LIBRARIES _prefix _name _flags _list)
# This macro checks for the existence of the combination of fortran libraries
18 changes: 18 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-2.6.0_allow-sympy-1.13.3.patch
@@ -0,0 +1,18 @@
Increase the allowed sympy version to 1.13.3, as done for PyTorch 2.7+ in e.g. https://github.com/pytorch/pytorch/pull/148575

This allows using our existing sympy 1.13.3 easyconfigs.

Author: Alexander Grund (TU Dresden)

diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1099,7 +1099,7 @@ def main():
"filelock",
"typing-extensions>=4.10.0",
'setuptools ; python_version >= "3.12"',
- 'sympy==1.13.1 ; python_version >= "3.9"',
+ "sympy>=1.13.3",
"networkx",
"jinja2",
"fsspec",
66 changes: 66 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-2.6.0_avoid_caffe2_test_cpp_jit.patch
@@ -0,0 +1,66 @@
Avoid tripping over //caffe2/test/cpp/jit:test_custom_class_registrations when IS_SANDCASTLE is set

Author: Ake Sandgren
Update for 2.6: Alexander Grund (TU Dresden)

diff --git a/test/export/test_export.py b/test/export/test_export.py
index 703a0c6e918..91892503955 100755
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -11324,7 +11324,7 @@ class TestExportCustomClass(TorchTestCase):
def setUp(self):
if IS_FBCODE:
lib_file_path = "//caffe2/test/cpp/jit:test_custom_class_registrations"
- elif IS_SANDCASTLE or IS_MACOS:
+ elif False or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
elif IS_WINDOWS:
lib_file_path = find_library_location("torchbind_test.dll")
diff --git a/test/export/test_lift_unlift.py b/test/export/test_lift_unlift.py
index c027fc55717..17358101b8c 100644
--- a/test/export/test_lift_unlift.py
+++ b/test/export/test_lift_unlift.py
@@ -147,7 +147,7 @@ class TestLift(TestCase):
def setUp(self):
if IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
- elif IS_SANDCASTLE or IS_FBCODE:
+ elif False or IS_FBCODE:
torch.ops.load_library(
"//caffe2/test/cpp/jit:test_custom_class_registrations"
)
@@ -380,7 +380,7 @@ class ConstantAttrMapTest(TestCase):
def setUp(self):
if IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
- elif IS_SANDCASTLE or IS_FBCODE:
+ elif False or IS_FBCODE:
torch.ops.load_library(
"//caffe2/test/cpp/jit:test_custom_class_registrations"
)
diff --git a/test/test_weak.py b/test/test_weak.py
index e8b6ee6f556..a6b3f0e052d 100644
--- a/test/test_weak.py
+++ b/test/test_weak.py
@@ -593,7 +593,7 @@ class WeakKeyDictionaryScriptObjectTestCase(TestCase):

def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
- if IS_SANDCASTLE or IS_FBCODE:
+ if False or IS_FBCODE:
torch.ops.load_library(
"//caffe2/test/cpp/jit:test_custom_class_registrations"
)
diff --git a/torch/testing/_internal/torchbind_impls.py b/torch/testing/_internal/torchbind_impls.py
index 5566b241f56..63159276572 100644
--- a/torch/testing/_internal/torchbind_impls.py
+++ b/torch/testing/_internal/torchbind_impls.py
@@ -113,7 +113,7 @@ def load_torchbind_test_lib():
IS_WINDOWS,
)

- if IS_SANDCASTLE or IS_FBCODE:
+ if False or IS_FBCODE:
torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations")
elif IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
20 changes: 20 additions & 0 deletions easybuild/easyconfigs/p/PyTorch/PyTorch-2.6.0_disable-gcc12-warnings.patch
@@ -0,0 +1,20 @@
GCC 12 emits a false-positive warning when compiling for some architectures, e.g. Intel Sapphire Rapids.
See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=112370

Suppress this warning so that the build does not fail.
Also disable another false positive that produces a lot of warnings/output.

Author: Alexander Grund (TU Dresden)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b74bf4536f4..bb062fa843a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -653,6 +653,8 @@ if(MSVC)
string(APPEND CMAKE_CXX_FLAGS " /FS")
string(APPEND CMAKE_CUDA_FLAGS " -Xcompiler /FS")
endif(MSVC)
+append_cxx_flag_if_supported("-Wno-free-nonheap-object" CMAKE_CXX_FLAGS)
+append_cxx_flag_if_supported("-Wno-dangling-reference" CMAKE_CXX_FLAGS)

string(APPEND CMAKE_CUDA_FLAGS " -Xfatbin -compress-all")
