diff --git a/recipe/build.sh b/recipe/build.sh
index 22dde8f0e..4496601a5 100644
--- a/recipe/build.sh
+++ b/recipe/build.sh
@@ -89,8 +89,6 @@ export USE_SYSTEM_SLEEF=1
 # use our protobuf
 export BUILD_CUSTOM_PROTOBUF=OFF
 rm -rf $PREFIX/bin/protoc
-export USE_SYSTEM_PYBIND11=1
-export USE_SYSTEM_EIGEN_INSTALL=1
 
 # prevent six from being downloaded
 > third_party/NNPACK/cmake/DownloadSix.cmake
@@ -244,8 +242,7 @@ case ${PKG_NAME} in
 
     mv build/lib.*/torch/bin/* ${PREFIX}/bin/
     mv build/lib.*/torch/lib/* ${PREFIX}/lib/
-    # need to merge these now because we're using system pybind11, meaning the destination directory is not empty
-    rsync -a build/lib.*/torch/share/* ${PREFIX}/share/
+    mv build/lib.*/torch/share/* ${PREFIX}/share/
     mv build/lib.*/torch/include/{ATen,caffe2,tensorpipe,torch,c10} ${PREFIX}/include/
     rm ${PREFIX}/lib/libtorch_python.*
 
@@ -253,7 +250,7 @@ case ${PKG_NAME} in
     cp build/CMakeCache.txt build/CMakeCache.txt.orig
     ;;
   pytorch)
-    $PREFIX/bin/python -m pip install . --no-deps --no-build-isolation -v --no-clean \
+    $PREFIX/bin/python -m pip install . --no-deps -v --no-clean \
         | sed "s,${CXX},\$\{CXX\},g" \
         | sed "s,${PREFIX},\$\{PREFIX\},g"
     # Keep this in ${PREFIX}/lib so that the library can be found by
diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index e1c2a2d6b..edb7439e2 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -70,7 +70,6 @@ source:
     - patches/0017-Add-conda-prefix-to-inductor-include-paths.patch
     - patches/0018-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch
     - patches/0019-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch  # [win]
-    - patches/0020-make-library-name-in-test_mutable_custom_op_fixed_la.patch
     - patches/0021-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch
     - patches_submodules/fbgemm/0001-remove-DESTINATION-lib-from-CMake-install-directives.patch  # [win]
     - patches_submodules/tensorpipe/0001-switch-away-from-find_package-CUDA.patch
@@ -79,6 +78,8 @@ build:
   number: {{ build }}
   # cuda 11.8 was dropped due to maintenance effort, see discussion in #177
   skip: true  # [cuda_compiler_version == "11.8"]
+  # skip known-passing builds while bringing up GPU builds
+  skip: true  # [cuda_compiler_version == "None" or win]
   # This logic allows two rc variants to be defined in the conda_build_config, but only one to actually be built.
   # We want to be able to define two variants in the cbc so we can assign different labels to each in the upload channel
   # (by zipping is_rc with channel_targets). This prevents rc builds being used unless specifically requested.
@@ -127,7 +128,6 @@ requirements:
     - protobuf
     - make     # [linux]
     - sccache  # [win]
-    - rsync    # [unix]
   host:
     # GPU requirements
     - cudnn  # [cuda_compiler_version != "None"]
@@ -178,8 +178,8 @@
     - libuv
     - pkg-config  # [unix]
     - typing_extensions
-    - pybind11
-    - eigen
+    - pybind11  # [win]
+    - eigen     # [win]
     - zlib
   run:
     # GPU requirements without run_exports
@@ -325,13 +325,13 @@ outputs:
        - intel-openmp {{ mkl }}  # [win]
        - libabseil
        - libprotobuf
+       - eigen     # [win]
+       - pybind11  # [win]
        - sleef
        - libuv
        - pkg-config  # [unix]
        - typing_extensions
        - {{ pin_subpackage('libtorch', exact=True) }}
-       - pybind11
-       - eigen
        - zlib
      run:
        - llvm-openmp  # [osx]
@@ -348,7 +348,7 @@
        - filelock
        - jinja2
        - networkx
-       - pybind11
+       - pybind11  # [win]
        - nomkl  # [blas_impl != "mkl"]
        - fsspec
        # avoid that people without GPUs needlessly download ~0.5-1GB
@@ -397,7 +397,6 @@
        # tools/ is needed to optimise test run
        # as of pytorch=2.0.0, there is a bug when trying to run tests without the tools
        - tools
-       #- .ci/pytorch/smoke_test/smoke_test.py
      commands:
        # Run pip check so as to ensure that all pytorch packages are installed
        # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/24
@@ -423,30 +422,6 @@
        - if not exist %LIBRARY_BIN%\torch_python.dll exit 1  # [win]
        - if not exist %LIBRARY_LIB%\torch_python.lib exit 1  # [win]
 
-       # See here for environment variables needed by the smoke test script
-       # https://github.com/pytorch/pytorch/blob/266fd35c5842902f6304aa8e7713b252cbfb243c/.ci/pytorch/smoke_test/smoke_test.py#L16
-       - set MATRIX_GPU_ARCH_VERSION="{{ '.'.join((cuda_compiler_version or "").split('.')[:2]) }}"  # [(cuda_compiler_version != "None") and (win)]
-       - set MATRIX_GPU_ARCH_TYPE="cuda"     # [(cuda_compiler_version != "None") and (win)]
-       - set MATRIX_GPU_ARCH_VERSION="none"  # [(cuda_compiler_version == "None") and (win)]
-       - set MATRIX_GPU_ARCH_TYPE="none"     # [(cuda_compiler_version == "None") and (win)]
-       - set MATRIX_CHANNEL="defaults"            # [win]
-       - set MATRIX_STABLE_VERSION={{ version }}  # [win]
-       - set MATRIX_PACKAGE_TYPE="conda"          # [win]
-       - set TARGET_OS="windows"                  # [win]
-       - set OMP_NUM_THREADS=4                    # [win]
-       - export MATRIX_GPU_ARCH_VERSION="{{ '.'.join((cuda_compiler_version or "").split('.')[:2]) }}"  # [(cuda_compiler_version != "None") and (linux and x86_64)]
-       - export MATRIX_GPU_ARCH_TYPE="cuda"     # [(cuda_compiler_version != "None") and (linux and x86_64)]
-       - export MATRIX_GPU_ARCH_VERSION="none"  # [(cuda_compiler_version == "None") and (not win)]
-       - export MATRIX_GPU_ARCH_TYPE="none"     # [(cuda_compiler_version == "None") and (not win)]
-       - export MATRIX_CHANNEL="defaults"             # [not win]
-       - export MATRIX_STABLE_VERSION="{{ version }}" # [not win]
-       - export MATRIX_PACKAGE_TYPE="conda"           # [not win]
-       - export TARGET_OS="linux"                     # [linux]
-       - export TARGET_OS="macos-arm64"               # [(osx and arm64)]
-       - export TARGET_OS="macos-x86_64"              # [(osx and x86_64)]
-       - export OMP_NUM_THREADS=4                     # [not win]
-       #- python ./smoke_test/smoke_test.py --package torchonly
-
        # a reasonably safe subset of tests that should run under 15 minutes
        {% set tests = " ".join([
            "test/test_autograd.py",
@@ -566,6 +541,7 @@ about:
     - LICENSE
     - NOTICE
     - third_party/CMake/Copyright.txt
+    - third_party/pybind11/LICENSE  # [unix]
   summary: PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.
   description: |
     PyTorch is a Python package that provides two high-level features:
diff --git a/recipe/patches/0020-make-library-name-in-test_mutable_custom_op_fixed_la.patch b/recipe/patches/0020-make-library-name-in-test_mutable_custom_op_fixed_la.patch
deleted file mode 100644
index 17c54e337..000000000
--- a/recipe/patches/0020-make-library-name-in-test_mutable_custom_op_fixed_la.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From 39041f5a78068d2cf58d99f76938aee95a3c7bb5 Mon Sep 17 00:00:00 2001
-From: "H. Vetinari"
-Date: Thu, 30 Jan 2025 13:23:14 +1100
-Subject: [PATCH 20/21] make library name in
- `test_mutable_custom_op_fixed_layout{,2}` unique
-
-Suggested-By: Daniel Petry
----
- test/inductor/test_torchinductor.py | 14 +++++++++-----
- 1 file changed, 9 insertions(+), 5 deletions(-)
-
-diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
-index 610f5d27332..99e2169febb 100644
---- a/test/inductor/test_torchinductor.py
-+++ b/test/inductor/test_torchinductor.py
-@@ -10628,7 +10628,8 @@ class CommonTemplate:
-     @requires_gpu()
-     @config.patch(implicit_fallbacks=True)
-     def test_mutable_custom_op_fixed_layout2(self):
--        with torch.library._scoped_library("mylib", "DEF") as lib:
-+        unique_lib_name = f"mylib_{id(self)}"  # Make unique name using test instance id
-+        with torch.library._scoped_library(unique_lib_name, "DEF") as lib:
-             mod = nn.Conv2d(3, 128, 1, stride=1, bias=False).to(device=GPU_TYPE)
-             inp = torch.rand(2, 3, 128, 128, device=GPU_TYPE)
-             expected_stride = mod(inp).clone().stride()
-@@ -10664,8 +10665,9 @@ class CommonTemplate:
-             def fn(x):
-                 # Inductor changes the conv to be channels-last
-                 z = mod(x)
--                output = torch.ops.mylib.bar(z, torch._dynamo.is_compiling())
--                torch.ops.mylib.add_one(output)
-+                mylib = importlib.import_module(f"torch.ops.{unique_lib_name}")
-+                output = mylib.bar(z, torch._dynamo.is_compiling())
-+                mylib.add_one(output)
-                 return output**2
- 
-             with torch.no_grad():
-@@ -10681,7 +10683,8 @@ class CommonTemplate:
- 
-     @config.patch(implicit_fallbacks=True)
-     def test_mutable_custom_op_fixed_layout(self):
--        with torch.library._scoped_library("mylib", "DEF") as lib:
-+        unique_lib_name = f"mylib_{id(self)}"  # Make unique name using test instance id
-+        with torch.library._scoped_library(unique_lib_name, "DEF") as lib:
-             lib.define(
-                 "copy_(Tensor(a!) dst, Tensor src) -> ()",
-                 tags=torch.Tag.needs_fixed_stride_order,
-@@ -10697,7 +10700,8 @@ class CommonTemplate:
- 
-         def f(x):
-             full_default_3 = torch.full([3], 7.0, device="cpu")
--            chunk_cat_default_1 = torch.ops.mylib.copy_.default(full_default_3, x)
-+            mylib = importlib.import_module(f"torch.ops.{unique_lib_name}")
-+            chunk_cat_default_1 = mylib.copy_.default(full_default_3, x)
-             mul_out = torch.mul(full_default_3, full_default_3)
-             return mul_out
- 