From 11db2fff66d451533d2996673708cdccd53b9edc Mon Sep 17 00:00:00 2001
From: "H. Vetinari"
Date: Sat, 18 Jan 2025 14:33:03 +0100
Subject: [PATCH 01/58] pytorch v2.6.0.rc7

---
 recipe/meta.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index 91f0ee793..2aa1d6123 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -1,6 +1,6 @@
 # if you wish to build release candidate number X, append the version string with ".rcX"
-{% set version = "2.5.1" %}
-{% set build = 9 %}
+{% set version = "2.6.0.rc7" %}
+{% set build = 0 %}
 
 {% if cuda_compiler_version != "None" %}
 {% set build = build + 200 %}

From 564530143722b78547a53ab7567b0158d8a5cf3d Mon Sep 17 00:00:00 2001
From: "H. Vetinari"
Date: Sat, 18 Jan 2025 14:34:22 +0100
Subject: [PATCH 02/58] MNT: Re-rendered with conda-build 25.1.1, conda-smithy
 3.45.2, and conda-forge-pinning 2025.01.18.07.29.32

---
 .azure-pipelines/azure-pipelines-osx.yml      |  60 +++++-----
 ...nNonecxx_compiler_version13is_rcTrue.yaml} |  10 +-
 ...n12.6cxx_compiler_version13is_rcTrue.yaml} |  10 +-
 ...nNonecxx_compiler_version13is_rcTrue.yaml} |  10 +-
 ...n12.6cxx_compiler_version13is_rcTrue.yaml} |  10 +-
 ...nNonecxx_compiler_version13is_rcTrue.yaml} |  10 +-
 ...n12.6cxx_compiler_version13is_rcTrue.yaml} |  10 +-
 ...rcTruenumpy2.0python3.10.____cpython.yaml} |  10 +-
 ...rcTruenumpy2.0python3.11.____cpython.yaml} |  10 +-
 ...rcTruenumpy2.0python3.12.____cpython.yaml} |  10 +-
 ..._rcTruenumpy2.0python3.9.____cpython.yaml} |  10 +-
 ...cis_rcTruenumpy2python3.13.____cp313.yaml} |  10 +-
 ...rcTruenumpy2.0python3.10.____cpython.yaml} |  10 +-
 ...rcTruenumpy2.0python3.11.____cpython.yaml} |  10 +-
 ...rcTruenumpy2.0python3.12.____cpython.yaml} |  10 +-
 ..._rcTruenumpy2.0python3.9.____cpython.yaml} |  10 +-
 ...cis_rcTruenumpy2python3.13.____cp313.yaml} |  10 +-
 ...rcTruenumpy2.0python3.10.____cpython.yaml} |  10 +-
 ...rcTruenumpy2.0python3.11.____cpython.yaml} |  10 +-
 ...rcTruenumpy2.0python3.12.____cpython.yaml} |  10 +-
 ..._rcTruenumpy2.0python3.9.____cpython.yaml} |  10 +-
 ...cis_rcTruenumpy2python3.13.____cp313.yaml} |  10 +-
 ...necuda_compiler_versionNoneis_rcTrue.yaml} |   4 +-
 ...cccuda_compiler_version12.6is_rcTrue.yaml} |   4 +-
 .github/workflows/conda-build.yml             |  32 +++---
 README.md                                     | 108 +++++++++---
 26 files changed, 209 insertions(+), 209 deletions(-)
 rename .ci_support/{linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml => linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml} (92%)
 rename .ci_support/{linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml => linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml} (92%)
 rename .ci_support/{linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml => linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml} (92%)
 rename .ci_support/{linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml => linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml} (92%)
 rename .ci_support/{linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml => linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml} (92%)
 rename .ci_support/{linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml => linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml} (92%)
 rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml} (90%)
 rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml} (90%)
 rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml => osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml} (90%)
 rename .ci_support/{osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml => osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml} (90%)
 rename .ci_support/{osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml => osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml} (90%)
 rename .ci_support/{osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml => osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml} (90%)
 rename .ci_support/{osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml => osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml} (90%)
 rename .ci_support/{osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml => osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml} (90%)
 rename .ci_support/{win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml => win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml} (95%)
 rename .ci_support/{win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml => win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml} (95%)

diff --git a/.azure-pipelines/azure-pipelines-osx.yml b/.azure-pipelines/azure-pipelines-osx.yml
index bcfc9f37a..9b43e1a8a 100755
--- a/.azure-pipelines/azure-pipelines-osx.yml
+++ b/.azure-pipelines/azure-pipelines-osx.yml
@@ -8,50 +8,50 @@ jobs:
     vmImage: macOS-13
   strategy:
     matrix:
-      osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython:
-        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython
+      osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython:
+        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython:
-        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython
+      osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython:
+        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython:
-        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython
+      osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython:
+        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython:
-        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython
+      osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython:
+        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313:
-        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313
+      osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313:
+        CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython:
-        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython
+      osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython:
+        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython:
-        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython
+      osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython:
+        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython:
-        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython
+      osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython:
+        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython:
-        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython
+      osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython:
+        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313:
-        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313
+      osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313:
+        CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313
         UPLOAD_PACKAGES: 'True'
-      osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython:
-        CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython
+      osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython:
+        CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython:
-        CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython
+      osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython:
+        CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython:
-        CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython
+      osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython:
+        CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython:
-        CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython
+      osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython:
+        CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython
         UPLOAD_PACKAGES: 'True'
-      osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313:
-        CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313
+      osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313:
+        CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313
         UPLOAD_PACKAGES: 'True'
   timeoutInMinutes: 360
   variables: {}

diff --git a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
similarity index 92%
rename from .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml
rename to .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
index 45418ed98..a78a0027d 100644
--- a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml
+++ b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
@@ -13,7 +13,7 @@ cdt_name:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -29,15 +29,15 @@ docker_image:
 github_actions_labels:
 - cirun-openstack-gpu-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
similarity index 92%
rename from .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml
rename to .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
index 182156149..01847ccdf 100644
--- a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml
+++ b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
@@ -13,7 +13,7 @@ cdt_name:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - cuda-nvcc
 cuda_compiler_version:
@@ -29,15 +29,15 @@ docker_image:
 github_actions_labels:
 - cirun-openstack-gpu-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
similarity index 92%
rename from .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml
rename to .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
index 77351ad71..8b693c032 100644
--- a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml
+++ b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
@@ -13,7 +13,7 @@ cdt_name:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -29,15 +29,15 @@ docker_image:
 github_actions_labels:
 - cirun-openstack-gpu-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
similarity index 92%
rename from .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml
rename to .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
index aa913c621..ea6b01996 100644
--- a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml
+++ b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
@@ -13,7 +13,7 @@ cdt_name:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - cuda-nvcc
 cuda_compiler_version:
@@ -29,15 +29,15 @@ docker_image:
 github_actions_labels:
 - cirun-openstack-gpu-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
similarity index 92%
rename from .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml
rename to .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
index d4635f051..8b5baaa1b 100644
--- a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml
+++ b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml
@@ -13,7 +13,7 @@ cdt_name:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -29,15 +29,15 @@ docker_image:
 github_actions_labels:
 - cirun-openstack-gpu-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
similarity index 92%
rename from .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml
rename to .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
index a7d7b6591..0e5851648 100644
--- a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml
+++ b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml
@@ -13,7 +13,7 @@ cdt_name:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - cuda-nvcc
 cuda_compiler_version:
@@ -29,15 +29,15 @@ docker_image:
 github_actions_labels:
 - cirun-openstack-gpu-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml
rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
index 66656f427..311bdb756 100644
--- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml
rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
index 04aefc778..6068d3561 100644
--- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml
rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
index cbbe6612f..d5c2bde05 100644
--- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml
rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
index 02a4aa43b..40c12d146 100644
--- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml
rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
index 2aef50475..8b1030b7e 100644
--- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml
+++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml
rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
index 023552294..3560d4376 100644
--- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml
rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
index b57aaf092..3de8c0a7b 100644
--- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml
rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
index d549ae43e..b14fac625 100644
--- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml
rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
index c5705b306..517e85734 100644
--- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml
+++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
similarity index 90%
rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml
rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
index 7c8113b3f..53451a18b 100644
--- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml
+++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml
rename to .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
index 4ca2b50c8..bad711727 100644
--- a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml
+++ b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml
rename to .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
index 17f951bb4..dac06bb1d 100644
--- a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml
+++ b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml
rename to .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
index 793a82255..d984d3b3f 100644
--- a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml
+++ b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
similarity index 90%
rename from .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml
rename to .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
index 0ec714e44..75abdb732 100644
--- a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml
+++ b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
similarity index 90%
rename from .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml
rename to .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
index abf491d5f..466779f09 100644
--- a/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml
+++ b/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml
@@ -15,7 +15,7 @@ c_stdlib_version:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -25,15 +25,15 @@ cxx_compiler:
 cxx_compiler_version:
 - '18'
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 libcblas:
-- 3.9 *netlib
+- 3.9.* *netlib
 liblapack:
-- 3.9 *netlib
+- 3.9.* *netlib
 libprotobuf:
 - 5.28.3
 libtorch:

diff --git a/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml b/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml
similarity index 95%
rename from .ci_support/win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml
rename to .ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml
index 483682129..34518fd03 100644
--- a/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml
+++ b/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml
@@ -7,7 +7,7 @@ c_stdlib:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - None
 cuda_compiler_version:
@@ -19,7 +19,7 @@ cxx_compiler:
 github_actions_labels:
 - cirun-azure-windows-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libprotobuf:

diff --git a/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml b/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml
similarity index 95%
rename from .ci_support/win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml
rename to .ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml
index c0402bfc3..63a96a4a1 100644
--- a/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml
+++ b/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml
@@ -7,7 +7,7 @@ c_stdlib:
 channel_sources:
 - conda-forge
 channel_targets:
-- conda-forge main
+- conda-forge pytorch_rc
 cuda_compiler:
 - cuda-nvcc
 cuda_compiler_version:
@@ -19,7 +19,7 @@ cxx_compiler:
 github_actions_labels:
 - cirun-azure-windows-2xlarge
 is_rc:
-- 'False'
+- 'True'
 libabseil:
 - '20240722'
 libprotobuf:

diff --git a/.github/workflows/conda-build.yml b/.github/workflows/conda-build.yml
index 40a005a95..1714a477f 100644
--- a/.github/workflows/conda-build.yml
+++ b/.github/workflows/conda-build.yml
@@ -21,50 +21,50 @@ jobs:
       fail-fast: false
       matrix:
         include:
-        - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse
+        - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue
          UPLOAD_PACKAGES: True
          os: ubuntu
-         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_hb59e174a3e', 'linux', 'x64', 'self-hosted']
+         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_hfd3c9e418e', 'linux', 'x64', 'self-hosted']
          DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9
          CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all"
-        - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse
+        - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue
          UPLOAD_PACKAGES: True
          os: ubuntu
-         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_h5e2cde8be1', 'linux', 'x64', 'self-hosted']
+         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_h3d69e44d93', 'linux', 'x64', 'self-hosted']
          DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9
          CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all"
-        - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse
+        - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue
          UPLOAD_PACKAGES: True
          os: ubuntu
-         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_h0b96eb68c6', 'linux', 'x64', 'self-hosted']
+         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_hbf2b41cf9b', 'linux', 'x64', 'self-hosted']
          DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9
          CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all"
-        - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse
+        - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue
          UPLOAD_PACKAGES: True
          os: ubuntu
-         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_hc39dedf959', 'linux', 'x64', 'self-hosted']
+         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_hbac2c29721', 'linux', 'x64', 'self-hosted']
          DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9
          CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all"
-        - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse
+        - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue
          UPLOAD_PACKAGES: True
          os: ubuntu
-         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_h352ed12235', 'linux', 'x64', 'self-hosted']
+         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_h20ac0d977f', 'linux', 'x64', 'self-hosted']
          DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9
          CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all"
-        - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse
+        - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue
          UPLOAD_PACKAGES: True
          os: ubuntu
-         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_h94771c6c09', 'linux', 'x64', 'self-hosted']
+         runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_hc16130462f', 'linux', 'x64', 'self-hosted']
          DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9
          CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all"
-        - CONFIG: win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse
+        - CONFIG: win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue
          UPLOAD_PACKAGES: True
          os: windows
-         runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_m_h0b759d4e63', 'windows', 'x64', 'self-hosted']
-        - CONFIG: win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse
+         runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_p_h4ffee7b9ab', 'windows', 'x64', 'self-hosted']
+        - CONFIG: win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue
          UPLOAD_PACKAGES: True
          os: windows
-         runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_m_h790f30616b', 'windows', 'x64', 'self-hosted']
+         runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_p_hc6d99508db', 'windows', 'x64', 'self-hosted']
 
     steps:
     - name: Checkout code

diff --git a/README.md b/README.md
index 0047ae415..febc622f7 100644
--- a/README.md
+++ b/README.md
@@ -29,164 +29,164 @@ Current build status
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
@@ -210,14 +210,14 @@ Current release info
 Installing pytorch-cpu
 ======================
 
-Installing `pytorch-cpu` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
+Installing `pytorch-cpu` from the `conda-forge/label/pytorch_rc` channel can be achieved by adding `conda-forge/label/pytorch_rc` to your channels with:
 
 ```
-conda config --add channels conda-forge
+conda config --add channels conda-forge/label/pytorch_rc
 conda config --set channel_priority strict
 ```
 
-Once the `conda-forge` channel has been enabled, `libtorch, pytorch, pytorch-cpu, pytorch-gpu` can be installed with `conda`:
+Once the `conda-forge/label/pytorch_rc` channel has been enabled, `libtorch, pytorch, pytorch-cpu, pytorch-gpu` can be installed with `conda`:
 
 ```
 conda install libtorch pytorch pytorch-cpu pytorch-gpu
@@ -232,26 +232,26 @@ mamba install libtorch pytorch pytorch-cpu pytorch-gpu
 It is possible to list all of the versions of `libtorch` available on your platform with `conda`:
 
 ```
-conda search libtorch --channel conda-forge
+conda search libtorch --channel conda-forge/label/pytorch_rc
 ```
 
 or with `mamba`:
 
 ```
-mamba search libtorch --channel conda-forge
+mamba search libtorch --channel conda-forge/label/pytorch_rc
 ```
 
 Alternatively, `mamba repoquery` may provide more information:
 
 ```
 # Search all versions available on your platform:
-mamba repoquery search libtorch --channel conda-forge
+mamba repoquery search libtorch --channel conda-forge/label/pytorch_rc
 
 # List packages depending on `libtorch`:
-mamba repoquery whoneeds libtorch --channel conda-forge
+mamba repoquery whoneeds libtorch --channel conda-forge/label/pytorch_rc
 
 # List dependencies of `libtorch`:
-mamba repoquery depends libtorch --channel conda-forge
+mamba repoquery depends libtorch --channel conda-forge/label/pytorch_rc
 ```

From 1219e2688dc1e72548e159f425ae8e2faf9da5d3 Mon Sep 17 00:00:00 2001
From: "H. Vetinari"
Date: Sat, 18 Jan 2025 14:40:11 +0100
Subject: [PATCH 03/58] rebase patches

---
 recipe/meta.yaml                              | 31 ++-----
 ...-of-python-3-and-error-without-numpy.patch | 10 +--
 recipe/patches/0002-Help-find-numpy.patch     |  8 +-
 ...03-Add-USE_SYSTEM_NVTX-option-138287.patch | 86 -------------------
 ....patch => 0003-Update-sympy-version.patch} | 12 +--
 ...=> 0004-Fix-duplicate-linker-script.patch} |  8 +-
 ...05-Allow-libcufile-for-conda-builds.patch} |  6 +-
 ...Allow-overriding-CUDA-related-paths.patch} |  8 +-
 ...kle-error-in-serialization.py-136034.patch | 46 ----------
 ...verwrite-ld-with-environment-variabl.patch | 32 -------
 ...S_USE_CBLAS_DOT-for-OpenBLAS-builds.patch} |  8 +-
 ...2484.patch => 0008-fix-issue-142484.patch} |  8 +-
 ...BLAS.patch => 0009-Fix-FindOpenBLAS.patch} |  4 +-
 ...st-test_linalg.py-for-NumPy-2-136800.patch | 77 -----------------
 ...est-failures-in-test_torch.py-137740.patch | 60 -------------
 ...Enable-Python-3.13-on-windows-138095.patch | 52 -----------
 16 files changed, 45 insertions(+), 411 deletions(-)
 delete mode 100644 recipe/patches/0003-Add-USE_SYSTEM_NVTX-option-138287.patch
 rename recipe/patches/{0004-Update-sympy-version.patch => 0003-Update-sympy-version.patch} (62%)
 rename recipe/patches/{0005-Fix-duplicate-linker-script.patch => 0004-Fix-duplicate-linker-script.patch} (80%)
 rename recipe/patches/{0008-Allow-libcufile-for-conda-builds.patch => 0005-Allow-libcufile-for-conda-builds.patch} (83%)
 rename recipe/patches/{0009-Allow-overriding-CUDA-related-paths.patch => 0006-Allow-overriding-CUDA-related-paths.patch} (89%)
 delete mode 100644 recipe/patches/0006-fix-3.13-pickle-error-in-serialization.py-136034.patch
 delete mode 100644 recipe/patches/0007-Allow-users-to-overwrite-ld-with-environment-variabl.patch
 rename recipe/patches/{0012-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch => 0007-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch} (86%)
 rename recipe/patches/{0013-fix-issue-142484.patch => 0008-fix-issue-142484.patch} (83%)
 rename recipe/patches/{0014-Fix-FindOpenBLAS.patch => 0009-Fix-FindOpenBLAS.patch} (93%)
 delete mode 100644 recipe/patches/0010-Fix-test-test_linalg.py-for-NumPy-2-136800.patch
 delete mode 100644 recipe/patches/0011-Fixes-NumPy-2-test-failures-in-test_torch.py-137740.patch
 delete mode 100644 recipe/patches/0015-CD-Enable-Python-3.13-on-windows-138095.patch

diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index 2aa1d6123..cd90e57f4 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -33,36 +33,23 @@ source:
 {% endif %}
   patches:
     - patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch
-    # https://github.com/pytorch/pytorch/pull/137084
+    # backport https://github.com/pytorch/pytorch/pull/137084
     - patches/0002-Help-find-numpy.patch
-    # https://github.com/pytorch/pytorch/pull/138287
-    - patches/0003-Add-USE_SYSTEM_NVTX-option-138287.patch
-    # sympy 1.13.2 was reported to result in test failures on Windows and mac
+    # unpin sympy; 1.13.2 was reported to result in test failures on Windows and mac, see
     # https://github.com/pytorch/pytorch/pull/133235
-    - patches/0004-Update-sympy-version.patch
-    - patches/0005-Fix-duplicate-linker-script.patch  # [cuda_compiler_version != "None" and aarch64]
-    # https://github.com/pytorch/pytorch/pull/136034
-    - patches/0006-fix-3.13-pickle-error-in-serialization.py-136034.patch
-    # https://github.com/pytorch/pytorch/pull/137331
-    - patches/0007-Allow-users-to-overwrite-ld-with-environment-variabl.patch
+    - patches/0003-Update-sympy-version.patch
+    - patches/0004-Fix-duplicate-linker-script.patch  # [cuda_compiler_version != "None" and aarch64]
     # conda-specific patch, upstream force-disables libcufile w/ TH_BINARY_BUILD
     # for their PyPI wheel builds
-    - patches/0008-Allow-libcufile-for-conda-builds.patch
+    - patches/0005-Allow-libcufile-for-conda-builds.patch
     # conda-specific patch, lets us override CUDA paths
-    - patches/0009-Allow-overriding-CUDA-related-paths.patch
-    # NumPy 2 fixes:
-    # https://github.com/pytorch/pytorch/pull/136800
-    - patches/0010-Fix-test-test_linalg.py-for-NumPy-2-136800.patch
-    # https://github.com/pytorch/pytorch/pull/137740
-    - patches/0011-Fixes-NumPy-2-test-failures-in-test_torch.py-137740.patch
+    - patches/0006-Allow-overriding-CUDA-related-paths.patch
     # fix BLAS calling convention for openblas
-    - patches/0012-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch
+    - patches/0007-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch
     # fix mkl-2024 issue
     # https://github.com/pytorch/pytorch/pull/143894
-    - patches/0013-fix-issue-142484.patch
-    - patches/0014-Fix-FindOpenBLAS.patch
-    # backport https://github.com/pytorch/pytorch/pull/138095
-    - patches/0015-CD-Enable-Python-3.13-on-windows-138095.patch
+    - patches/0008-fix-issue-142484.patch
+    - patches/0009-Fix-FindOpenBLAS.patch
 
 build:
   number: {{ build }}

diff --git a/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch b/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch
index fab6479d1..323729361 100644
--- a/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch
+++ b/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch
@@ -1,17 +1,17 @@
-From f3a0f9aab6dce56eea590b946f60256014b61bf7 Mon Sep 17 00:00:00 2001
+From c0d40d9ba90599a42e5880ef6d1e7efaee13556a Mon Sep 17 00:00:00 2001
 From: Mark Harfouche
 Date: Sun, 1 Sep 2024 17:35:40 -0400
-Subject: [PATCH 01/15] Force usage of python 3 and error without numpy
+Subject: [PATCH 1/9] Force usage of python 3 and error without numpy
 
 ---
  cmake/Dependencies.cmake | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)
 
 diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
-index e78305e0a8e..15c62548601 100644
+index 1813f4418a2..36b507f4f6e 100644
 --- a/cmake/Dependencies.cmake
 +++ b/cmake/Dependencies.cmake
-@@ -861,9 +861,9 @@ if(BUILD_PYTHON)
+@@ -848,9 +848,9 @@ if(BUILD_PYTHON)
    if(USE_NUMPY)
      list(APPEND PYTHON_COMPONENTS NumPy)
    endif()
@@ -23,7 +23,7 @@ index e78305e0a8e..15c62548601 100644
   endif()
 
   if(NOT Python_Interpreter_FOUND)
-@@ -880,7 +880,7 @@ if(BUILD_PYTHON)
+@@ -867,7 +867,7 @@ if(BUILD_PYTHON)
    if(Python_Development.Module_FOUND)
      if(USE_NUMPY)
        if(NOT Python_NumPy_FOUND)

diff --git a/recipe/patches/0002-Help-find-numpy.patch b/recipe/patches/0002-Help-find-numpy.patch
index 58792c4b0..31eaf64b1 100644
--- a/recipe/patches/0002-Help-find-numpy.patch
+++ b/recipe/patches/0002-Help-find-numpy.patch
@@ -1,17 +1,17 @@
-From 21c30036b5b86f403c0cf4426165d9a6a50edb1a Mon Sep 17 00:00:00 2001
+From 74b0cdb8494a70ba7598fe1c08e7c64f6d069b38 Mon Sep 17 00:00:00 2001
 From: Mark Harfouche
 Date: Tue, 1 Oct 2024 00:28:40 -0400
-Subject: [PATCH 02/15] Help find numpy
+Subject: [PATCH 2/9] Help find numpy
 
 ---
  tools/setup_helpers/cmake.py | 6 ++++++
  1 file changed, 6 insertions(+)
 
 diff --git a/tools/setup_helpers/cmake.py b/tools/setup_helpers/cmake.py
-index 4b605fe5975..bde41323c76 100644
+index 84e4dad32d3..8ce7272bea8 100644
 --- a/tools/setup_helpers/cmake.py
 +++ b/tools/setup_helpers/cmake.py
-@@ -305,9 +305,15 @@ class CMake:
+@@ -306,9 +306,15 @@ class CMake:
          sys.exit(1)
 
          build_options.update(cmake__options)

diff --git a/recipe/patches/0003-Add-USE_SYSTEM_NVTX-option-138287.patch b/recipe/patches/0003-Add-USE_SYSTEM_NVTX-option-138287.patch
deleted file mode 100644
index 38baefeb5..000000000
--- a/recipe/patches/0003-Add-USE_SYSTEM_NVTX-option-138287.patch
+++ /dev/null
@@ -1,86 +0,0 @@
-From d1826af525db41eda5020a1404f5d5521d67a5dc Mon Sep 17 00:00:00 2001
-From: Jeongseok Lee
-Date: Sat, 19 Oct 2024 04:26:01 +0000
-Subject: [PATCH 03/15] Add USE_SYSTEM_NVTX option (#138287)
-
-## Summary
-
-We are currently [updating](https://github.com/conda-forge/pytorch-cpu-feedstock/pull/277) the [`conda-forge::pytorch`](https://anaconda.org/conda-forge/pytorch) package to version 2.5.0. This update includes a new dependency, the third_party/NVTX submodule. However, like other package management frameworks (e.g., apt), conda-forge prefers using system-installed packages instead of vendor-provided third-party packages.
-
-This pull request aims to add an option, `USE_SYSTEM_NVTX`, to select whether to use the vendored nvtx or the system-installed one, with the default being the vendored one (which is the current behavior).
-
-## Test Plan
-
-The `USE_SYSTEM_NVTX` option is tested by building the `conda-forge::pytorch` package with the change applied as a [patch](https://github.com/conda-forge/pytorch-cpu-feedstock/blob/cd1d2464dd14e48ae4bd2214e6885e2432de483e/recipe/patches/0005-Use-system-nvtx3.patch).
-Pull Request resolved: https://github.com/pytorch/pytorch/pull/138287
-Approved by: https://github.com/albanD
----
- CMakeLists.txt | 2 ++
- cmake/public/cuda.cmake | 6 +++++-
- setup.py | 16 +++++++++++++++-
- 3 files changed, 22 insertions(+), 2 deletions(-)
-
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index 98593c2de97..ae3c3f2cbd5 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -470,6 +470,7 @@ option(USE_SYSTEM_FXDIV "Use system-provided fxdiv." OFF)
- option(USE_SYSTEM_BENCHMARK "Use system-provided google benchmark." OFF)
- option(USE_SYSTEM_ONNX "Use system-provided onnx." OFF)
- option(USE_SYSTEM_XNNPACK "Use system-provided xnnpack." OFF)
-+OPTION(USE_SYSTEM_NVTX "Use system-provided nvtx." OFF)
- option(USE_GOLD_LINKER "Use ld.gold to link" OFF)
- if(USE_SYSTEM_LIBS)
-   set(USE_SYSTEM_CPUINFO ON)
-@@ -488,6 +489,7 @@ if(USE_SYSTEM_LIBS)
-   if(USE_NCCL)
-     set(USE_SYSTEM_NCCL ON)
-   endif()
-+  set(USE_SYSTEM_NVTX ON)
- endif()
-
- # /Z7 override option When generating debug symbols, CMake default to use the
-diff --git a/cmake/public/cuda.cmake b/cmake/public/cuda.cmake
-index afc1bc12abf..152fbdbe6dd 100644
---- a/cmake/public/cuda.cmake
-+++ b/cmake/public/cuda.cmake
-@@ -170,7 +170,11 @@ else()
- endif()
-
- # nvToolsExt
--find_path(nvtx3_dir NAMES nvtx3 PATHS "${PROJECT_SOURCE_DIR}/third_party/NVTX/c/include" NO_DEFAULT_PATH)
-+if(USE_SYSTEM_NVTX)
-+  find_path(nvtx3_dir NAMES nvtx3)
-+else()
-+  find_path(nvtx3_dir NAMES nvtx3 PATHS "${PROJECT_SOURCE_DIR}/third_party/NVTX/c/include" NO_DEFAULT_PATH)
-+endif()
- find_package_handle_standard_args(nvtx3 DEFAULT_MSG nvtx3_dir)
- if(nvtx3_FOUND)
-   add_library(torch::nvtx3 INTERFACE IMPORTED)
-diff --git a/setup.py b/setup.py
-index 2b0cfa99d71..7174777ed4e 100644
---- a/setup.py
-+++ b/setup.py
-@@ -183,7 +183,21 @@
- #   USE_SYSTEM_LIBS (work in progress)
- #     Use system-provided libraries to satisfy the build dependencies.
- #     When turned on, the following cmake variables will be toggled as well:
--#     USE_SYSTEM_CPUINFO=ON USE_SYSTEM_SLEEF=ON BUILD_CUSTOM_PROTOBUF=OFF
-+#     USE_SYSTEM_CPUINFO=ON
-+#     USE_SYSTEM_SLEEF=ON
-+#     USE_SYSTEM_GLOO=ON
-+#     BUILD_CUSTOM_PROTOBUF=OFF
-+#     USE_SYSTEM_EIGEN_INSTALL=ON
-+#     USE_SYSTEM_FP16=ON
-+#     USE_SYSTEM_PTHREADPOOL=ON
-+#     USE_SYSTEM_PSIMD=ON
-+#     USE_SYSTEM_FXDIV=ON
-+#     USE_SYSTEM_BENCHMARK=ON
-+#     USE_SYSTEM_ONNX=ON
-+#     USE_SYSTEM_XNNPACK=ON
-+#     USE_SYSTEM_PYBIND11=ON
-+#     USE_SYSTEM_NCCL=ON
-+#     USE_SYSTEM_NVTX=ON
- #
- #   USE_MIMALLOC
- #     Static link mimalloc into C10, and use mimalloc in alloc_cpu & alloc_free.

diff --git a/recipe/patches/0004-Update-sympy-version.patch b/recipe/patches/0003-Update-sympy-version.patch
similarity index 62%
rename from recipe/patches/0004-Update-sympy-version.patch
rename to recipe/patches/0003-Update-sympy-version.patch
index 3767fdfc6..95349359c 100644
--- a/recipe/patches/0004-Update-sympy-version.patch
+++ b/recipe/patches/0003-Update-sympy-version.patch
@@ -1,20 +1,20 @@
-From e3219c5fe8834753b0cf9e92be4d1ef1e874f370 Mon Sep 17 00:00:00 2001
+From a0961062612e79f407eeed1779a52faed52f1672 Mon Sep 17 00:00:00 2001
 From: Jeongseok Lee
 Date: Thu, 17 Oct 2024 15:04:05 -0700
-Subject: [PATCH 04/15] Update sympy version
+Subject: [PATCH 3/9] Update sympy version
 
 ---
  setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

 diff --git a/setup.py b/setup.py
-index 7174777ed4e..65be34e39b1 100644
+index a6a6db7b033..d73cec7dd86 100644
 --- a/setup.py
 +++ b/setup.py
-@@ -1158,7 +1158,7 @@ def main():
-         "typing-extensions>=4.8.0",
+@@ -1099,7 +1099,7 @@ def main():
+         "filelock",
+         "typing-extensions>=4.10.0",
          'setuptools ; python_version >= "3.12"',
 -        'sympy==1.12.1 ; python_version == "3.8"',
 -        'sympy==1.13.1 ; python_version >= "3.9"',
 +        'sympy>=1.13.1,!=1.13.2 ; python_version >= "3.9"',
          "networkx",

diff --git a/recipe/patches/0005-Fix-duplicate-linker-script.patch b/recipe/patches/0004-Fix-duplicate-linker-script.patch
similarity index 80%
rename from recipe/patches/0005-Fix-duplicate-linker-script.patch
rename to recipe/patches/0004-Fix-duplicate-linker-script.patch
index 9fc6adab2..915531210 100644
--- a/recipe/patches/0005-Fix-duplicate-linker-script.patch
+++ b/recipe/patches/0004-Fix-duplicate-linker-script.patch
@@ -1,17 +1,17 @@
-From 08a1f44fbc81324aa98d720dfb7b87a261923ac2 Mon Sep 17 00:00:00 2001
+From 7c9eee174d67055308c6eae50c6e9e387084b730 Mon Sep 17 00:00:00 2001
 From: Jeongseok Lee
 Date: Sun, 3 Nov 2024 01:12:36 -0700
-Subject: [PATCH 05/15] Fix duplicate linker script
+Subject: [PATCH 4/9] Fix duplicate linker script
 
 ---
  setup.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

 diff --git a/setup.py b/setup.py
-index 65be34e39b1..b0e01e0d1ee 100644
+index d73cec7dd86..75fdfce7e35 100644
 --- a/setup.py
 +++ b/setup.py
-@@ -1184,7 +1184,7 @@ def main():
+@@ -1125,7 +1125,9 @@ def main():
          filein="cmake/prioritized_text.txt", fout="cmake/linker_script.ld"
      )
      linker_script_path = os.path.abspath("cmake/linker_script.ld")

diff --git a/recipe/patches/0008-Allow-libcufile-for-conda-builds.patch b/recipe/patches/0005-Allow-libcufile-for-conda-builds.patch
similarity index 83%
rename from recipe/patches/0008-Allow-libcufile-for-conda-builds.patch
rename to recipe/patches/0005-Allow-libcufile-for-conda-builds.patch
index 95c7a842b..870102b56 100644
--- a/recipe/patches/0008-Allow-libcufile-for-conda-builds.patch
+++ b/recipe/patches/0005-Allow-libcufile-for-conda-builds.patch
@@ -1,14 +1,14 @@
-From 
2578bf017b0453c8eb028a09b7523fb150429eca Mon Sep 17 00:00:00 2001 +From 590138a7a544d10ae997cdca3177b74d61374ebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Fri, 22 Nov 2024 17:50:53 +0100 -Subject: [PATCH 08/15] Allow libcufile for conda builds +Subject: [PATCH 5/9] Allow libcufile for conda builds --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt -index ae3c3f2cbd5..3a845151cf8 100644 +index c8af5f00b5c..2361613e522 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -255,7 +255,7 @@ cmake_dependent_option(USE_CUDSS "Use cuDSS" ON "USE_CUDA" OFF) diff --git a/recipe/patches/0009-Allow-overriding-CUDA-related-paths.patch b/recipe/patches/0006-Allow-overriding-CUDA-related-paths.patch similarity index 89% rename from recipe/patches/0009-Allow-overriding-CUDA-related-paths.patch rename to recipe/patches/0006-Allow-overriding-CUDA-related-paths.patch index 8c09887b7..19ab5e05c 100644 --- a/recipe/patches/0009-Allow-overriding-CUDA-related-paths.patch +++ b/recipe/patches/0006-Allow-overriding-CUDA-related-paths.patch @@ -1,7 +1,7 @@ -From 52e530f222f2d30531c8da889695ac2674964245 Mon Sep 17 00:00:00 2001 +From b1020cc01cda16552b0b7960849db80b8f77b7d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Wed, 27 Nov 2024 13:47:23 +0100 -Subject: [PATCH 09/15] Allow overriding CUDA-related paths +Subject: [PATCH 6/9] Allow overriding CUDA-related paths --- cmake/Modules/FindCUDAToolkit.cmake | 2 +- @@ -22,10 +22,10 @@ index ec9ae530aa6..b7c0bd9fc51 100644 set(CUDAToolkit_LIBRARY_ROOT "${CMAKE_CUDA_COMPILER_LIBRARY_ROOT}") set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}") diff --git a/tools/setup_helpers/cmake.py b/tools/setup_helpers/cmake.py -index bde41323c76..b171837cd4a 100644 +index 8ce7272bea8..67b2b495c62 100644 --- a/tools/setup_helpers/cmake.py +++ b/tools/setup_helpers/cmake.py -@@ -252,7 +252,7 @@ class CMake: +@@ -253,7 +253,7 @@ class CMake: true_var = additional_options.get(var) if true_var is not None: build_options[true_var] = val diff --git a/recipe/patches/0006-fix-3.13-pickle-error-in-serialization.py-136034.patch b/recipe/patches/0006-fix-3.13-pickle-error-in-serialization.py-136034.patch deleted file mode 100644 index d4253916a..000000000 --- a/recipe/patches/0006-fix-3.13-pickle-error-in-serialization.py-136034.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 15df314a41c69a31c0443254d5552aa1b39d708d Mon Sep 17 00:00:00 2001 -From: William Wen -Date: Fri, 13 Sep 2024 13:02:33 -0700 -Subject: [PATCH 06/15] fix 3.13 pickle error in serialization.py (#136034) - -Error encountered when adding dynamo 3.13 support. 
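Note (illustrative aside, not part of the patch series): this dropped patch (0006, presumably removed because the fix landed upstream for 2.6.0) worked around CPython 3.13, where assigning `persistent_id` on an instance of the C-accelerated `Pickler` stopped taking effect; the upstream fix defines the hook on a subclass instead, which old and new interpreters both honor. A minimal runnable sketch of the two idioms, with a placeholder `persistent_id` function:

```python
import io
import pickle

def persistent_id(obj):
    # placeholder: would map out-of-band objects (e.g. tensor storages) to stable keys
    return None

# pre-3.13 idiom (what the upstream fix removes):
#     pickler = pickle.Pickler(buf, protocol=2)
#     pickler.persistent_id = persistent_id   # no longer effective on CPython 3.13
# 3.13-compatible idiom (what the upstream fix adds):
class PatchedPickler(pickle.Pickler):
    def persistent_id(self, obj):
        return persistent_id(obj)

buf = io.BytesIO()
PatchedPickler(buf, protocol=2).dump([1, 2, 3])
assert pickle.loads(buf.getvalue()) == [1, 2, 3]
```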
-Pull Request resolved: https://github.com/pytorch/pytorch/pull/136034 -Approved by: https://github.com/albanD ---- - torch/serialization.py | 16 ++++++++++++---- - 1 file changed, 12 insertions(+), 4 deletions(-) - -diff --git a/torch/serialization.py b/torch/serialization.py -index d936d31d6f5..d937680c031 100644 ---- a/torch/serialization.py -+++ b/torch/serialization.py -@@ -1005,8 +1005,12 @@ def _legacy_save(obj, f, pickle_module, pickle_protocol) -> None: - pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol) - pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol) - pickle_module.dump(sys_info, f, protocol=pickle_protocol) -- pickler = pickle_module.Pickler(f, protocol=pickle_protocol) -- pickler.persistent_id = persistent_id -+ -+ class PyTorchLegacyPickler(pickle_module.Pickler): -+ def persistent_id(self, obj): -+ return persistent_id(obj) -+ -+ pickler = PyTorchLegacyPickler(f, protocol=pickle_protocol) - pickler.dump(obj) - - serialized_storage_keys = sorted(serialized_storages.keys()) -@@ -1083,8 +1087,12 @@ def _save( - - # Write the pickle data for `obj` - data_buf = io.BytesIO() -- pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol) -- pickler.persistent_id = persistent_id -+ -+ class PyTorchPickler(pickle_module.Pickler): # type: ignore[name-defined] -+ def persistent_id(self, obj): -+ return persistent_id(obj) -+ -+ pickler = PyTorchPickler(data_buf, protocol=pickle_protocol) - pickler.dump(obj) - data_value = data_buf.getvalue() - zip_file.write_record("data.pkl", data_value, len(data_value)) diff --git a/recipe/patches/0007-Allow-users-to-overwrite-ld-with-environment-variabl.patch b/recipe/patches/0007-Allow-users-to-overwrite-ld-with-environment-variabl.patch deleted file mode 100644 index 514982cc7..000000000 --- a/recipe/patches/0007-Allow-users-to-overwrite-ld-with-environment-variabl.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 655f694854c3eafdd631235b60bc6c1b279218ed Mon Sep 17 00:00:00 2001 -From: Mark Harfouche -Date: Thu, 3 Oct 2024 22:49:56 -0400 -Subject: [PATCH 07/15] Allow users to overwrite ld with environment variables - -This should help in the case of cross compilation. 
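Note (illustrative aside, not part of the patch series): the `LD` override that this dropped patch (0007) introduced is a common toolchain pattern, sketched standalone below; the real change lives in `tools/setup_helpers/generate_linker_script.py`, whose diff follows.

```python
import os
import subprocess

# Resolve the linker from the environment, falling back to the host default;
# a cross toolchain can then export e.g. LD=aarch64-conda-linux-gnu-ld
# (hypothetical value) instead of the build hardcoding "ld".
ld = os.environ.get("LD", "ld")
print(subprocess.check_output([ld, "--version"], text=True).splitlines()[0])
```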
- -xref: https://github.com/conda-forge/pytorch-cpu-feedstock/pull/261 ---- - tools/setup_helpers/generate_linker_script.py | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/tools/setup_helpers/generate_linker_script.py b/tools/setup_helpers/generate_linker_script.py -index 11c397a9e5f..e66fc197062 100644 ---- a/tools/setup_helpers/generate_linker_script.py -+++ b/tools/setup_helpers/generate_linker_script.py -@@ -1,3 +1,4 @@ -+import os - import subprocess - - -@@ -9,8 +10,8 @@ def gen_linker_script( - prioritized_text = [ - line.replace("\n", "") for line in prioritized_text if line != "\n" - ] -- -- linker_script_lines = subprocess.check_output(["ld", "-verbose"], text=True).split( -+ ld = os.environ.get("LD", "ld") -+ linker_script_lines = subprocess.check_output([ld, "-verbose"], text=True).split( - "\n" - ) - diff --git a/recipe/patches/0012-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch b/recipe/patches/0007-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch similarity index 86% rename from recipe/patches/0012-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch rename to recipe/patches/0007-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch index 4d2159dd5..688fb56a4 100644 --- a/recipe/patches/0012-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch +++ b/recipe/patches/0007-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch @@ -1,7 +1,7 @@ -From e74a39d09953d4c4dd293e167121cfe83fb172ec Mon Sep 17 00:00:00 2001 +From 1e44d97374c4bf42f5074b7d01d30a78837857b1 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 18 Dec 2024 03:59:00 +0000 -Subject: [PATCH 12/15] Use BLAS_USE_CBLAS_DOT for OpenBLAS builds +Subject: [PATCH 7/9] Use BLAS_USE_CBLAS_DOT for OpenBLAS builds There are two calling conventions for *dotu functions @@ -31,10 +31,10 @@ functional calls. 
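Note (illustrative aside, not part of the patch series): the convention clash described above is the classic gfortran-vs-f2c split — Fortran `?dotu` either returns the complex result by value or writes it through a hidden first argument — and the CBLAS `cblas_?dotu_sub` entry points sidestep it by taking an explicit output pointer. A quick runtime cross-check (SciPy is used here purely for illustration and is not a dependency of this recipe):

```python
import numpy as np
from scipy.linalg.blas import zdotu  # unconjugated complex dot, routed through BLAS

x = np.array([1 + 2j, 3 + 4j])
y = np.array([5 - 1j, 2 + 0.5j])

# A BLAS linked with the wrong dotu convention returns garbage here, so
# agreement with the plain NumPy reduction is a cheap sanity check.
assert np.allclose(zdotu(x, y), np.sum(x * y))
```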
1 file changed, 1 insertion(+) diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake -index 15c62548601..3965416eb29 100644 +index 36b507f4f6e..b94993c34ba 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake -@@ -182,6 +182,7 @@ elseif(BLAS STREQUAL "OpenBLAS") +@@ -181,6 +181,7 @@ elseif(BLAS STREQUAL "OpenBLAS") set(BLAS_INFO "open") set(BLAS_FOUND 1) set(BLAS_LIBRARIES ${OpenBLAS_LIB}) diff --git a/recipe/patches/0013-fix-issue-142484.patch b/recipe/patches/0008-fix-issue-142484.patch similarity index 83% rename from recipe/patches/0013-fix-issue-142484.patch rename to recipe/patches/0008-fix-issue-142484.patch index 528b0371e..239870f61 100644 --- a/recipe/patches/0013-fix-issue-142484.patch +++ b/recipe/patches/0008-fix-issue-142484.patch @@ -1,7 +1,7 @@ -From 67b122f715e93592f1d7913ab970619f7b571b96 Mon Sep 17 00:00:00 2001 +From 8dc9af0ab842c42647a25de94f75a533fe541037 Mon Sep 17 00:00:00 2001 From: "Zheng, Zhaoqiong" Date: Fri, 27 Dec 2024 13:49:36 +0800 -Subject: [PATCH 13/15] fix issue 142484 +Subject: [PATCH 8/9] fix issue 142484 From https://github.com/pytorch/pytorch/pull/143894 --- @@ -9,10 +9,10 @@ From https://github.com/pytorch/pytorch/pull/143894 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/aten/src/ATen/native/mkl/SpectralOps.cpp b/aten/src/ATen/native/mkl/SpectralOps.cpp -index e26cfbf6d8e..c61b76d3205 100644 +index 3d777ecdcf8..2227e492dea 100644 --- a/aten/src/ATen/native/mkl/SpectralOps.cpp +++ b/aten/src/ATen/native/mkl/SpectralOps.cpp -@@ -477,7 +477,17 @@ static Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_sizes, +@@ -478,7 +478,17 @@ static Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_sizes, const auto value_type = c10::toRealValueType(input.scalar_type()); out.resize_(batched_out_sizes, MemoryFormat::Contiguous); diff --git a/recipe/patches/0014-Fix-FindOpenBLAS.patch b/recipe/patches/0009-Fix-FindOpenBLAS.patch similarity index 93% rename from recipe/patches/0014-Fix-FindOpenBLAS.patch rename to recipe/patches/0009-Fix-FindOpenBLAS.patch index 33fb2ffb9..35a31a181 100644 --- a/recipe/patches/0014-Fix-FindOpenBLAS.patch +++ b/recipe/patches/0009-Fix-FindOpenBLAS.patch @@ -1,7 +1,7 @@ -From a6924f47769d3d068b6d8e40686733b7ff32544e Mon Sep 17 00:00:00 2001 +From d017c04f30b4e05ebc716eeea6dc757ba51b9ba3 Mon Sep 17 00:00:00 2001 From: Bas Zalmstra Date: Thu, 16 May 2024 10:46:49 +0200 -Subject: [PATCH 14/15] Fix FindOpenBLAS +Subject: [PATCH 9/9] Fix FindOpenBLAS --- cmake/Modules/FindOpenBLAS.cmake | 15 +++++++++------ diff --git a/recipe/patches/0010-Fix-test-test_linalg.py-for-NumPy-2-136800.patch b/recipe/patches/0010-Fix-test-test_linalg.py-for-NumPy-2-136800.patch deleted file mode 100644 index 4dcc58a8a..000000000 --- a/recipe/patches/0010-Fix-test-test_linalg.py-for-NumPy-2-136800.patch +++ /dev/null @@ -1,77 +0,0 @@ -From db0902077a0ef68d0a48ae42f3b60f9cff7b2248 Mon Sep 17 00:00:00 2001 -From: Haifeng Jin -Date: Tue, 1 Oct 2024 07:53:24 +0000 -Subject: [PATCH 10/15] Fix test/test_linalg.py for NumPy 2 (#136800) - -Related to #107302. - -When built and tested with NumPy 2 the following unit tests failed. 
- -``` -=========================================================== short test summary info ============================================================ -FAILED [0.0026s] test/test_linalg.py::TestLinalgCPU::test_householder_product_cpu_complex128 - TypeError: expected np.ndarray (got Tensor) -FAILED [0.0024s] test/test_linalg.py::TestLinalgCPU::test_householder_product_cpu_complex64 - TypeError: expected np.ndarray (got Tensor) -FAILED [0.0025s] test/test_linalg.py::TestLinalgCPU::test_householder_product_cpu_float32 - TypeError: expected np.ndarray (got Tensor) -FAILED [0.0024s] test/test_linalg.py::TestLinalgCPU::test_householder_product_cpu_float64 - TypeError: expected np.ndarray (got Tensor) -FAILED [0.0016s] test/test_linalg.py::TestLinalgCPU::test_nuclear_norm_axes_small_brute_force_old_cpu - ValueError: Unable to avoid copy while creating an array as requested. -FAILED [0.0054s] test/test_linalg.py::TestLinalgCPU::test_solve_cpu_complex128 - AssertionError: The values for attribute 'shape' do not match: torch.Size([0, 0]) != torch.Size([0, 0, 0]). -FAILED [0.0055s] test/test_linalg.py::TestLinalgCPU::test_solve_cpu_complex64 - AssertionError: The values for attribute 'shape' do not match: torch.Size([0, 0]) != torch.Size([0, 0, 0]). -FAILED [0.0048s] test/test_linalg.py::TestLinalgCPU::test_solve_cpu_float32 - AssertionError: The values for attribute 'shape' do not match: torch.Size([0, 0]) != torch.Size([0, 0, 0]). -FAILED [0.0054s] test/test_linalg.py::TestLinalgCPU::test_solve_cpu_float64 - AssertionError: The values for attribute 'shape' do not match: torch.Size([0, 0]) != torch.Size([0, 0, 0]). -=========================================== 9 failed, 1051 passed, 118 skipped in 152.51s (0:02:32) ============================================ -``` - -This PR fixes them. The test is now compatible with both NumPy 1 & 2. - -Some more details: - -1. The `np.linalg.solve` has changed its behavior. So I added an adapt function in the unit test to keep its behavior the same no matter it is NumPy 1 or Numpy 2. -2. The cause of the failure is when passing a `torch.Tensor` to `np.linalg.qr`, the return type in NumPy 1 is `(np.ndarray, np.ndarray)`, while it is `(torch.Tensor, torch.Tensor)` in NumPy 2. -3. NumPy 2 does not allow `np.array(obj, copy=False)`, but recommended to use `np.asarray(obj)` instead. - -Pull Request resolved: https://github.com/pytorch/pytorch/pull/136800 -Approved by: https://github.com/lezcano ---- - test/test_linalg.py | 15 ++++++++++++--- - 1 file changed, 12 insertions(+), 3 deletions(-) - -diff --git a/test/test_linalg.py b/test/test_linalg.py -index e9ec874d695..060bccef2e5 100644 ---- a/test/test_linalg.py -+++ b/test/test_linalg.py -@@ -2351,7 +2351,7 @@ class TestLinalg(TestCase): - if self.device_type != 'cpu' and randrange(100) < 95: - return # too many cpu <==> device copies - -- a = np.array(x.cpu(), copy=False) -+ a = np.asarray(x.cpu()) - expected = np.linalg.norm(a, "nuc", axis=axes) - - ans = torch.norm(x, "nuc", dim=axes) -@@ -3082,7 +3082,14 @@ class TestLinalg(TestCase): - self.assertEqual(b.expand_as(Ax), Ax) - - # Check against NumPy -- expected = np.linalg.solve(A.cpu().numpy(), b.expand_as(x).cpu().numpy()) -+ if rhs == (): -+ # In NumPy 2, "b" can no longer be a vector (i.e. rhs == ()) if has batch dimensions. -+ # So, reshape it to a matrix and back. 
Related documentation: -+ # https://numpy.org/doc/1.26/reference/generated/numpy.linalg.solve.html -+ # https://numpy.org/doc/2.0/reference/generated/numpy.linalg.solve.html -+ expected = np.linalg.solve(A.cpu().numpy(), b.cpu().numpy().reshape(*b.shape, 1)).reshape(b.shape) -+ else: -+ expected = np.linalg.solve(A.cpu().numpy(), b.cpu().numpy()) - self.assertEqual(x, expected) - - batches = [(), (0, ), (3, ), (2, 3)] -@@ -5234,7 +5241,9 @@ class TestLinalg(TestCase): - tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]] - tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1]) - for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau): -- reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw')) -+ reflectors_tmp, tau_i[:] = ( -+ torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in np.linalg.qr(A_i, mode='raw') -+ ) - reflectors_i[:] = reflectors_tmp.T - reflectors = reflectors.view(*A_cpu.shape) - tau = tau.view(tau_shape) diff --git a/recipe/patches/0011-Fixes-NumPy-2-test-failures-in-test_torch.py-137740.patch b/recipe/patches/0011-Fixes-NumPy-2-test-failures-in-test_torch.py-137740.patch deleted file mode 100644 index fe156f3d4..000000000 --- a/recipe/patches/0011-Fixes-NumPy-2-test-failures-in-test_torch.py-137740.patch +++ /dev/null @@ -1,60 +0,0 @@ -From a1ced1daa37fdcf5345aa0c3095cab8674c1cded Mon Sep 17 00:00:00 2001 -From: Haifeng Jin -Date: Sat, 12 Oct 2024 02:40:17 +0000 -Subject: [PATCH 11/15] Fixes NumPy 2 test failures in test_torch.py (#137740) - -Related to #107302 - -The breakages are caused by backward incompatibility between NumPy 1 and NumPy 2. -This PR fixes all the corresponding test failures in `test_torch.py`. - -1. The dtype of the return value `np.percentile` when passed a `torch.float32` tensor. -NumPy 1: Return value of `np.float64`. -NumPy 2: Return value of `np.float32`. -Solution: Enforce it with `.astype(np.float64)`. - -2. The type of `np.gradient()` when returning multiple arrays. -NumPy1: A list of arrays. -NumPy2: A tuple of arrays. -Solution: Cast the tuple to a list. -Pull Request resolved: https://github.com/pytorch/pytorch/pull/137740 -Approved by: https://github.com/ezyang ---- - test/test_torch.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/test/test_torch.py b/test/test_torch.py -index be4d6180819..c6fd6ac9f19 100644 ---- a/test/test_torch.py -+++ b/test/test_torch.py -@@ -2891,7 +2891,7 @@ else: - - # if the given input arg is not a list, it returns a list of single element: [arg] - def _wrap_to_list(self, input_array): -- return input_array if isinstance(input_array, list) else [input_array] -+ return list(input_array) if isinstance(input_array, (list, tuple)) else [input_array] - - # To ensure inf, -inf, and nan values do not cause divergence between Numpy and PyTorch. - # There are two types of possible divergence: -@@ -3029,7 +3029,7 @@ else: - # Result is given just as real number and all the imaginary parts to be equal to zero. 
- self.assertEqual(expected[i].imag, torch.zeros(actual[i].shape), exact_dtype=False) - else: -- actual, expected = self._inf_nan_preprocess(list(actual), expected) -+ actual, expected = self._inf_nan_preprocess(list(actual), list(expected)) - self.assertEqual(actual, expected, equal_nan=True, exact_dtype=False) - - @onlyNativeDeviceTypes -@@ -7549,10 +7549,10 @@ class TestTorch(TestCase): - torch.mean(sample, dim=0), torch.full((d,), 0.5), atol=2, rtol=2 - ) - torch.testing.assert_close( -- np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=2, rtol=2 -+ np.percentile(sample, 25, axis=0).astype(np.float64), np.repeat(0.25, d), atol=2, rtol=2 - ) - torch.testing.assert_close( -- np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=2, rtol=2 -+ np.percentile(sample, 75, axis=0).astype(np.float64), np.repeat(0.75, d), atol=2, rtol=2 - ) - - @skipIfTorchDynamo("np.float64 restored as float32 after graph break.") diff --git a/recipe/patches/0015-CD-Enable-Python-3.13-on-windows-138095.patch b/recipe/patches/0015-CD-Enable-Python-3.13-on-windows-138095.patch deleted file mode 100644 index b62f6456c..000000000 --- a/recipe/patches/0015-CD-Enable-Python-3.13-on-windows-138095.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 66fa234eccfe98339bfa2bba6eae425ce27a037d Mon Sep 17 00:00:00 2001 -From: atalman -Date: Tue, 12 Nov 2024 12:28:10 +0000 -Subject: [PATCH 15/15] CD Enable Python 3.13 on windows (#138095) - -Adding CD windows. Part of: https://github.com/pytorch/pytorch/issues/130249 -Builder PR landed with smoke test: https://github.com/pytorch/builder/pull/2035 - -Pull Request resolved: https://github.com/pytorch/pytorch/pull/138095 -Approved by: https://github.com/Skylion007, https://github.com/malfet - -Cherry-pick-note: minus changes in `.github/*` ---- - functorch/csrc/dim/dim.cpp | 1 + - functorch/csrc/dim/dim_opcode.c | 13 ++++++++++++- - 2 files changed, 13 insertions(+), 1 deletion(-) - -diff --git a/functorch/csrc/dim/dim.cpp b/functorch/csrc/dim/dim.cpp -index 722618efbb0..f98818bfdcc 100644 ---- a/functorch/csrc/dim/dim.cpp -+++ b/functorch/csrc/dim/dim.cpp -@@ -38,6 +38,7 @@ PyObject* Dim_init() { - #include "python_variable_simple.h" - - #if IS_PYTHON_3_11_PLUS -+ - #define Py_BUILD_CORE - #include "internal/pycore_opcode.h" - #undef Py_BUILD_CORE -diff --git a/functorch/csrc/dim/dim_opcode.c b/functorch/csrc/dim/dim_opcode.c -index 81ba62a3781..1b5d0677344 100644 ---- a/functorch/csrc/dim/dim_opcode.c -+++ b/functorch/csrc/dim/dim_opcode.c -@@ -1,6 +1,17 @@ - #include - #if defined(_WIN32) && IS_PYTHON_3_11_PLUS - #define Py_BUILD_CORE --#define NEED_OPCODE_TABLES -+#define NEED_OPCODE_TABLES // To get _PyOpcode_Deopt, _PyOpcode_Caches -+ -+#if IS_PYTHON_3_13_PLUS -+#include // To get PyUnstable_Code_GetFirstFree -+#define NEED_OPCODE_METADATA -+#include "internal/pycore_opcode_metadata.h" -+#undef NEED_OPCODE_METADATA -+#else - #include "internal/pycore_opcode.h" - #endif -+ -+#undef NEED_OPCODE_TABLES -+#undef Py_BUILD_CORE -+#endif From 1e211b200c6ef1f2d0491132da64412d55379139 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sun, 19 Jan 2025 09:23:20 +0100 Subject: [PATCH 04/58] Remove bound on setuptools version See https://github.com/pytorch/pytorch/commit/2398e758d2493c8142d4b7f9c72207f05991e9e9 Partially reverts commit 46d7b46a8467e9db4355c0dd22593551bd684d1d. 
--- recipe/meta.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index cd90e57f4..4d7073442 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -133,9 +133,7 @@ requirements: - numpy * # [megabuild] - numpy # [not megabuild] - pip - # see https://github.com/pytorch/pytorch/issues/136541 - - setuptools <=72.1.0 # [win] - - setuptools # [not win] + - setuptools - pyyaml - requests - six @@ -271,9 +269,7 @@ outputs: - python - numpy - pip - # see https://github.com/pytorch/pytorch/issues/136541 - - setuptools <=72.1.0 # [win] - - setuptools # [not win] + - setuptools - pyyaml - requests - six From 63c7c007ef8b11a60a96bdf5f013eecb1d96924e Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sun, 19 Jan 2025 09:35:49 +0100 Subject: [PATCH 05/58] better sort pytorch run requirements --- recipe/meta.yaml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 4d7073442..f6785d763 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -288,27 +288,27 @@ outputs: - typing_extensions - {{ pin_subpackage('libtorch', exact=True) }} run: + - libtorch {{ version }} - llvm-openmp # [osx] - intel-openmp {{ mkl }} # [win] - libblas * *{{ blas_impl }} # [blas_impl == "mkl"] + - nomkl # [blas_impl != "mkl"] # GPU requirements without run_exports - - {{ pin_compatible('cudnn') }} # [cuda_compiler_version != "None"] - # other requirements + - {{ pin_compatible('cudnn') }} # [cuda_compiler_version != "None"] + - triton {{ triton }} # [cuda_compiler_version != "None" and not win] + # avoid that people without GPUs needlessly download ~0.5-1GB + - __cuda # [cuda_compiler_version != "None"] - python - - typing_extensions - # sympy 1.13.2 was reported to result in test failures on Windows and mac - # https://github.com/pytorch/pytorch/pull/133235 - - sympy >=1.13.1,!=1.13.2 + # other requirements, see https://github.com/pytorch/pytorch/blame/main/requirements.txt - filelock + - fsspec - jinja2 - networkx - - nomkl # [blas_impl != "mkl"] - - fsspec - # avoid that people without GPUs needlessly download ~0.5-1GB - - __cuda # [cuda_compiler_version != "None"] - - libtorch {{ version }} - setuptools - - triton {{ triton }} # [cuda_compiler_version != "None" and not win] + # sympy 1.13.2 was reported to result in test failures on Windows and mac + # https://github.com/pytorch/pytorch/pull/133235 + - sympy >=1.13.1,!=1.13.2 + - typing_extensions run_constrained: # These constraints ensure conflict between pytorch and # pytorch-cpu 1.1 which we built before conda-forge had GPU infrastructure From bc3581dfdf2c63969c8701ef486c9bc111fb6d81 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Sun, 19 Jan 2025 09:37:38 +0100 Subject: [PATCH 06/58] update dependencies for 2.6.0 --- recipe/meta.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index f6785d763..047a92929 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -304,11 +304,12 @@ outputs: - fsspec - jinja2 - networkx + - optree >=0.13.0 - setuptools # sympy 1.13.2 was reported to result in test failures on Windows and mac # https://github.com/pytorch/pytorch/pull/133235 - sympy >=1.13.1,!=1.13.2 - - typing_extensions + - typing_extensions >=4.10.0 run_constrained: # These constraints ensure conflict between pytorch and # pytorch-cpu 1.1 which we built before conda-forge had GPU infrastructure From 113d413f5118d8c25c7a52e30716251d38dbd711 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sun, 19 Jan 2025 09:38:02 +0100 Subject: [PATCH 07/58] pin libtorch in pytorch --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 047a92929..bdba3679a 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -288,7 +288,7 @@ outputs: - typing_extensions - {{ pin_subpackage('libtorch', exact=True) }} run: - - libtorch {{ version }} + - {{ pin_subpackage('libtorch', exact=True) }} - llvm-openmp # [osx] - intel-openmp {{ mkl }} # [win] - libblas * *{{ blas_impl }} # [blas_impl == "mkl"] From c1b9cf6f0de8a4184f40732647432ee79b6c0602 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Mon, 20 Jan 2025 14:01:18 +1100 Subject: [PATCH 08/58] backport fix for aarch build failure --- recipe/meta.yaml | 3 ++ ...aarch64-gcc13-workflow-and-resolve-a.patch | 50 +++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch diff --git a/recipe/meta.yaml b/recipe/meta.yaml index bdba3679a..3f361118b 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -50,6 +50,9 @@ source: # https://github.com/pytorch/pytorch/pull/143894 - patches/0008-fix-issue-142484.patch - patches/0009-Fix-FindOpenBLAS.patch + # backport https://github.com/google/XNNPACK/commit/5f23827e66cca435fa400b6e221892ac95af0079 + # for https://github.com/pytorch/pytorch/issues/141083 + - patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch build: number: {{ build }} diff --git a/recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch b/recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch new file mode 100644 index 000000000..b4125af10 --- /dev/null +++ b/recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch @@ -0,0 +1,50 @@ +From 24642e92b0ee9cbbda43096dd804ce6d63f3826c Mon Sep 17 00:00:00 2001 +From: XNNPACK Team +Date: Mon, 25 Nov 2024 13:00:56 -0800 +Subject: [PATCH] Fix `bazel-linux-aarch64-gcc13` workflow and resolve + accompanying build errors. + +Note that task names should not be prefixes of another task since this messes up retrieving the correct cache for each task. 
+ +PiperOrigin-RevId: 700075031 + +Cherry-pick note: removed changes in .github/workflows/build.yml due to conflicts +--- + src/reference/unary-elementwise.cc | 10 ++++++++++ + src/xnnpack/simd/s16-neon.h | 2 +- + 2 files changed, 11 insertions(+), 1 deletion(-) + +diff --git a/src/reference/unary-elementwise.cc b/src/reference/unary-elementwise.cc +index bd95ded6c..da892d8be 100644 +--- a/src/reference/unary-elementwise.cc ++++ b/src/reference/unary-elementwise.cc +@@ -127,6 +127,16 @@ struct ConvertOp { + } + }; + ++#ifdef XNN_HAVE_FLOAT16 ++template <> ++struct ConvertOp { ++ explicit ConvertOp(const xnn_unary_uparams*) {} ++ _Float16 operator()(xnn_bfloat16 x) const { ++ return static_cast<_Float16>(static_cast(x)); ++ } ++}; ++#endif ++ + template + const xnn_unary_elementwise_config* get_convert_config( + std::true_type /*input_quantized*/, std::true_type /*output_quantized*/) { +diff --git a/src/xnnpack/simd/s16-neon.h b/src/xnnpack/simd/s16-neon.h +index 4e8ebcfbd..e8392f4e9 100644 +--- a/src/xnnpack/simd/s16-neon.h ++++ b/src/xnnpack/simd/s16-neon.h +@@ -70,7 +70,7 @@ static XNN_INLINE void xnn_store_tail_s16(int16_t* output, xnn_simd_s16_t v, + v_low = vget_high_s16(v); + } + if (num_elements & 2) { +- vst1_lane_s32((void*) output, vreinterpret_s32_s16(v_low), 0); ++ vst1_lane_s32((int32_t*) output, vreinterpret_s32_s16(v_low), 0); + output += 2; + v_low = vext_s16(v_low, v_low, 2); + } From bf65aa4661f67997a8ff742eadaef7c2b533e651 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Mon, 20 Jan 2025 14:02:06 +1100 Subject: [PATCH 09/58] reduce OMP_NUM_THREADS further due to OOMs in test --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 3f361118b..61e7b1eff 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -426,7 +426,7 @@ outputs: # the whole test suite takes forever, but we should get a good enough coverage # for potential packaging problems by running a fixed subset - - export OMP_NUM_THREADS=4 # [unix] + - export OMP_NUM_THREADS=2 # [unix] # reduced paralellism to avoid OOM; test only one python version on aarch because emulation is super-slow - python -m pytest -n 2 {{ tests }} -k "not ({{ skips }})" --durations=50 # [unix and (not aarch64 or py==312)] - python -m pytest -v -s {{ tests }} -k "not ({{ skips }})" --durations=50 # [win] From 6757593df13f3c56c0aee301195e35392ec11cb4 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Mon, 20 Jan 2025 08:07:55 +0100 Subject: [PATCH 10/58] use full paths to patched files in submodule --- ...ux-aarch64-gcc13-workflow-and-resolve-a.patch | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch b/recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch index b4125af10..9a5d3129c 100644 --- a/recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch +++ b/recipe/patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch @@ -10,14 +10,14 @@ PiperOrigin-RevId: 700075031 Cherry-pick note: removed changes in .github/workflows/build.yml due to conflicts --- - src/reference/unary-elementwise.cc | 10 ++++++++++ - src/xnnpack/simd/s16-neon.h | 2 +- + third_party/XNNPACK/src/reference/unary-elementwise.cc | 10 ++++++++++ + third_party/XNNPACK/src/xnnpack/simd/s16-neon.h | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) -diff --git a/src/reference/unary-elementwise.cc b/src/reference/unary-elementwise.cc +diff --git a/third_party/XNNPACK/src/reference/unary-elementwise.cc b/third_party/XNNPACK/src/reference/unary-elementwise.cc index bd95ded6c..da892d8be 100644 ---- a/src/reference/unary-elementwise.cc -+++ b/src/reference/unary-elementwise.cc +--- a/third_party/XNNPACK/src/reference/unary-elementwise.cc ++++ b/third_party/XNNPACK/src/reference/unary-elementwise.cc @@ -127,6 +127,16 @@ struct ConvertOp { } }; @@ -35,10 +35,10 @@ index bd95ded6c..da892d8be 100644 template const xnn_unary_elementwise_config* get_convert_config( std::true_type /*input_quantized*/, std::true_type /*output_quantized*/) { -diff --git a/src/xnnpack/simd/s16-neon.h b/src/xnnpack/simd/s16-neon.h +diff --git a/third_party/XNNPACK/src/xnnpack/simd/s16-neon.h b/third_party/XNNPACK/src/xnnpack/simd/s16-neon.h index 4e8ebcfbd..e8392f4e9 100644 ---- a/src/xnnpack/simd/s16-neon.h -+++ b/src/xnnpack/simd/s16-neon.h +--- a/third_party/XNNPACK/src/xnnpack/simd/s16-neon.h ++++ b/third_party/XNNPACK/src/xnnpack/simd/s16-neon.h @@ -70,7 +70,7 @@ static XNN_INLINE void xnn_store_tail_s16(int16_t* output, xnn_simd_s16_t v, v_low = vget_high_s16(v); } From fa31c3f63580a0c6990f37f91c7e266ab4aaf6c6 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Mon, 20 Jan 2025 08:14:40 +0100 Subject: [PATCH 11/58] don't pin libtorch exactly for non-megabuilds --- recipe/meta.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 61e7b1eff..2ad03b88d 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -291,7 +291,9 @@ outputs: - typing_extensions - {{ pin_subpackage('libtorch', exact=True) }} run: - - {{ pin_subpackage('libtorch', exact=True) }} + - {{ pin_subpackage('libtorch', exact=True) }} # [megabuild] + # for non-megabuild, allow libtorch from any python version + - libtorch {{ version }}.* *_{{ build }} # [not megabuild] - llvm-openmp # [osx] - intel-openmp {{ mkl }} # [win] - libblas * *{{ blas_impl }} # [blas_impl == "mkl"] From 49d134a9e8e9bc5e23148bad1d2cb374219f0bcc Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Mon, 20 Jan 2025 08:16:28 +0100 Subject: [PATCH 12/58] make build number arithmetic explicit don't rely on PKG_BUILDNUM resolving this correctly, which is either racy, or implicitly depends on a separate render pass after setting build.number --- recipe/meta.yaml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 2ad03b88d..06e20f9c1 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -66,8 +66,8 @@ build: {% else %} skip: true # [is_rc] {% endif %} - string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [cuda_compiler_version != "None"] - string: cpu_{{ blas_impl }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [cuda_compiler_version == "None"] + string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_h{{ PKG_HASH }}_{{ build }} # [cuda_compiler_version != "None"] + string: cpu_{{ blas_impl }}_h{{ PKG_HASH }}_{{ build }} # [cuda_compiler_version == "None"] detect_binary_files_with_prefix: false run_exports: - {{ pin_subpackage('libtorch', max_pin='x.x') }} @@ -168,8 +168,8 @@ requirements: - pytorch-gpu ==99999999 # [cuda_compiler_version == "None"] - pytorch-gpu =={{ version }} # [cuda_compiler_version != "None"] - pytorch-cpu ==99999999 # [cuda_compiler_version != "None"] - - pytorch {{ version }} cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_*_{{ PKG_BUILDNUM }} # [cuda_compiler_version != "None"] - - pytorch {{ version }} cpu_{{ blas_impl }}_*_{{ PKG_BUILDNUM }} # [cuda_compiler_version == "None"] + - pytorch {{ version }} cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_*_{{ build }} # [cuda_compiler_version != "None"] + - pytorch {{ version }} cpu_{{ blas_impl }}_*_{{ build }} # [cuda_compiler_version == "None"] # if using OpenBLAS, ensure that a version compatible with OpenMP is used # otherwise, we get the following warnings: # OpenBLAS Warning : Detect OpenMP Loop and this application may hang. Please rebuild the library with USE_OPENMP=1 option. 
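Note (illustrative aside, not part of the patch series): the OpenBLAS/OpenMP caveat in the comment above can be verified at runtime; one way (threadpoolctl is an assumption here, not part of the recipe) is to list the detected threadpools and the threading layer OpenBLAS was built with:

```python
from threadpoolctl import threadpool_info

# One dict per detected pool (OpenMP runtime, OpenBLAS, MKL, ...); for OpenBLAS,
# "threading_layer" distinguishes pthreads builds from OpenMP builds.
for pool in threadpool_info():
    print(pool["user_api"], pool.get("internal_api"),
          pool.get("threading_layer"), pool.get("num_threads"))
```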
@@ -216,8 +216,8 @@ outputs: script: build.sh # [unix] script: bld.bat # [win] build: - string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [cuda_compiler_version != "None"] - string: cpu_{{ blas_impl }}_py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [cuda_compiler_version == "None"] + string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ build }} # [cuda_compiler_version != "None"] + string: cpu_{{ blas_impl }}_py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ build }} # [cuda_compiler_version == "None"] detect_binary_files_with_prefix: false run_exports: - {{ pin_subpackage('pytorch', max_pin='x.x') }} @@ -440,19 +440,19 @@ outputs: {% set pytorch_cpu_gpu = "pytorch-gpu" %} # [cuda_compiler_version != "None"] - name: {{ pytorch_cpu_gpu }} build: - string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [megabuild and cuda_compiler_version != "None"] - string: cpu_{{ blas_impl }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [megabuild and cuda_compiler_version == "None"] - string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [not megabuild and cuda_compiler_version != "None"] - string: cpu_{{ blas_impl }}_py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ PKG_BUILDNUM }} # [not megabuild and cuda_compiler_version == "None"] + string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}_h{{ PKG_HASH }}_{{ build }} # [megabuild and cuda_compiler_version != "None"] + string: cpu_{{ blas_impl }}_h{{ PKG_HASH }}_{{ build }} # [megabuild and cuda_compiler_version == "None"] + string: cuda{{ cuda_compiler_version | replace('.', '') }}_{{ blas_impl }}py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ build }} # [not megabuild and cuda_compiler_version != "None"] + string: cpu_{{ blas_impl }}_py{{ CONDA_PY }}_h{{ PKG_HASH }}_{{ build }} # [not megabuild and cuda_compiler_version == "None"] detect_binary_files_with_prefix: false # weigh down cpu implementation and give cuda preference track_features: - pytorch-cpu # [cuda_compiler_version == "None"] requirements: run: - - pytorch {{ version }}=cuda*_{{ blas_impl }}*{{ PKG_BUILDNUM }} # [megabuild and cuda_compiler_version != "None"] - - pytorch {{ version }}=cpu_{{ blas_impl }}*{{ PKG_BUILDNUM }} # [megabuild and cuda_compiler_version == "None"] - - {{ pin_subpackage("pytorch", exact=True) }} # [not megabuild] + - pytorch {{ version }}=cuda*_{{ blas_impl }}*{{ build }} # [megabuild and cuda_compiler_version != "None"] + - pytorch {{ version }}=cpu_{{ blas_impl }}*{{ build }} # [megabuild and cuda_compiler_version == "None"] + - {{ pin_subpackage("pytorch", exact=True) }} # [not megabuild] test: imports: - torch From 7cd981db647510da1daa657af58e26504851b58a Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Mon, 20 Jan 2025 08:30:07 +0100 Subject: [PATCH 13/58] disable patch in submodule --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 06e20f9c1..62fb17ee4 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -52,7 +52,7 @@ source: - patches/0009-Fix-FindOpenBLAS.patch # backport https://github.com/google/XNNPACK/commit/5f23827e66cca435fa400b6e221892ac95af0079 # for https://github.com/pytorch/pytorch/issues/141083 - - patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch + # - patches_submodules/0001-Fix-bazel-linux-aarch64-gcc13-workflow-and-resolve-a.patch build: number: {{ build }} From 1405263bf7c8bdeef988a66c6c577bb7ed669ebd Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Mon, 20 Jan 2025 09:12:07 +0100 Subject: [PATCH 14/58] don't pin build number of libtorch in pytorch (for non-megabuild) otherwise conda breaks ``` conda_build.exceptions.RecipeError: Mismatching hashes in recipe. Exact pins in dependencies that contribute to the hash often cause this. Can you change one or more exact pins to version bound constraints? Involved packages were: Mismatching package: libtorch (id cpu_generic_habf3c96_0); dep: libtorch 2.6.0.rc7 *0; consumer package: pytorch ``` --- recipe/meta.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 62fb17ee4..5301d0f59 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -292,8 +292,9 @@ outputs: - {{ pin_subpackage('libtorch', exact=True) }} run: - {{ pin_subpackage('libtorch', exact=True) }} # [megabuild] - # for non-megabuild, allow libtorch from any python version - - libtorch {{ version }}.* *_{{ build }} # [not megabuild] + # for non-megabuild, allow libtorch from any python version; + # pinning build number would be nice but breaks conda + - libtorch {{ version }}.* # [not megabuild] - llvm-openmp # [osx] - intel-openmp {{ mkl }} # [win] - libblas * *{{ blas_impl }} # [blas_impl == "mkl"] From be1ff5b057bb9ef95a0a8c35a4904d0579badffe Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Fri, 31 Jan 2025 14:02:00 +1100 Subject: [PATCH 15/58] pytorch v2.6.0 --- recipe/meta.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 127a49daf..92e31c859 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -1,5 +1,5 @@ # if you wish to build release candidate number X, append the version string with ".rcX" -{% set version = "2.6.0.rc7" %} +{% set version = "2.6.0" %} {% set build = 0 %} # Use a higher build number for the CUDA variant, to ensure that it's @@ -16,7 +16,7 @@ # see .ci/docker/ci_commit_pins/triton.txt # pytorch and triton are released in tandem, see notes in their release process # https://github.com/pytorch/pytorch/blob/main/RELEASE.md#triton-dependency-for-the-release -{% set triton = "3.1.0" %} +{% set triton = "3.2.0" %} # TODO Temporary pin, remove me {% set mkl = "<2025" %} @@ -32,7 +32,7 @@ source: {% else %} # The "pytorch-v" tarballs contain submodules; the "pytorch-" ones don't. 
url: https://github.com/pytorch/pytorch/releases/download/v{{ version }}/pytorch-v{{ version }}.tar.gz - sha256: 740eb5fff95e33cfe699bad43be83523f569c7cc7f9c285c2a255416443dd266 + sha256: 3005690eb7b083c443a38c7657938af63902f524ad87a6c83f1aca38c77e3b57 {% endif %} patches: - patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch @@ -386,7 +386,7 @@ outputs: # tools/ is needed to optimise test run # as of pytorch=2.0.0, there is a bug when trying to run tests without the tools - tools - #- .ci/pytorch/smoke_test/smoke_test.py + - .ci/pytorch/smoke_test/smoke_test.py commands: # Run pip check so as to ensure that all pytorch packages are installed # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/24 @@ -434,7 +434,7 @@ outputs: - export TARGET_OS="macos-arm64" # [(osx and arm64)] - export TARGET_OS="macos-x86_64" # [(osx and x86_64)] - export OMP_NUM_THREADS=4 # [not win] - #- python ./smoke_test/smoke_test.py --package torchonly + - python ./smoke_test/smoke_test.py --package torchonly # a reasonably safe subset of tests that should run under 15 minutes {% set tests = " ".join([ From 95eb61406b6f2e79b1949aa5482315bb22189775 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Fri, 31 Jan 2025 14:03:10 +1100 Subject: [PATCH 16/58] MNT: Re-rendered with conda-build 25.1.1, conda-smithy 3.45.4, and conda-forge-pinning 2025.01.28.22.50.30 --- .azure-pipelines/azure-pipelines-osx.yml | 60 +++++----- ...Nonecxx_compiler_version13is_rcFalse.yaml} | 8 +- ...12.6cxx_compiler_version13is_rcFalse.yaml} | 8 +- ...Nonecxx_compiler_version13is_rcFalse.yaml} | 8 +- ...12.6cxx_compiler_version13is_rcFalse.yaml} | 8 +- ...Nonecxx_compiler_version13is_rcFalse.yaml} | 8 +- ...12.6cxx_compiler_version13is_rcFalse.yaml} | 8 +- ...cFalsenumpy2.0python3.10.____cpython.yaml} | 8 +- ...cFalsenumpy2.0python3.11.____cpython.yaml} | 8 +- ...cFalsenumpy2.0python3.12.____cpython.yaml} | 8 +- ...rcFalsenumpy2.0python3.9.____cpython.yaml} | 8 +- ...is_rcFalsenumpy2python3.13.____cp313.yaml} | 8 +- ...cFalsenumpy2.0python3.10.____cpython.yaml} | 8 +- ...cFalsenumpy2.0python3.11.____cpython.yaml} | 8 +- ...cFalsenumpy2.0python3.12.____cpython.yaml} | 8 +- ...rcFalsenumpy2.0python3.9.____cpython.yaml} | 8 +- ...is_rcFalsenumpy2python3.13.____cp313.yaml} | 8 +- ...cFalsenumpy2.0python3.10.____cpython.yaml} | 8 +- ...cFalsenumpy2.0python3.11.____cpython.yaml} | 8 +- ...cFalsenumpy2.0python3.12.____cpython.yaml} | 8 +- ...rcFalsenumpy2.0python3.9.____cpython.yaml} | 8 +- ...is_rcFalsenumpy2python3.13.____cp313.yaml} | 8 +- ...ecuda_compiler_versionNoneis_rcFalse.yaml} | 8 +- ...ccuda_compiler_version12.6is_rcFalse.yaml} | 8 +- .github/workflows/conda-build.yml | 32 +++--- README.md | 108 +++++++++--------- 26 files changed, 238 insertions(+), 146 deletions(-) rename .ci_support/{linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml => linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml} (94%) rename .ci_support/{linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml => linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml} (95%) rename 
.ci_support/{linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml => linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml} (94%) rename .ci_support/{linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml => linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml} (95%) rename .ci_support/{linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml => linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml} (95%) rename .ci_support/{linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml => linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml} (95%) rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml => osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml} (94%) rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml => osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml} (94%) rename .ci_support/{osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml => 
osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml} (94%) rename .ci_support/{osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml => osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml} (94%) rename .ci_support/{osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml => osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml} (94%) rename .ci_support/{osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml => osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml} (94%) rename .ci_support/{osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml => osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml} (94%) rename .ci_support/{osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml => osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml} (94%) rename .ci_support/{win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml => win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml} (92%) rename .ci_support/{win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml => win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml} (92%) diff --git a/.azure-pipelines/azure-pipelines-osx.yml b/.azure-pipelines/azure-pipelines-osx.yml index 9b43e1a8a..bcfc9f37a 100755 --- a/.azure-pipelines/azure-pipelines-osx.yml +++ b/.azure-pipelines/azure-pipelines-osx.yml @@ -8,50 +8,50 @@ jobs: vmImage: macOS-13 strategy: matrix: - osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython: - CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython + osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython: + CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython: - CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython + osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython: + CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython: - CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython + osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython: + CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython: - CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython + osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython: + CONFIG: 
osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313: - CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313 + osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313: + CONFIG: osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313 UPLOAD_PACKAGES: 'True' - osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython: - CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython + osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython: + CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython: - CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython + osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython: + CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython: - CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython + osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython: + CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython: - CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython + osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython: + CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython UPLOAD_PACKAGES: 'True' - osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313: - CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313 + osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313: + CONFIG: osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313 UPLOAD_PACKAGES: 'True' - osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython: - CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython + osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython: + CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython UPLOAD_PACKAGES: 'True' - osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython: - CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython + osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython: + CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython UPLOAD_PACKAGES: 'True' - osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython: - CONFIG: 
osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython + osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython: + CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython UPLOAD_PACKAGES: 'True' - osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython: - CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython + osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython: + CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython UPLOAD_PACKAGES: 'True' - osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313: - CONFIG: osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313 + osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313: + CONFIG: osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313 UPLOAD_PACKAGES: 'True' timeoutInMinutes: 360 variables: {} diff --git a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml similarity index 94% rename from .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml rename to .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml index a78a0027d..ddb79d64d 100644 --- a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml +++ b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml @@ -13,7 +13,7 @@ cdt_name: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -29,7 +29,7 @@ docker_image: github_actions_labels: - cirun-openstack-gpu-2xlarge is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -54,6 +54,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -78,3 +80,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml similarity index 95% rename from .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml rename to .ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml index 01847ccdf..67218f693 100644 --- 
a/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml +++ b/.ci_support/linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml @@ -13,7 +13,7 @@ cdt_name: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - cuda-nvcc cuda_compiler_version: @@ -29,7 +29,7 @@ docker_image: github_actions_labels: - cirun-openstack-gpu-2xlarge is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -54,6 +54,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -78,3 +80,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml similarity index 94% rename from .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml rename to .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml index 8b693c032..9814faf5d 100644 --- a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml +++ b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml @@ -13,7 +13,7 @@ cdt_name: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -29,7 +29,7 @@ docker_image: github_actions_labels: - cirun-openstack-gpu-2xlarge is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -54,6 +54,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -78,3 +80,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml similarity index 95% rename from .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml rename to .ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml index ea6b01996..c4f5e7137 100644 --- a/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml +++ 
b/.ci_support/linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml @@ -13,7 +13,7 @@ cdt_name: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - cuda-nvcc cuda_compiler_version: @@ -29,7 +29,7 @@ docker_image: github_actions_labels: - cirun-openstack-gpu-2xlarge is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -54,6 +54,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -78,3 +80,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml similarity index 95% rename from .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml rename to .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml index 8b5baaa1b..227d605dd 100644 --- a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue.yaml +++ b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse.yaml @@ -13,7 +13,7 @@ cdt_name: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -29,7 +29,7 @@ docker_image: github_actions_labels: - cirun-openstack-gpu-2xlarge is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -54,6 +54,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -78,3 +80,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml similarity index 95% rename from .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml rename to .ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml index 0e5851648..fde13792a 100644 --- a/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue.yaml +++ b/.ci_support/linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse.yaml @@ -13,7 +13,7 @@ cdt_name: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - cuda-nvcc cuda_compiler_version: @@ -29,7 +29,7 @@ docker_image: github_actions_labels: - cirun-openstack-gpu-2xlarge 
is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -54,6 +54,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -78,3 +80,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml index 311bdb756..501ed4d21 100644 --- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml +++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml index 6068d3561..aa9f1282a 100644 --- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml +++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml index d5c2bde05..f76824aa4 100644 --- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml +++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml @@ -15,7 +15,7 
@@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml index 40c12d146..a7cbd2d65 100644 --- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml +++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml similarity index 94% rename from .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml rename to .ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml index 8b1030b7e..543e475ee 100644 --- a/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml +++ b/.ci_support/osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml index 3560d4376..e1d6d150d 100644 --- 
a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml +++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml index 3de8c0a7b..7af20c3ba 100644 --- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml +++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml index b14fac625..f07344373 100644 --- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml +++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml similarity index 94% rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml rename to 
.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml index 517e85734..807c918f1 100644 --- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml +++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml similarity index 94% rename from .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml rename to .ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml index 53451a18b..196835138 100644 --- a/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml +++ b/.ci_support/osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml similarity index 94% rename from .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml rename to .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml index bad711727..464936e2f 100644 --- a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython.yaml +++ b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml similarity index 94% rename from 
.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml rename to .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml index dac06bb1d..9785408ff 100644 --- a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython.yaml +++ b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml similarity index 94% rename from .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml rename to .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml index d984d3b3f..90557f3de 100644 --- a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython.yaml +++ b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml similarity index 94% rename from .ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml rename to .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml index 75abdb732..2cee3388b 100644 --- a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython.yaml +++ b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpython.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml similarity index 94% rename from 
.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml rename to .ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml index 466779f09..a7fe35f59 100644 --- a/.ci_support/osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313.yaml +++ b/.ci_support/osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313.yaml @@ -15,7 +15,7 @@ c_stdlib_version: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -25,7 +25,7 @@ cxx_compiler: cxx_compiler_version: - '18' is_rc: -- 'True' +- 'False' libabseil: - '20240722' libblas: @@ -48,6 +48,8 @@ mkl: - '2023' numpy: - '2' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -65,3 +67,5 @@ zip_keys: - is_rc - - python - numpy +zlib: +- '1' diff --git a/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml b/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml similarity index 92% rename from .ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml rename to .ci_support/win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml index 34518fd03..63e8c0dcd 100644 --- a/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue.yaml +++ b/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse.yaml @@ -7,7 +7,7 @@ c_stdlib: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - None cuda_compiler_version: @@ -19,7 +19,7 @@ cxx_compiler: github_actions_labels: - cirun-azure-windows-2xlarge is_rc: -- 'True' +- 'False' libabseil: - '20240722' libprotobuf: @@ -36,6 +36,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -57,3 +59,5 @@ zip_keys: - cuda_compiler_version - - python - numpy +zlib: +- '1' diff --git a/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml b/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml similarity index 92% rename from .ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml rename to .ci_support/win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml index 63a96a4a1..51971f6d3 100644 --- a/.ci_support/win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue.yaml +++ b/.ci_support/win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse.yaml @@ -7,7 +7,7 @@ c_stdlib: channel_sources: - conda-forge channel_targets: -- conda-forge pytorch_rc +- conda-forge main cuda_compiler: - cuda-nvcc cuda_compiler_version: @@ -19,7 +19,7 @@ cxx_compiler: github_actions_labels: - cirun-azure-windows-2xlarge is_rc: -- 'True' +- 'False' libabseil: - '20240722' libprotobuf: @@ -36,6 +36,8 @@ numpy: - '2.0' - '2' - '2.0' +orc: +- 2.0.3 pin_run_as_build: python: min_pin: x.x @@ -57,3 +59,5 @@ zip_keys: - cuda_compiler_version - - python - numpy +zlib: +- '1' diff --git a/.github/workflows/conda-build.yml 
b/.github/workflows/conda-build.yml index 1714a477f..40a005a95 100644 --- a/.github/workflows/conda-build.yml +++ b/.github/workflows/conda-build.yml @@ -21,50 +21,50 @@ jobs: fail-fast: false matrix: include: - - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue + - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse UPLOAD_PACKAGES: True os: ubuntu - runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_hfd3c9e418e', 'linux', 'x64', 'self-hosted'] + runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_hb59e174a3e', 'linux', 'x64', 'self-hosted'] DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9 CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all" - - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue + - CONFIG: linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse UPLOAD_PACKAGES: True os: ubuntu - runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_h3d69e44d93', 'linux', 'x64', 'self-hosted'] + runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implgenericc_compiler_h5e2cde8be1', 'linux', 'x64', 'self-hosted'] DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9 CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all" - - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue + - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse UPLOAD_PACKAGES: True os: ubuntu - runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_hbf2b41cf9b', 'linux', 'x64', 'self-hosted'] + runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_h0b96eb68c6', 'linux', 'x64', 'self-hosted'] DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9 CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all" - - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue + - CONFIG: linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse UPLOAD_PACKAGES: True os: ubuntu - runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_hbac2c29721', 'linux', 'x64', 'self-hosted'] + runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_64_blas_implmklc_compiler_ver_hc39dedf959', 'linux', 'x64', 'self-hosted'] DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9 CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all" - - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue + - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalse UPLOAD_PACKAGES: True 
os: ubuntu - runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_h20ac0d977f', 'linux', 'x64', 'self-hosted'] + runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_h352ed12235', 'linux', 'x64', 'self-hosted'] DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9 CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all" - - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue + - CONFIG: linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalse UPLOAD_PACKAGES: True os: ubuntu - runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_hc16130462f', 'linux', 'x64', 'self-hosted'] + runs_on: ['cirun-openstack-gpu-2xlarge--${{ github.run_id }}-linux_aarch64_c_compiler_version13c_h94771c6c09', 'linux', 'x64', 'self-hosted'] DOCKER_IMAGE: quay.io/condaforge/linux-anvil-x86_64:alma9 CONDA_FORGE_DOCKER_RUN_ARGS: "--gpus all" - - CONFIG: win_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue + - CONFIG: win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalse UPLOAD_PACKAGES: True os: windows - runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_p_h4ffee7b9ab', 'windows', 'x64', 'self-hosted'] - - CONFIG: win_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue + runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_m_h0b759d4e63', 'windows', 'x64', 'self-hosted'] + - CONFIG: win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalse UPLOAD_PACKAGES: True os: windows - runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_p_hc6d99508db', 'windows', 'x64', 'self-hosted'] + runs_on: ['cirun-azure-windows-2xlarge--${{ github.run_id }}-win_64_channel_targetsconda-forge_m_h790f30616b', 'windows', 'x64', 'self-hosted'] steps: - name: Checkout code diff --git a/README.md b/README.md index 11043fb6e..4ce0f7506 100644 --- a/README.md +++ b/README.md @@ -37,164 +37,164 @@ Current build status
Variant | Status
linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalselinux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue - variant + variant
linux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalselinux_64_blas_implgenericc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue - variant + variant
linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalselinux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue - variant + variant
linux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalselinux_64_blas_implmklc_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue - variant + variant
linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcFalselinux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNonecxx_compiler_version13is_rcTrue - variant + variant
linux_aarch64_c_compiler_version13channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcFalselinux_aarch64_c_compiler_version13channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6cxx_compiler_version13is_rcTrue - variant + variant
osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpythonosx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython - variant + variant
osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpythonosx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython - variant + variant
osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpythonosx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython - variant + variant
osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpythonosx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython - variant + variant
osx_64_blas_implgenericchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313osx_64_blas_implgenericchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313 - variant + variant
osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpythonosx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython - variant + variant
osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpythonosx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython - variant + variant
osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpythonosx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython - variant + variant
osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpythonosx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython - variant + variant
osx_64_blas_implmklchannel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313osx_64_blas_implmklchannel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313 - variant + variant
osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.10.____cpythonosx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.10.____cpython - variant + variant
osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.11.____cpythonosx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.11.____cpython - variant + variant
osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.12.____cpythonosx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.12.____cpython - variant + variant
osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2.0python3.9.____cpythonosx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2.0python3.9.____cpython - variant + variant
osx_arm64_channel_targetsconda-forge_mainis_rcFalsenumpy2python3.13.____cp313osx_arm64_channel_targetsconda-forge_pytorch_rcis_rcTruenumpy2python3.13.____cp313 - variant + variant
win_64_channel_targetsconda-forge_maincuda_compilerNonecuda_compiler_versionNoneis_rcFalsewin_64_channel_targetsconda-forge_pytorch_rccuda_compilerNonecuda_compiler_versionNoneis_rcTrue - variant + variant
win_64_channel_targetsconda-forge_maincuda_compilercuda-nvcccuda_compiler_version12.6is_rcFalsewin_64_channel_targetsconda-forge_pytorch_rccuda_compilercuda-nvcccuda_compiler_version12.6is_rcTrue - variant + variant
- + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -218,14 +218,14 @@ Current release info Installing pytorch-cpu ====================== -Installing `pytorch-cpu` from the `conda-forge/label/pytorch_rc` channel can be achieved by adding `conda-forge/label/pytorch_rc` to your channels with: +Installing `pytorch-cpu` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with: ``` -conda config --add channels conda-forge/label/pytorch_rc +conda config --add channels conda-forge conda config --set channel_priority strict ``` -Once the `conda-forge/label/pytorch_rc` channel has been enabled, `libtorch, pytorch, pytorch-cpu, pytorch-gpu` can be installed with `conda`: +Once the `conda-forge` channel has been enabled, `libtorch, pytorch, pytorch-cpu, pytorch-gpu` can be installed with `conda`: ``` conda install libtorch pytorch pytorch-cpu pytorch-gpu @@ -240,26 +240,26 @@ mamba install libtorch pytorch pytorch-cpu pytorch-gpu It is possible to list all of the versions of `libtorch` available on your platform with `conda`: ``` -conda search libtorch --channel conda-forge/label/pytorch_rc +conda search libtorch --channel conda-forge ``` or with `mamba`: ``` -mamba search libtorch --channel conda-forge/label/pytorch_rc +mamba search libtorch --channel conda-forge ``` Alternatively, `mamba repoquery` may provide more information: ``` # Search all versions available on your platform: -mamba repoquery search libtorch --channel conda-forge/label/pytorch_rc +mamba repoquery search libtorch --channel conda-forge # List packages depending on `libtorch`: -mamba repoquery whoneeds libtorch --channel conda-forge/label/pytorch_rc +mamba repoquery whoneeds libtorch --channel conda-forge # List dependencies of `libtorch`: -mamba repoquery depends libtorch --channel conda-forge/label/pytorch_rc +mamba repoquery depends libtorch --channel conda-forge ``` From beb54816734ee407e1e9982daecd674de74ee12e Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Fri, 31 Jan 2025 14:09:14 +1100 Subject: [PATCH 17/58] remove test skips for 3.13 --- recipe/meta.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 92e31c859..8da1f07b9 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -461,12 +461,6 @@ outputs: {% set skips = skips ~ " or (TestLinalgCPU and test_pca_lowrank_cpu)" %} # [aarch64] {% set skips = skips ~ " or (TestLinalgCPU and test_svd_lowrank_cpu)" %} # [aarch64] {% set skips = skips ~ " or (TestMkldnnCPU and test_lstm_cpu)" %} # [aarch64] - # dynamo does not support python 3.13 - {% set skips = skips ~ " or (TestCustomOp and test_data_dependent_compile)" %} # [py==313] - {% set skips = skips ~ " or (TestCustomOp and test_functionalize_error)" %} # [py==313] - {% set skips = skips ~ " or (TestCustomOpAPI and test_compile)" %} # [py==313] - {% set skips = skips ~ " or (TestCustomOpAPI and test_fake)" %} # [py==313] - {% set skips = skips ~ " or test_compile_int4_mm or test_compile_int8_mm" %} # [py==313] # doesn't crash, but gets different result on aarch + CUDA {% set skips = skips ~ " or illcondition_matrix_input_should_not_crash_cpu" %} # [aarch64 and cuda_compiler_version != "None"] # may crash spuriously From 7c0d921efb425e1aabf2c2f57860ca48628344e4 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Fri, 31 Jan 2025 16:52:06 +1100 Subject: [PATCH 18/58] vendor upstream smoke_test.py; not part of tarball --- recipe/meta.yaml | 5 +- recipe/smoke_test.py | 388 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 391 insertions(+), 2 deletions(-) create mode 100644 recipe/smoke_test.py diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 8da1f07b9..d97c5e43b 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -386,7 +386,8 @@ outputs: # tools/ is needed to optimise test run # as of pytorch=2.0.0, there is a bug when trying to run tests without the tools - tools - - .ci/pytorch/smoke_test/smoke_test.py + files: + - smoke_test.py commands: # Run pip check so as to ensure that all pytorch packages are installed # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/24 @@ -434,7 +435,7 @@ outputs: - export TARGET_OS="macos-arm64" # [(osx and arm64)] - export TARGET_OS="macos-x86_64" # [(osx and x86_64)] - export OMP_NUM_THREADS=4 # [not win] - - python ./smoke_test/smoke_test.py --package torchonly + - python ./smoke_test.py --package torchonly # a reasonably safe subset of tests that should run under 15 minutes {% set tests = " ".join([ diff --git a/recipe/smoke_test.py b/recipe/smoke_test.py new file mode 100644 index 000000000..c70185c05 --- /dev/null +++ b/recipe/smoke_test.py @@ -0,0 +1,388 @@ +import argparse +# from https://github.com/pytorch/pytorch/blob/v2.6.0/.ci/pytorch/smoke_test/smoke_test.py +# update content below for new versions! +# -------------------------------------- +import importlib +import json +import os +import re +import subprocess +import sys +from pathlib import Path + +import torch +import torch._dynamo +import torch.nn as nn +import torch.nn.functional as F + + +if "MATRIX_GPU_ARCH_VERSION" in os.environ: + gpu_arch_ver = os.getenv("MATRIX_GPU_ARCH_VERSION") +else: + gpu_arch_ver = os.getenv("GPU_ARCH_VERSION") # Use fallback if available +gpu_arch_type = os.getenv("MATRIX_GPU_ARCH_TYPE") +channel = os.getenv("MATRIX_CHANNEL") +package_type = os.getenv("MATRIX_PACKAGE_TYPE") +target_os = os.getenv("TARGET_OS", sys.platform) +BASE_DIR = Path(__file__).parent.parent.parent + +is_cuda_system = gpu_arch_type == "cuda" +NIGHTLY_ALLOWED_DELTA = 3 + +MODULES = [ + { + "name": "torchvision", + "repo": "https://github.com/pytorch/vision.git", + "smoke_test": "./vision/test/smoke_test.py", + "extension": "extension", + "repo_name": "vision", + }, + { + "name": "torchaudio", + "repo": "https://github.com/pytorch/audio.git", + "smoke_test": "./audio/test/smoke_test/smoke_test.py --no-ffmpeg", + "extension": "_extension", + "repo_name": "audio", + }, +] + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1) + self.conv2 = nn.Conv2d(32, 64, 3, 1) + self.fc1 = nn.Linear(9216, 1) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = F.max_pool2d(x, 2) + x = torch.flatten(x, 1) + output = self.fc1(x) + return output + + +def load_json_from_basedir(filename: str): + try: + with open(BASE_DIR / filename) as fptr: + return json.load(fptr) + except FileNotFoundError as exc: + raise ImportError(f"File {filename} not found error: {exc.strerror}") from exc + except json.JSONDecodeError as exc: + raise ImportError(f"Invalid JSON {filename}") from exc + + +def read_release_matrix(): + return load_json_from_basedir("release_matrix.json") + + +def test_numpy(): + import numpy as np + + x = np.arange(5) + torch.tensor(x) + + +def check_version(package: str) -> None: + release_version 
= os.getenv("RELEASE_VERSION") + # if release_version is specified, use it to validate the packages + if release_version: + release_matrix = read_release_matrix() + stable_version = release_matrix["torch"] + else: + stable_version = os.getenv("MATRIX_STABLE_VERSION") + + # only makes sense to check nightly package where dates are known + if channel == "nightly": + check_nightly_binaries_date(package) + elif stable_version is not None: + if not torch.__version__.startswith(stable_version): + raise RuntimeError( + f"Torch version mismatch, expected {stable_version} for channel {channel}. But it's {torch.__version__}" + ) + + if release_version and package == "all": + for module in MODULES: + imported_module = importlib.import_module(module["name"]) + module_version = imported_module.__version__ + if not module_version.startswith(release_matrix[module["name"]]): + raise RuntimeError( + f"{module['name']} version mismatch, expected: \ + {release_matrix[module['name']]} for channel {channel}. But it's {module_version}" + ) + else: + print(f"{module['name']} version actual: {module_version} expected: \ + {release_matrix[module['name']]} for channel {channel}.") + + else: + print(f"Skip version check for channel {channel} as stable version is None") + + +def check_nightly_binaries_date(package: str) -> None: + from datetime import datetime + + format_dt = "%Y%m%d" + + date_t_str = re.findall("dev\\d+", torch.__version__) + date_t_delta = datetime.now() - datetime.strptime(date_t_str[0][3:], format_dt) + if date_t_delta.days >= NIGHTLY_ALLOWED_DELTA: + raise RuntimeError( + f"the binaries are from {date_t_str} and are more than {NIGHTLY_ALLOWED_DELTA} days old!" + ) + + if package == "all": + for module in MODULES: + imported_module = importlib.import_module(module["name"]) + module_version = imported_module.__version__ + date_m_str = re.findall("dev\\d+", module_version) + date_m_delta = datetime.now() - datetime.strptime( + date_m_str[0][3:], format_dt + ) + print(f"Nightly date check for {module['name']} version {module_version}") + if date_m_delta.days > NIGHTLY_ALLOWED_DELTA: + raise RuntimeError( + f"Expected {module['name']} to be less than {NIGHTLY_ALLOWED_DELTA} days. But it's {date_m_delta}" + ) + + +def test_cuda_runtime_errors_captured() -> None: + cuda_exception_missed = True + try: + print("Testing test_cuda_runtime_errors_captured") + torch._assert_async(torch.tensor(0, device="cuda")) + torch._assert_async(torch.tensor(0 + 0j, device="cuda")) + except RuntimeError as e: + if re.search("CUDA", f"{e}"): + print(f"Caught CUDA exception with success: {e}") + cuda_exception_missed = False + else: + raise e + if cuda_exception_missed: + raise RuntimeError("Expected CUDA RuntimeError but none was raised!") + + +def smoke_test_cuda( + package: str, runtime_error_check: str, torch_compile_check: str +) -> None: + if not torch.cuda.is_available() and is_cuda_system: + raise RuntimeError(f"Expected CUDA {gpu_arch_ver}. However CUDA is not loaded.") + + if package == "all" and is_cuda_system: + for module in MODULES: + imported_module = importlib.import_module(module["name"]) + # TBD for vision move extension module to private so it will + be _extension. 
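+ # (editor's note: per the MODULES list above, torchvision currently exposes a public + # "extension" submodule while torchaudio already uses the private "_extension"; the + # branch below looks up _check_cuda_version() on whichever name the package declares)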
+ version = "N/A" + if module["extension"] == "extension": + version = imported_module.extension._check_cuda_version() + else: + version = imported_module._extension._check_cuda_version() + print(f"{module['name']} CUDA: {version}") + + # torch.compile is available on macos-arm64 and Linux for python 3.8-3.13 + if ( + torch_compile_check == "enabled" + and sys.version_info < (3, 14, 0) + and target_os in ["linux", "linux-aarch64", "macos-arm64", "darwin"] + ): + smoke_test_compile("cuda" if torch.cuda.is_available() else "cpu") + + if torch.cuda.is_available(): + if torch.version.cuda != gpu_arch_ver: + raise RuntimeError( + f"Wrong CUDA version. Loaded: {torch.version.cuda} Expected: {gpu_arch_ver}" + ) + print(f"torch cuda: {torch.version.cuda}") + # todo add cudnn version validation + print(f"torch cudnn: {torch.backends.cudnn.version()}") + print(f"cuDNN enabled? {torch.backends.cudnn.enabled}") + + torch.cuda.init() + print("CUDA initialized successfully") + print(f"Number of CUDA devices: {torch.cuda.device_count()}") + for i in range(torch.cuda.device_count()): + print(f"Device {i}: {torch.cuda.get_device_name(i)}") + + # nccl is availbale only on Linux + if sys.platform in ["linux", "linux2"]: + print(f"torch nccl version: {torch.cuda.nccl.version()}") + + if runtime_error_check == "enabled": + test_cuda_runtime_errors_captured() + + +def smoke_test_conv2d() -> None: + import torch.nn as nn + + print("Testing smoke_test_conv2d") + # With square kernels and equal stride + m = nn.Conv2d(16, 33, 3, stride=2) + # non-square kernels and unequal stride and with padding + m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) + assert m is not None + # non-square kernels and unequal stride and with padding and dilation + basic_conv = nn.Conv2d( + 16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1) + ) + input = torch.randn(20, 16, 50, 100) + output = basic_conv(input) + + if is_cuda_system: + print("Testing smoke_test_conv2d with cuda") + conv = nn.Conv2d(3, 3, 3).cuda() + x = torch.randn(1, 3, 24, 24, device="cuda") + with torch.cuda.amp.autocast(): + out = conv(x) + assert out is not None + + supported_dtypes = [torch.float16, torch.float32, torch.float64] + for dtype in supported_dtypes: + print(f"Testing smoke_test_conv2d with cuda for {dtype}") + conv = basic_conv.to(dtype).cuda() + input = torch.randn(20, 16, 50, 100, device="cuda").type(dtype) + output = conv(input) + assert output is not None + + +def test_linalg(device="cpu") -> None: + print(f"Testing smoke_test_linalg on {device}") + A = torch.randn(5, 3, device=device) + U, S, Vh = torch.linalg.svd(A, full_matrices=False) + assert ( + U.shape == A.shape + and S.shape == torch.Size([3]) + and Vh.shape == torch.Size([3, 3]) + ) + torch.dist(A, U @ torch.diag(S) @ Vh) + + U, S, Vh = torch.linalg.svd(A) + assert ( + U.shape == torch.Size([5, 5]) + and S.shape == torch.Size([3]) + and Vh.shape == torch.Size([3, 3]) + ) + torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh) + + A = torch.randn(7, 5, 3, device=device) + U, S, Vh = torch.linalg.svd(A, full_matrices=False) + torch.dist(A, U @ torch.diag_embed(S) @ Vh) + + if device == "cuda": + supported_dtypes = [torch.float32, torch.float64] + for dtype in supported_dtypes: + print(f"Testing smoke_test_linalg with cuda for {dtype}") + A = torch.randn(20, 16, 50, 100, device=device, dtype=dtype) + torch.linalg.svd(A) + + +def smoke_test_compile(device: str = "cpu") -> None: + supported_dtypes = [torch.float16, torch.float32, torch.float64] + + def foo(x: torch.Tensor) -> 
torch.Tensor: + return torch.sin(x) + torch.cos(x) + + for dtype in supported_dtypes: + print(f"Testing smoke_test_compile for {device} and {dtype}") + x = torch.rand(3, 3, device=device).type(dtype) + x_eager = foo(x) + x_pt2 = torch.compile(foo)(x) + torch.testing.assert_close(x_eager, x_pt2) + + # Check that SIMD were detected for the architecture + if device == "cpu": + from torch._inductor.codecache import pick_vec_isa + + isa = pick_vec_isa() + if not isa: + raise RuntimeError("Can't detect vectorized ISA for CPU") + print(f"Picked CPU ISA {type(isa).__name__} bit width {isa.bit_width()}") + + # Reset torch dynamo since we are changing mode + torch._dynamo.reset() + dtype = torch.float32 + torch.set_float32_matmul_precision("high") + print(f"Testing smoke_test_compile with mode 'max-autotune' for {dtype}") + x = torch.rand(64, 1, 28, 28, device=device).type(torch.float32) + model = Net().to(device=device) + x_pt2 = torch.compile(model, mode="max-autotune")(x) + + +def smoke_test_modules(): + cwd = os.getcwd() + for module in MODULES: + if module["repo"]: + if not os.path.exists(f"{cwd}/{module['repo_name']}"): + print(f"Path does not exist: {cwd}/{module['repo_name']}") + try: + subprocess.check_output( + f"git clone --depth 1 {module['repo']}", + stderr=subprocess.STDOUT, + shell=True, + ) + except subprocess.CalledProcessError as exc: + raise RuntimeError( + f"Cloning {module['repo']} FAIL: {exc.returncode} Output: {exc.output}" + ) from exc + try: + smoke_test_command = f"python3 {module['smoke_test']}" + if target_os == "windows": + smoke_test_command = f"python {module['smoke_test']}" + output = subprocess.check_output( + smoke_test_command, + stderr=subprocess.STDOUT, + shell=True, + universal_newlines=True, + ) + except subprocess.CalledProcessError as exc: + raise RuntimeError( + f"Module {module['name']} FAIL: {exc.returncode} Output: {exc.output}" + ) from exc + else: + print(f"Output: \n{output}\n") + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--package", + help="Package to include in smoke testing", + type=str, + choices=["all", "torchonly"], + default="all", + ) + parser.add_argument( + "--runtime-error-check", + help="No Runtime Error check", + type=str, + choices=["enabled", "disabled"], + default="enabled", + ) + parser.add_argument( + "--torch-compile-check", + help="Check torch compile", + type=str, + choices=["enabled", "disabled"], + default="enabled", + ) + options = parser.parse_args() + print(f"torch: {torch.__version__}") + print(torch.__config__.parallel_info()) + + check_version(options.package) + smoke_test_conv2d() + test_linalg() + test_numpy() + if is_cuda_system: + test_linalg("cuda") + + if options.package == "all": + smoke_test_modules() + + smoke_test_cuda( + options.package, options.runtime_error_check, options.torch_compile_check + ) + + +if __name__ == "__main__": + main() From b6276415ab18144997288b9d2c47b4c0c3cf28d2 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Sun, 2 Feb 2025 17:01:51 +1100 Subject: [PATCH 19/58] skip torchinductor tests on osx; segfaults hard and also runs into some openmp issue: ``` ----------------------------- Captured stderr call ----------------------------- OMP: Error #179: Function pthread_key_create failed: OMP: System error #35: Resource temporarily unavailable ``` --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index d97c5e43b..4acfbdb05 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -451,7 +451,7 @@ outputs: ]) %} # tests torch.compile; avoid on aarch because it adds >4h in test runtime in emulation; # they add a lot of runtime (15->60min on windows), so run them for only one python version - {% set tests = tests ~ " test/inductor/test_torchinductor.py" %} # [py==312 and not aarch64] + {% set tests = tests ~ " test/inductor/test_torchinductor.py" %} # [py==312 and not (aarch64 or osx)] {% set skips = "(TestTorch and test_print)" %} # tolerance violation with openblas From c12f03db67c4d1b3dc85fb7de9b05f73552e694b Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sun, 2 Feb 2025 17:35:51 +1100 Subject: [PATCH 20/58] backport patch for torchinductor failures on windows --- recipe/meta.yaml | 2 + ...-of-python-3-and-error-without-numpy.patch | 2 +- recipe/patches/0002-Help-find-numpy.patch | 2 +- .../patches/0003-Update-sympy-version.patch | 2 +- .../0004-Fix-duplicate-linker-script.patch | 2 +- ...-Allow-overriding-CUDA-related-paths.patch | 2 +- ...AS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch | 2 +- recipe/patches/0007-fix-issue-142484.patch | 2 +- recipe/patches/0008-Fix-FindOpenBLAS.patch | 2 +- ...tils.cpp_extension.include_paths-use.patch | 2 +- ...oint-include-paths-to-PREFIX-include.patch | 2 +- ...nda-prefix-to-inductor-include-paths.patch | 2 +- ...E_DIR-relative-to-TORCH_INSTALL_PREF.patch | 2 +- ...ON-lib-from-CMake-install-TARGETS-di.patch | 2 +- ...e-in-test_mutable_custom_op_fixed_la.patch | 2 +- ...-find_package-CUDA-in-caffe2-CMake-m.patch | 2 +- ...AOTI_TORCH_EXPORT-on-Windows.-140030.patch | 66 +++++++++++++++++++ 17 files changed, 83 insertions(+), 15 deletions(-) create mode 100644 recipe/patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 4acfbdb05..533358199 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -59,6 +59,8 @@ source: - patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch # [win] - patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch - patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch + # backport https://github.com/pytorch/pytorch/pull/140030 + - patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch - patches_submodules/fbgemm/0001-remove-DESTINATION-lib-from-CMake-install-directives.patch # [win] - patches_submodules/tensorpipe/0001-switch-away-from-find_package-CUDA.patch # backport https://github.com/google/XNNPACK/commit/5f23827e66cca435fa400b6e221892ac95af0079 diff --git a/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch b/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch index 5f175049b..716cafecd 100644 --- a/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch +++ b/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch @@ -1,7 +1,7 @@ From b1493b8712c1fc4ad02b2640c191f3c7f1fc6c9d Mon Sep 17 00:00:00 2001 From: Mark Harfouche Date: Sun, 1 Sep 2024 
17:35:40 -0400 -Subject: [PATCH 01/15] Force usage of python 3 and error without numpy +Subject: [PATCH 01/16] Force usage of python 3 and error without numpy --- cmake/Dependencies.cmake | 6 +++--- diff --git a/recipe/patches/0002-Help-find-numpy.patch b/recipe/patches/0002-Help-find-numpy.patch index 653c4b5d4..26bb6ac87 100644 --- a/recipe/patches/0002-Help-find-numpy.patch +++ b/recipe/patches/0002-Help-find-numpy.patch @@ -1,7 +1,7 @@ From e88ebf63cc47b4471e6be3142cda1c2483b4dc9b Mon Sep 17 00:00:00 2001 From: Mark Harfouche Date: Tue, 1 Oct 2024 00:28:40 -0400 -Subject: [PATCH 02/15] Help find numpy +Subject: [PATCH 02/16] Help find numpy --- tools/setup_helpers/cmake.py | 6 ++++++ diff --git a/recipe/patches/0003-Update-sympy-version.patch b/recipe/patches/0003-Update-sympy-version.patch index 52df04c9f..89a20693f 100644 --- a/recipe/patches/0003-Update-sympy-version.patch +++ b/recipe/patches/0003-Update-sympy-version.patch @@ -1,7 +1,7 @@ From 3fb6b3704a6359521e186bfd4c6644a56aa08d90 Mon Sep 17 00:00:00 2001 From: Jeongseok Lee Date: Thu, 17 Oct 2024 15:04:05 -0700 -Subject: [PATCH 03/15] Update sympy version +Subject: [PATCH 03/16] Update sympy version --- setup.py | 2 +- diff --git a/recipe/patches/0004-Fix-duplicate-linker-script.patch b/recipe/patches/0004-Fix-duplicate-linker-script.patch index 8458e4a82..13aeaf092 100644 --- a/recipe/patches/0004-Fix-duplicate-linker-script.patch +++ b/recipe/patches/0004-Fix-duplicate-linker-script.patch @@ -1,7 +1,7 @@ From be785be20dab23d5cee88e13adf40150ce9ead3c Mon Sep 17 00:00:00 2001 From: Jeongseok Lee Date: Sun, 3 Nov 2024 01:12:36 -0700 -Subject: [PATCH 04/15] Fix duplicate linker script +Subject: [PATCH 04/16] Fix duplicate linker script --- setup.py | 4 +++- diff --git a/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch b/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch index 23d83bab0..a349fb8a7 100644 --- a/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch +++ b/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch @@ -1,7 +1,7 @@ From e0cb086099287bd51fdbe8e6f847ec2d0646f085 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Wed, 27 Nov 2024 13:47:23 +0100 -Subject: [PATCH 05/15] Allow overriding CUDA-related paths +Subject: [PATCH 05/16] Allow overriding CUDA-related paths --- cmake/Modules/FindCUDAToolkit.cmake | 2 +- diff --git a/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch b/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch index 2ababacc8..d2672ba5f 100644 --- a/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch +++ b/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch @@ -1,7 +1,7 @@ From 7e7547dab6c26e7fd324fde6cb6aad5d57bebcf9 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 18 Dec 2024 03:59:00 +0000 -Subject: [PATCH 06/15] Use BLAS_USE_CBLAS_DOT for OpenBLAS builds +Subject: [PATCH 06/16] Use BLAS_USE_CBLAS_DOT for OpenBLAS builds There are two calling conventions for *dotu functions diff --git a/recipe/patches/0007-fix-issue-142484.patch b/recipe/patches/0007-fix-issue-142484.patch index 30674b101..1b651ad27 100644 --- a/recipe/patches/0007-fix-issue-142484.patch +++ b/recipe/patches/0007-fix-issue-142484.patch @@ -1,7 +1,7 @@ From 63f0d3218792d874650a7926f2b956ecbe74eac0 Mon Sep 17 00:00:00 2001 From: "Zheng, Zhaoqiong" Date: Fri, 27 Dec 2024 13:49:36 +0800 -Subject: [PATCH 07/15] fix issue 142484 +Subject: [PATCH 07/16] fix issue 142484 From 
https://github.com/pytorch/pytorch/pull/143894 --- diff --git a/recipe/patches/0008-Fix-FindOpenBLAS.patch b/recipe/patches/0008-Fix-FindOpenBLAS.patch index 6a4307872..5c6844148 100644 --- a/recipe/patches/0008-Fix-FindOpenBLAS.patch +++ b/recipe/patches/0008-Fix-FindOpenBLAS.patch @@ -1,7 +1,7 @@ From 6e00778c46305f6a36670fa99a326c2426203a42 Mon Sep 17 00:00:00 2001 From: Bas Zalmstra Date: Thu, 16 May 2024 10:46:49 +0200 -Subject: [PATCH 08/15] Fix FindOpenBLAS +Subject: [PATCH 08/16] Fix FindOpenBLAS --- cmake/Modules/FindOpenBLAS.cmake | 15 +++++++++------ diff --git a/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch b/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch index 8b898e578..025dd3116 100644 --- a/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch +++ b/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch @@ -1,7 +1,7 @@ From 12a4473ae7a47da2a30121f329a2c3c8f3f456c5 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 23 Jan 2025 22:46:58 +1100 -Subject: [PATCH 09/15] simplify torch.utils.cpp_extension.include_paths; use +Subject: [PATCH 09/16] simplify torch.utils.cpp_extension.include_paths; use it in cpp_builder The /TH headers have not existed since pytorch 1.11 diff --git a/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch b/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch index 7c5efb37d..73ffc4614 100644 --- a/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch +++ b/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch @@ -1,7 +1,7 @@ From 0295752d2c44d86681d0381ef97d42ca0199ca56 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 23 Jan 2025 22:58:14 +1100 -Subject: [PATCH 10/15] point include paths to $PREFIX/include +Subject: [PATCH 10/16] point include paths to $PREFIX/include --- torch/utils/cpp_extension.py | 9 +++++++++ diff --git a/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch b/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch index b637adc6a..92de63a75 100644 --- a/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch +++ b/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch @@ -1,7 +1,7 @@ From a9ca43e842b6d550a364d62a286bcb161e1e8f04 Mon Sep 17 00:00:00 2001 From: Daniel Petry Date: Tue, 21 Jan 2025 17:45:23 -0600 -Subject: [PATCH 11/15] Add conda prefix to inductor include paths +Subject: [PATCH 11/16] Add conda prefix to inductor include paths Currently inductor doesn't look in conda's includes and libs. This results in errors when it tries to compile, if system versions are being used of diff --git a/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch b/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch index 36cddbe10..f6fc082ce 100644 --- a/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch +++ b/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch @@ -1,7 +1,7 @@ From 10ebfd7e5b04d022eab602889b4f06659b12b75a Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Tue, 28 Jan 2025 14:15:34 +1100 -Subject: [PATCH 12/15] make ATEN_INCLUDE_DIR relative to TORCH_INSTALL_PREFIX +Subject: [PATCH 12/16] make ATEN_INCLUDE_DIR relative to TORCH_INSTALL_PREFIX we cannot set CMAKE_INSTALL_PREFIX without the pytorch build complaining, but we can use TORCH_INSTALL_PREFIX, which is set correctly relative to our CMake files already: diff --git a/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch b/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch index 3a6f9915a..8da32aad6 100644 --- a/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch +++ b/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch @@ -1,7 +1,7 @@ From 24eece84ed217e14628983f0941c5ec274717b9f Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 28 Jan 2025 10:58:29 +1100 -Subject: [PATCH 13/15] remove `DESTINATION lib` from CMake `install(TARGETS` +Subject: [PATCH 13/16] remove `DESTINATION lib` from CMake `install(TARGETS` directives Suggested-By: Silvio Traversaro diff --git a/recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch b/recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch index 5bbdbb88a..5aed8a27c 100644 --- a/recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch +++ b/recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch @@ -1,7 +1,7 @@ From 93c1af8129efcacb58db9a8f2ec16eb6ec17dfed Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 30 Jan 2025 13:23:14 +1100 -Subject: [PATCH 14/15] make library name in +Subject: [PATCH 14/16] make library name in `test_mutable_custom_op_fixed_layout{,2}` unique Suggested-By: Daniel Petry diff --git a/recipe/patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch b/recipe/patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch index 51e98bbe8..8e7feb418 100644 --- a/recipe/patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch +++ b/recipe/patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch @@ -1,7 +1,7 @@ From b5084af57c14cdee26936fdfa6425598e8659eb6 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 30 Jan 2025 08:33:44 +1100 -Subject: [PATCH 15/15] avoid deprecated `find_package(CUDA)` in caffe2 CMake +Subject: [PATCH 15/16] avoid deprecated `find_package(CUDA)` in caffe2 CMake metadata vendor the not-available-anymore function torch_cuda_get_nvcc_gencode_flag from CMake diff --git a/recipe/patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch b/recipe/patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch new file mode 100644 index 000000000..7c85e1add --- /dev/null +++ b/recipe/patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch @@ -0,0 +1,66 @@ +From 2869f014b34cc01662a9c3542d11d53dafd0bb0a Mon Sep 17 00:00:00 2001 +From: Xu Han +Date: Wed, 15 Jan 2025 23:43:41 +0000 +Subject: [PATCH 16/16] export AOTI_TORCH_EXPORT on Windows. (#140030) + +Fixes #139954 + +reproduce UT: +```cmd +pytest test/inductor/test_torchinductor_codegen_dynamic_shapes.py -k test_device_assert_dynamic_shapes_cpu +``` +Issue: +image + +After fixing: +![Image](https://github.com/user-attachments/assets/883846fb-8e92-4b9c-9400-daab32382a3a) + +Reland: +1. Declare export on Windows explicitly. +2. Support cpu, cuda and xpu devices. 
+ +Pull Request resolved: https://github.com/pytorch/pytorch/pull/140030 +Approved by: https://github.com/jgong5, https://github.com/desertfire, https://github.com/malfet + +Co-authored-by: Nikita Shulga <2453524+malfet@users.noreply.github.com> +--- + CMakeLists.txt | 3 +++ + torch/csrc/inductor/aoti_torch/c/shim.h | 10 +++++++++- + 2 files changed, 12 insertions(+), 1 deletion(-) + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index c8af5f00b5c..c1733a99e91 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -1097,6 +1097,9 @@ if(NOT MSVC) + append_cxx_flag_if_supported("-Wno-error=redundant-move" CMAKE_CXX_FLAGS) + endif() + else() ++ # Define export functions for AOTI. ++ add_compile_definitions(EXPORT_AOTI_FUNCTIONS) ++ + # skip unwanted includes from windows.h + add_compile_definitions(WIN32_LEAN_AND_MEAN) + # Windows SDK broke compatibility since version 25131, but introduced this +diff --git a/torch/csrc/inductor/aoti_torch/c/shim.h b/torch/csrc/inductor/aoti_torch/c/shim.h +index 4c6c9afcacc..b2202b24b91 100644 +--- a/torch/csrc/inductor/aoti_torch/c/shim.h ++++ b/torch/csrc/inductor/aoti_torch/c/shim.h +@@ -44,8 +44,16 @@ + // to symbol clashes at link time if libtorch is included in a DLL and binary + // that depends on the DLL. As a short term fix, we don't export the symbols. + // In the long term, this will need to be addressed when Windows is supported. +-// #define AOTI_TORCH_EXPORT __declspec(dllexport) ++#ifdef OVRSOURCE ++// Do not export AOTI on Windows for internal builds + #define AOTI_TORCH_EXPORT ++#else /* OVRSOURCE */ ++#ifdef EXPORT_AOTI_FUNCTIONS ++#define AOTI_TORCH_EXPORT __declspec(dllexport) ++#else ++#define AOTI_TORCH_EXPORT __declspec(dllimport) ++#endif ++#endif /* OVRSOURCE */ + #else // !_WIN32 + #define AOTI_TORCH_EXPORT + #endif // _WIN32 From ce47e0ab71c24e76e0d7d334947f04a50df0c832 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 4 Feb 2025 07:08:22 +1100 Subject: [PATCH 21/58] skip a minor tolerance violation on osx --- recipe/meta.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 533358199..7bfbc1779 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -456,8 +456,9 @@ outputs: {% set tests = tests ~ " test/inductor/test_torchinductor.py" %} # [py==312 and not (aarch64 or osx)] {% set skips = "(TestTorch and test_print)" %} - # tolerance violation with openblas + # minor tolerance violations on osx {% set skips = skips ~ " or test_1_sized_with_0_strided_cpu_float32" %} # [osx] + {% set skips = skips ~ " or test_batchnorm_nhwc_cpu" %} # [osx] # timeouts and failures on aarch, see https://github.com/conda-forge/pytorch-cpu-feedstock/pull/298#issuecomment-2555888508 {% set skips = skips ~ " or test_pynode_destruction_deadlock" %} # [aarch64] {% set skips = skips ~ " or (TestLinalgCPU and test_cholesky_cpu_float32)" %} # [aarch64] From ba2564b1a70011d975cb74c995cba5053fa083f3 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Tue, 4 Feb 2025 07:08:55 +1100 Subject: [PATCH 22/58] drop unsuccessful patch; now covered by skip --- recipe/meta.yaml | 1 - ...e-in-test_mutable_custom_op_fixed_la.patch | 57 ------------------- 2 files changed, 58 deletions(-) delete mode 100644 recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 7bfbc1779..f112fe549 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -57,7 +57,6 @@ source: - patches/0011-Add-conda-prefix-to-inductor-include-paths.patch - patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch - patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch # [win] - - patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch - patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch # backport https://github.com/pytorch/pytorch/pull/140030 - patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch diff --git a/recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch b/recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch deleted file mode 100644 index 5aed8a27c..000000000 --- a/recipe/patches/0014-make-library-name-in-test_mutable_custom_op_fixed_la.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 93c1af8129efcacb58db9a8f2ec16eb6ec17dfed Mon Sep 17 00:00:00 2001 -From: "H. Vetinari" -Date: Thu, 30 Jan 2025 13:23:14 +1100 -Subject: [PATCH 14/16] make library name in - `test_mutable_custom_op_fixed_layout{,2}` unique - -Suggested-By: Daniel Petry ---- - test/inductor/test_torchinductor.py | 14 +++++++++----- - 1 file changed, 9 insertions(+), 5 deletions(-) - -diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py -index 7637068b606..2861214b5fc 100644 ---- a/test/inductor/test_torchinductor.py -+++ b/test/inductor/test_torchinductor.py -@@ -11040,7 +11040,8 @@ class CommonTemplate: - ) - @tf32_on_and_off(0.005) - def test_mutable_custom_op_fixed_layout2(self): -- with torch.library._scoped_library("mylib", "DEF") as lib: -+ unique_lib_name = f"mylib_{id(self)}" # Make unique name using test instance id -+ with torch.library._scoped_library(unique_lib_name, "DEF") as lib: - mod = nn.Conv2d(3, 128, 1, stride=1, bias=False).to(device=GPU_TYPE) - inp = torch.rand(2, 3, 128, 128, device=GPU_TYPE) - expected_stride = mod(inp).clone().stride() -@@ -11077,8 +11078,9 @@ class CommonTemplate: - def fn(x): - # Inductor changes the conv to be channels-last - z = mod(x) -- output = torch.ops.mylib.bar(z, torch._dynamo.is_compiling()) -- torch.ops.mylib.add_one(output) -+ mylib = importlib.import_module(f"torch.ops.{unique_lib_name}") -+ output = mylib.bar(z, torch._dynamo.is_compiling()) -+ mylib.add_one(output) - return output**2 - - with torch.no_grad(): -@@ -11098,7 +11100,8 @@ class CommonTemplate: - "defined in Python." - ) - def test_mutable_custom_op_fixed_layout(self): -- with torch.library._scoped_library("mylib", "DEF") as lib: -+ unique_lib_name = f"mylib_{id(self)}" # Make unique name using test instance id -+ with torch.library._scoped_library(unique_lib_name, "DEF") as lib: - lib.define( - "copy_(Tensor(a!) 
dst, Tensor src) -> ()", - tags=torch.Tag.needs_fixed_stride_order, -@@ -11114,7 +11117,8 @@ class CommonTemplate: - - def f(x): - full_default_3 = torch.full([3], 7.0, device="cpu") -- chunk_cat_default_1 = torch.ops.mylib.copy_.default(full_default_3, x) -+ mylib = importlib.import_module(f"torch.ops.{unique_lib_name}") -+ chunk_cat_default_1 = mylib.copy_.default(full_default_3, x) - mul_out = torch.mul(full_default_3, full_default_3) - return mul_out - From ab889b70943e3a49f15ed1cf2f0a0b3ec3cb57ff Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 4 Feb 2025 07:13:36 +1100 Subject: [PATCH 23/58] drop unnecessary CUDAFLAGS in bld.bat this was done for consistency with `build.sh`, but appears unnecessary on windows --- recipe/bld.bat | 2 -- 1 file changed, 2 deletions(-) diff --git a/recipe/bld.bat b/recipe/bld.bat index 4089b425f..394d9058e 100644 --- a/recipe/bld.bat +++ b/recipe/bld.bat @@ -93,8 +93,6 @@ if not "%cuda_compiler_version%" == "None" ( set MAGMA_HOME=%LIBRARY_PREFIX% set "PATH=%CUDA_BIN_PATH%;%PATH%" set CUDNN_INCLUDE_DIR=%LIBRARY_PREFIX%\include - @REM turn off very noisy nvcc warnings - set "CUDAFLAGS=-w --ptxas-options=-w" ) else ( set USE_CUDA=0 @REM MKLDNN is an Apache-2.0 licensed library for DNNs and is used From 194f2d9c53989fe5bd42258b3dd992394b0e039a Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 4 Feb 2025 11:51:13 +1100 Subject: [PATCH 24/58] skip test_batchnorm_nhwc_cpu on linux as well --- recipe/meta.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index f112fe549..ff58de560 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -455,9 +455,9 @@ outputs: {% set tests = tests ~ " test/inductor/test_torchinductor.py" %} # [py==312 and not (aarch64 or osx)] {% set skips = "(TestTorch and test_print)" %} - # minor tolerance violations on osx + # minor tolerance violations {% set skips = skips ~ " or test_1_sized_with_0_strided_cpu_float32" %} # [osx] - {% set skips = skips ~ " or test_batchnorm_nhwc_cpu" %} # [osx] + {% set skips = skips ~ " or test_batchnorm_nhwc_cpu" %} # [unix] # timeouts and failures on aarch, see https://github.com/conda-forge/pytorch-cpu-feedstock/pull/298#issuecomment-2555888508 {% set skips = skips ~ " or test_pynode_destruction_deadlock" %} # [aarch64] {% set skips = skips ~ " or (TestLinalgCPU and test_cholesky_cpu_float32)" %} # [aarch64] From 88048104e2b04fa3f0124366bd97916e437c81ce Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 4 Feb 2025 20:49:41 +1100 Subject: [PATCH 25/58] disable smoke test on aarch --- recipe/meta.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index ff58de560..6a3d5c4ef 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -436,7 +436,8 @@ outputs: - export TARGET_OS="macos-arm64" # [(osx and arm64)] - export TARGET_OS="macos-x86_64" # [(osx and x86_64)] - export OMP_NUM_THREADS=4 # [not win] - - python ./smoke_test.py --package torchonly + # aarch segfaults, presumably due to emulation + - python ./smoke_test.py --package torchonly # [not aarch] # a reasonably safe subset of tests that should run under 15 minutes {% set tests = " ".join([ From 93854a615b1087901e676ce61886755604ae36fb Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Wed, 5 Feb 2025 17:49:19 +1100 Subject: [PATCH 26/58] Revert "Unvendor pybind11 and eigen" keep it on windows though, where there's no issue This reverts commit 9fcb3a7850e987c6560fe186343f20b99196fd37. 
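For context on the `rsync`-vs-`mv` hunk below: with the unvendored (system) pybind11, `${PREFIX}/share` is already populated (e.g. with pybind11's CMake config) before pytorch's files are moved in, and `mv` cannot move a directory onto an existing, non-empty directory of the same name, so the install step had to merge via `rsync -a`; with the vendored copy restored, the destination starts out empty and plain `mv` suffices. A minimal shell sketch of the difference (directory names are illustrative only, not the actual build paths):

```sh
# dest/share is already populated, as it would be with system pybind11
mkdir -p dest/share/cmake/pybind11 src/share/cmake/Torch

mv src/share/* dest/share/        # fails: dest/share/cmake exists and is not empty
rsync -a src/share/ dest/share/   # succeeds: merges the two cmake/ trees

# with vendored pybind11, dest/share starts out empty, so plain mv works again
```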
--- recipe/build.sh | 5 +---- recipe/meta.yaml | 12 ++++++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/recipe/build.sh b/recipe/build.sh index 22dde8f0e..af8e7c4cb 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -89,8 +89,6 @@ export USE_SYSTEM_SLEEF=1 # use our protobuf export BUILD_CUSTOM_PROTOBUF=OFF rm -rf $PREFIX/bin/protoc -export USE_SYSTEM_PYBIND11=1 -export USE_SYSTEM_EIGEN_INSTALL=1 # prevent six from being downloaded > third_party/NNPACK/cmake/DownloadSix.cmake @@ -244,8 +242,7 @@ case ${PKG_NAME} in mv build/lib.*/torch/bin/* ${PREFIX}/bin/ mv build/lib.*/torch/lib/* ${PREFIX}/lib/ - # need to merge these now because we're using system pybind11, meaning the destination directory is not empty - rsync -a build/lib.*/torch/share/* ${PREFIX}/share/ + mv build/lib.*/torch/share/* ${PREFIX}/share/ mv build/lib.*/torch/include/{ATen,caffe2,tensorpipe,torch,c10} ${PREFIX}/include/ rm ${PREFIX}/lib/libtorch_python.* diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 6a3d5c4ef..25ff940fa 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -118,7 +118,6 @@ requirements: - protobuf - make # [linux] - sccache # [win] - - rsync # [unix] host: # GPU requirements - cudnn # [cuda_compiler_version != "None"] @@ -167,8 +166,8 @@ requirements: - libuv - pkg-config # [unix] - typing_extensions - - pybind11 - - eigen + - pybind11 # [win] + - eigen # [win] - zlib run: # GPU requirements without run_exports @@ -312,13 +311,13 @@ outputs: - intel-openmp {{ mkl }} # [win] - libabseil - libprotobuf + - eigen # [win] + - pybind11 # [win] - sleef - libuv - pkg-config # [unix] - typing_extensions - {{ pin_subpackage('libtorch', exact=True) }} - - pybind11 - - eigen run: - {{ pin_subpackage('libtorch', exact=True) }} # [megabuild] # for non-megabuild, allow libtorch from any python version; @@ -340,7 +339,7 @@ outputs: - jinja2 - networkx - optree >=0.13.0 - - pybind11 + - pybind11 # [win] - setuptools # sympy 1.13.2 was reported to result in test failures on Windows and mac # https://github.com/pytorch/pytorch/pull/133235 @@ -553,6 +552,7 @@ about: - LICENSE - NOTICE - third_party/CMake/Copyright.txt + - third_party/pybind11/LICENSE # [unix] summary: PyTorch is an optimized tensor library for deep learning using GPUs and CPUs. description: | PyTorch is a Python package that provides two high-level features: From b4ab27bee428db88585a102a8913d14ca657bd8e Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Wed, 5 Feb 2025 17:52:27 +1100 Subject: [PATCH 27/58] use different formulation for ptxas warning filter --- recipe/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/build.sh b/recipe/build.sh index af8e7c4cb..51e01898d 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -218,7 +218,7 @@ elif [[ ${cuda_compiler_version} != "None" ]]; then export MAGMA_HOME="${PREFIX}" export USE_MAGMA=1 # turn off noisy nvcc warnings - export CMAKE_CUDA_FLAGS="-w --ptxas-options=-w" + export CMAKE_CUDA_FLAGS="-w -Xptxas=\"-w\"" else if [[ "$target_platform" != *-64 ]]; then # Breakpad seems to not work on aarch64 or ppc64le From f72a13e4b403c544fc060b098ab347a07fbd691a Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Wed, 5 Feb 2025 17:59:56 +1100 Subject: [PATCH 28/58] re-add zlib to pytorch's host deps --- recipe/meta.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 25ff940fa..2908639a4 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -271,6 +271,7 @@ outputs: - make # [linux] - sccache # [win] host: + - {{ pin_subpackage('libtorch', exact=True) }} # GPU requirements - cudnn # [cuda_compiler_version != "None"] - nccl # [cuda_compiler_version != "None" and linux] @@ -317,7 +318,7 @@ outputs: - libuv - pkg-config # [unix] - typing_extensions - - {{ pin_subpackage('libtorch', exact=True) }} + - zlib run: - {{ pin_subpackage('libtorch', exact=True) }} # [megabuild] # for non-megabuild, allow libtorch from any python version; From b1658efe859efc672eb9e10d9dd5a3b4d9ebce49 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 6 Feb 2025 07:34:12 +1100 Subject: [PATCH 29/58] remove smoke test --- recipe/meta.yaml | 27 --- recipe/smoke_test.py | 388 ------------------------------------------- 2 files changed, 415 deletions(-) delete mode 100644 recipe/smoke_test.py diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 2908639a4..2664cb59a 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -387,8 +387,6 @@ outputs: # tools/ is needed to optimise test run # as of pytorch=2.0.0, there is a bug when trying to run tests without the tools - tools - files: - - smoke_test.py commands: # Run pip check so as to ensure that all pytorch packages are installed # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/24 @@ -414,31 +412,6 @@ outputs: - if not exist %LIBRARY_BIN%\torch_python.dll exit 1 # [win] - if not exist %LIBRARY_LIB%\torch_python.lib exit 1 # [win] - # See here for environment variables needed by the smoke test script - # https://github.com/pytorch/pytorch/blob/266fd35c5842902f6304aa8e7713b252cbfb243c/.ci/pytorch/smoke_test/smoke_test.py#L16 - - set MATRIX_GPU_ARCH_VERSION="{{ '.'.join((cuda_compiler_version or "").split('.')[:2]) }}" # [(cuda_compiler_version != "None") and (win)] - - set MATRIX_GPU_ARCH_TYPE="cuda" # [(cuda_compiler_version != "None") and (win)] - - set MATRIX_GPU_ARCH_VERSION="none" # [(cuda_compiler_version == "None") and (win)] - - set MATRIX_GPU_ARCH_TYPE="none" # [(cuda_compiler_version == "None") and (win)] - - set MATRIX_CHANNEL="defaults" # [win] - - set MATRIX_STABLE_VERSION={{ version }} # [win] - - set MATRIX_PACKAGE_TYPE="conda" # [win] - - set TARGET_OS="windows" # [win] - - set OMP_NUM_THREADS=4 # [win] - - export MATRIX_GPU_ARCH_VERSION="{{ '.'.join((cuda_compiler_version or "").split('.')[:2]) }}" # [(cuda_compiler_version != "None") and (linux and x86_64)] - - export MATRIX_GPU_ARCH_TYPE="cuda" # [(cuda_compiler_version != "None") and (linux and x86_64)] - - export MATRIX_GPU_ARCH_VERSION="none" # [(cuda_compiler_version == "None") and (not win)] - - export MATRIX_GPU_ARCH_TYPE="none" # [(cuda_compiler_version == "None") and (not win)] - - export MATRIX_CHANNEL="defaults" # [not win] - - export MATRIX_STABLE_VERSION="{{ version }}" # [not win] - - export MATRIX_PACKAGE_TYPE="conda" # [not win] - - export TARGET_OS="linux" # [linux] - - export TARGET_OS="macos-arm64" # [(osx and arm64)] - - export TARGET_OS="macos-x86_64" # [(osx and x86_64)] - - export OMP_NUM_THREADS=4 # [not win] - # aarch segfaults, presumably due to emulation - - python ./smoke_test.py --package torchonly # [not aarch] - # a reasonably safe subset of tests that should run under 15 minutes {% set 
tests = " ".join([ "test/test_autograd.py", diff --git a/recipe/smoke_test.py b/recipe/smoke_test.py deleted file mode 100644 index c70185c05..000000000 --- a/recipe/smoke_test.py +++ /dev/null @@ -1,388 +0,0 @@ -import argparse -# from https://github.com/pytorch/pytorch/blob/v2.6.0/.ci/pytorch/smoke_test/smoke_test.py -# update content below for new versions! -# -------------------------------------- -import importlib -import json -import os -import re -import subprocess -import sys -from pathlib import Path - -import torch -import torch._dynamo -import torch.nn as nn -import torch.nn.functional as F - - -if "MATRIX_GPU_ARCH_VERSION" in os.environ: - gpu_arch_ver = os.getenv("MATRIX_GPU_ARCH_VERSION") -else: - gpu_arch_ver = os.getenv("GPU_ARCH_VERSION") # Use fallback if available -gpu_arch_type = os.getenv("MATRIX_GPU_ARCH_TYPE") -channel = os.getenv("MATRIX_CHANNEL") -package_type = os.getenv("MATRIX_PACKAGE_TYPE") -target_os = os.getenv("TARGET_OS", sys.platform) -BASE_DIR = Path(__file__).parent.parent.parent - -is_cuda_system = gpu_arch_type == "cuda" -NIGHTLY_ALLOWED_DELTA = 3 - -MODULES = [ - { - "name": "torchvision", - "repo": "https://github.com/pytorch/vision.git", - "smoke_test": "./vision/test/smoke_test.py", - "extension": "extension", - "repo_name": "vision", - }, - { - "name": "torchaudio", - "repo": "https://github.com/pytorch/audio.git", - "smoke_test": "./audio/test/smoke_test/smoke_test.py --no-ffmpeg", - "extension": "_extension", - "repo_name": "audio", - }, -] - - -class Net(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 32, 3, 1) - self.conv2 = nn.Conv2d(32, 64, 3, 1) - self.fc1 = nn.Linear(9216, 1) - - def forward(self, x): - x = self.conv1(x) - x = self.conv2(x) - x = F.max_pool2d(x, 2) - x = torch.flatten(x, 1) - output = self.fc1(x) - return output - - -def load_json_from_basedir(filename: str): - try: - with open(BASE_DIR / filename) as fptr: - return json.load(fptr) - except FileNotFoundError as exc: - raise ImportError(f"File {filename} not found error: {exc.strerror}") from exc - except json.JSONDecodeError as exc: - raise ImportError(f"Invalid JSON {filename}") from exc - - -def read_release_matrix(): - return load_json_from_basedir("release_matrix.json") - - -def test_numpy(): - import numpy as np - - x = np.arange(5) - torch.tensor(x) - - -def check_version(package: str) -> None: - release_version = os.getenv("RELEASE_VERSION") - # if release_version is specified, use it to validate the packages - if release_version: - release_matrix = read_release_matrix() - stable_version = release_matrix["torch"] - else: - stable_version = os.getenv("MATRIX_STABLE_VERSION") - - # only makes sense to check nightly package where dates are known - if channel == "nightly": - check_nightly_binaries_date(package) - elif stable_version is not None: - if not torch.__version__.startswith(stable_version): - raise RuntimeError( - f"Torch version mismatch, expected {stable_version} for channel {channel}. But its {torch.__version__}" - ) - - if release_version and package == "all": - for module in MODULES: - imported_module = importlib.import_module(module["name"]) - module_version = imported_module.__version__ - if not module_version.startswith(release_matrix[module["name"]]): - raise RuntimeError( - f"{module['name']} version mismatch, expected: \ - {release_matrix[module['name']]} for channel {channel}. 
But its {module_version}" - ) - else: - print(f"{module['name']} version actual: {module_version} expected: \ - {release_matrix[module['name']]} for channel {channel}.") - - else: - print(f"Skip version check for channel {channel} as stable version is None") - - -def check_nightly_binaries_date(package: str) -> None: - from datetime import datetime - - format_dt = "%Y%m%d" - - date_t_str = re.findall("dev\\d+", torch.__version__) - date_t_delta = datetime.now() - datetime.strptime(date_t_str[0][3:], format_dt) - if date_t_delta.days >= NIGHTLY_ALLOWED_DELTA: - raise RuntimeError( - f"the binaries are from {date_t_str} and are more than {NIGHTLY_ALLOWED_DELTA} days old!" - ) - - if package == "all": - for module in MODULES: - imported_module = importlib.import_module(module["name"]) - module_version = imported_module.__version__ - date_m_str = re.findall("dev\\d+", module_version) - date_m_delta = datetime.now() - datetime.strptime( - date_m_str[0][3:], format_dt - ) - print(f"Nightly date check for {module['name']} version {module_version}") - if date_m_delta.days > NIGHTLY_ALLOWED_DELTA: - raise RuntimeError( - f"Expected {module['name']} to be less then {NIGHTLY_ALLOWED_DELTA} days. But its {date_m_delta}" - ) - - -def test_cuda_runtime_errors_captured() -> None: - cuda_exception_missed = True - try: - print("Testing test_cuda_runtime_errors_captured") - torch._assert_async(torch.tensor(0, device="cuda")) - torch._assert_async(torch.tensor(0 + 0j, device="cuda")) - except RuntimeError as e: - if re.search("CUDA", f"{e}"): - print(f"Caught CUDA exception with success: {e}") - cuda_exception_missed = False - else: - raise e - if cuda_exception_missed: - raise RuntimeError("Expected CUDA RuntimeError but have not received!") - - -def smoke_test_cuda( - package: str, runtime_error_check: str, torch_compile_check: str -) -> None: - if not torch.cuda.is_available() and is_cuda_system: - raise RuntimeError(f"Expected CUDA {gpu_arch_ver}. However CUDA is not loaded.") - - if package == "all" and is_cuda_system: - for module in MODULES: - imported_module = importlib.import_module(module["name"]) - # TBD for vision move extension module to private so it will - # be _extention. - version = "N/A" - if module["extension"] == "extension": - version = imported_module.extension._check_cuda_version() - else: - version = imported_module._extension._check_cuda_version() - print(f"{module['name']} CUDA: {version}") - - # torch.compile is available on macos-arm64 and Linux for python 3.8-3.13 - if ( - torch_compile_check == "enabled" - and sys.version_info < (3, 14, 0) - and target_os in ["linux", "linux-aarch64", "macos-arm64", "darwin"] - ): - smoke_test_compile("cuda" if torch.cuda.is_available() else "cpu") - - if torch.cuda.is_available(): - if torch.version.cuda != gpu_arch_ver: - raise RuntimeError( - f"Wrong CUDA version. Loaded: {torch.version.cuda} Expected: {gpu_arch_ver}" - ) - print(f"torch cuda: {torch.version.cuda}") - # todo add cudnn version validation - print(f"torch cudnn: {torch.backends.cudnn.version()}") - print(f"cuDNN enabled? 
{torch.backends.cudnn.enabled}") - - torch.cuda.init() - print("CUDA initialized successfully") - print(f"Number of CUDA devices: {torch.cuda.device_count()}") - for i in range(torch.cuda.device_count()): - print(f"Device {i}: {torch.cuda.get_device_name(i)}") - - # nccl is available only on Linux - if sys.platform in ["linux", "linux2"]: - print(f"torch nccl version: {torch.cuda.nccl.version()}") - - if runtime_error_check == "enabled": - test_cuda_runtime_errors_captured() - - -def smoke_test_conv2d() -> None: - import torch.nn as nn - - print("Testing smoke_test_conv2d") - # With square kernels and equal stride - m = nn.Conv2d(16, 33, 3, stride=2) - # non-square kernels and unequal stride and with padding - m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) - assert m is not None - # non-square kernels and unequal stride and with padding and dilation - basic_conv = nn.Conv2d( - 16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1) - ) - input = torch.randn(20, 16, 50, 100) - output = basic_conv(input) - - if is_cuda_system: - print("Testing smoke_test_conv2d with cuda") - conv = nn.Conv2d(3, 3, 3).cuda() - x = torch.randn(1, 3, 24, 24, device="cuda") - with torch.cuda.amp.autocast(): - out = conv(x) - assert out is not None - - supported_dtypes = [torch.float16, torch.float32, torch.float64] - for dtype in supported_dtypes: - print(f"Testing smoke_test_conv2d with cuda for {dtype}") - conv = basic_conv.to(dtype).cuda() - input = torch.randn(20, 16, 50, 100, device="cuda").type(dtype) - output = conv(input) - assert output is not None - - -def test_linalg(device="cpu") -> None: - print(f"Testing smoke_test_linalg on {device}") - A = torch.randn(5, 3, device=device) - U, S, Vh = torch.linalg.svd(A, full_matrices=False) - assert ( - U.shape == A.shape - and S.shape == torch.Size([3]) - and Vh.shape == torch.Size([3, 3]) - ) - torch.dist(A, U @ torch.diag(S) @ Vh) - - U, S, Vh = torch.linalg.svd(A) - assert ( - U.shape == torch.Size([5, 5]) - and S.shape == torch.Size([3]) - and Vh.shape == torch.Size([3, 3]) - ) - torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh) - - A = torch.randn(7, 5, 3, device=device) - U, S, Vh = torch.linalg.svd(A, full_matrices=False) - torch.dist(A, U @ torch.diag_embed(S) @ Vh) - - if device == "cuda": - supported_dtypes = [torch.float32, torch.float64] - for dtype in supported_dtypes: - print(f"Testing smoke_test_linalg with cuda for {dtype}") - A = torch.randn(20, 16, 50, 100, device=device, dtype=dtype) - torch.linalg.svd(A) - - -def smoke_test_compile(device: str = "cpu") -> None: - supported_dtypes = [torch.float16, torch.float32, torch.float64] - - def foo(x: torch.Tensor) -> torch.Tensor: - return torch.sin(x) + torch.cos(x) - - for dtype in supported_dtypes: - print(f"Testing smoke_test_compile for {device} and {dtype}") - x = torch.rand(3, 3, device=device).type(dtype) - x_eager = foo(x) - x_pt2 = torch.compile(foo)(x) - torch.testing.assert_close(x_eager, x_pt2) - - # Check that SIMD were detected for the architecture - if device == "cpu": - from torch._inductor.codecache import pick_vec_isa - - isa = pick_vec_isa() - if not isa: - raise RuntimeError("Can't detect vectorized ISA for CPU") - print(f"Picked CPU ISA {type(isa).__name__} bit width {isa.bit_width()}") - - # Reset torch dynamo since we are changing mode - torch._dynamo.reset() - dtype = torch.float32 - torch.set_float32_matmul_precision("high") - print(f"Testing smoke_test_compile with mode 'max-autotune' for {dtype}") - x = torch.rand(64, 1, 28, 28, 
device=device).type(torch.float32) - model = Net().to(device=device) - x_pt2 = torch.compile(model, mode="max-autotune")(x) - - -def smoke_test_modules(): - cwd = os.getcwd() - for module in MODULES: - if module["repo"]: - if not os.path.exists(f"{cwd}/{module['repo_name']}"): - print(f"Path does not exist: {cwd}/{module['repo_name']}") - try: - subprocess.check_output( - f"git clone --depth 1 {module['repo']}", - stderr=subprocess.STDOUT, - shell=True, - ) - except subprocess.CalledProcessError as exc: - raise RuntimeError( - f"Cloning {module['repo']} FAIL: {exc.returncode} Output: {exc.output}" - ) from exc - try: - smoke_test_command = f"python3 {module['smoke_test']}" - if target_os == "windows": - smoke_test_command = f"python {module['smoke_test']}" - output = subprocess.check_output( - smoke_test_command, - stderr=subprocess.STDOUT, - shell=True, - universal_newlines=True, - ) - except subprocess.CalledProcessError as exc: - raise RuntimeError( - f"Module {module['name']} FAIL: {exc.returncode} Output: {exc.output}" - ) from exc - else: - print(f"Output: \n{output}\n") - - -def main() -> None: - parser = argparse.ArgumentParser() - parser.add_argument( - "--package", - help="Package to include in smoke testing", - type=str, - choices=["all", "torchonly"], - default="all", - ) - parser.add_argument( - "--runtime-error-check", - help="No Runtime Error check", - type=str, - choices=["enabled", "disabled"], - default="enabled", - ) - parser.add_argument( - "--torch-compile-check", - help="Check torch compile", - type=str, - choices=["enabled", "disabled"], - default="enabled", - ) - options = parser.parse_args() - print(f"torch: {torch.__version__}") - print(torch.__config__.parallel_info()) - - check_version(options.package) - smoke_test_conv2d() - test_linalg() - test_numpy() - if is_cuda_system: - test_linalg("cuda") - - if options.package == "all": - smoke_test_modules() - - smoke_test_cuda( - options.package, options.runtime_error_check, options.torch_compile_check - ) - - -if __name__ == "__main__": - main() From 8a43a637dc681c9e5b5ed7aa3f999972977b06f5 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Fri, 7 Feb 2025 23:00:11 +1100 Subject: [PATCH 30/58] deactivate pytest-xdist for linux-64+CUDA+MKL --- recipe/meta.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 2664cb59a..ba91b41a3 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -374,7 +374,7 @@ outputs: # Required by run_test.py - pytest-flakefinder - pytest-rerunfailures - - pytest-xdist + - pytest-xdist # [not (linux64 and blas_impl == "mkl" and cuda_compiler_version != "None")] # danpetry/TF: Pytorch includes their own edited version of pytest-shard and adding # it into the test deps as well results in the --shard-id option being added twice. 
# https://github.com/pytorch/pytorch/blob/main/test/pytest_shard_custom.py @@ -484,7 +484,7 @@ outputs: - export OMP_NUM_THREADS=2 # [unix] # reduced parallelism to avoid OOM; test only one python version on aarch because emulation is super-slow # disable hypothesis because it randomly yields health check errors - - python -m pytest -n 2 {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [unix and (not aarch64 or py==312)] + # MKL+CUDA somehow crashes pytest, see https://github.com/conda-forge/pytorch-cpu-feedstock/issues/348 + - export jobs="" # [linux64 and blas_impl == "mkl" and cuda_compiler_version != "None"] + - export jobs="-n 2" # [not (linux64 and blas_impl == "mkl" and cuda_compiler_version != "None") and unix] + - python -m pytest $jobs {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [unix and (not aarch64 or py==312)] - python -m pytest -v -s {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [win] # regression test for https://github.com/conda-forge/pytorch-cpu-feedstock/issues/329, where we picked up From 1da103764f97f563c5caac1c318d9506dad34445 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Fri, 7 Feb 2025 23:07:48 +1100 Subject: [PATCH 31/58] temporary: skip known-passing builds to conserve resources --- recipe/meta.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index ba91b41a3..1bae506ee 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -70,6 +70,8 @@ build: number: {{ build }} # cuda 11.8 was dropped due to maintenance effort, see discussion in #177 skip: true # [cuda_compiler_version == "11.8"] + # skip known-passing builds while bringing up GPU builds + skip: true # [cuda_compiler_version == "None" or win or (linux64 and blas_impl != "mkl")] # This logic allows two rc variants to be defined in the conda_build_config, but only one to actually be built. # We want to be able to define two variants in the cbc so we can assign different labels to each in the upload channel # (by zipping is_rc with channel_targets). This prevents rc builds being used unless specifically requested. From 4784f25e5d2ee8a5fece2ec63959c35feb144a04 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sat, 8 Feb 2025 09:56:33 +1100 Subject: [PATCH 32/58] Revert "deactivate pytest-xdist for linux-64+CUDA+MKL" This reverts commit 8a43a637dc681c9e5b5ed7aa3f999972977b06f5. --- recipe/meta.yaml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 1bae506ee..28eeb1d98 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -376,7 +376,7 @@ outputs: # Required by run_test.py - pytest-flakefinder - pytest-rerunfailures - - pytest-xdist # [not (linux64 and blas_impl == "mkl" and cuda_compiler_version != "None")] + - pytest-xdist # danpetry/TF: Pytorch includes their own edited version of pytest-shard and adding # it into the test deps as well results in the --shard-id option being added twice. 
# https://github.com/pytorch/pytorch/blob/main/test/pytest_shard_custom.py @@ -486,10 +486,7 @@ outputs: - export OMP_NUM_THREADS=2 # [unix] # reduced parallelism to avoid OOM; test only one python version on aarch because emulation is super-slow # disable hypothesis because it randomly yields health check errors - # MKL+CUDA somehow crashes pytest, see https://github.com/conda-forge/pytorch-cpu-feedstock/issues/348 - - export jobs="" # [linux64 and blas_impl == "mkl" and cuda_compiler_version != "None"] - - export jobs="-n 2" # [not (linux64 and blas_impl == "mkl" and cuda_compiler_version != "None") and unix] - - python -m pytest $jobs {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [unix and (not aarch64 or py==312)] + - python -m pytest -n 2 {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [unix and (not aarch64 or py==312)] - python -m pytest -v -s {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [win] # regression test for https://github.com/conda-forge/pytorch-cpu-feedstock/issues/329, where we picked up From 51109f4ed3d6be0163997fd1e2bfafaca6772ced Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sat, 8 Feb 2025 09:57:24 +1100 Subject: [PATCH 33/58] fix stray `)` that broke pytest --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 28eeb1d98..057b956fc 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -461,7 +461,7 @@ outputs: {% set skips = skips ~ " or (GPUTests and test_scatter_reduce2)" %} # [linux and cuda_compiler_version != "None"] # MKL problems {% set skips = skips ~ " or (TestLinalgCPU and test_inverse_errors_large_cpu)" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] - {% set skips = skips ~ " or test_reentrant_parent_error_on_cpu_cuda)" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] + {% set skips = skips ~ " or test_reentrant_parent_error_on_cpu_cuda" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] # non-MKL problems {% set skips = skips ~ " or test_cross_entropy_loss_2d_out_of_bounds_class_index_cuda" %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_cublas_config_nondeterministic_alert_cuda " %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] From 796a52b9ab85ab31f03f3281e9b88d9db9925399 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Sat, 8 Feb 2025 14:16:57 +1100 Subject: [PATCH 34/58] fix the way we look for `$PREFIX/include` in cross-compilation --- recipe/meta.yaml | 4 ++-- ...-of-python-3-and-error-without-numpy.patch | 2 +- recipe/patches/0002-Help-find-numpy.patch | 2 +- .../patches/0003-Update-sympy-version.patch | 2 +- .../0004-Fix-duplicate-linker-script.patch | 2 +- ...-Allow-overriding-CUDA-related-paths.patch | 2 +- ...AS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch | 2 +- recipe/patches/0007-fix-issue-142484.patch | 2 +- recipe/patches/0008-Fix-FindOpenBLAS.patch | 2 +- ...tils.cpp_extension.include_paths-use.patch | 2 +- ...oint-include-paths-to-PREFIX-include.patch | 23 +++++++++++++------ ...nda-prefix-to-inductor-include-paths.patch | 4 ++-- ...E_DIR-relative-to-TORCH_INSTALL_PREF.patch | 4 ++-- ...ON-lib-from-CMake-install-TARGETS-di.patch | 4 ++-- ...find_package-CUDA-in-caffe2-CMake-m.patch} | 4 ++-- ...OTI_TORCH_EXPORT-on-Windows.-140030.patch} | 4 ++-- 16 files changed, 37 insertions(+), 28 deletions(-) rename recipe/patches/{0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch => 0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch} (99%) rename recipe/patches/{0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch => 0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch} (94%) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 057b956fc..5ab473448 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -57,9 +57,9 @@ source: - patches/0011-Add-conda-prefix-to-inductor-include-paths.patch - patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch - patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch # [win] - - patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch + - patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch # backport https://github.com/pytorch/pytorch/pull/140030 - - patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch + - patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch - patches_submodules/fbgemm/0001-remove-DESTINATION-lib-from-CMake-install-directives.patch # [win] - patches_submodules/tensorpipe/0001-switch-away-from-find_package-CUDA.patch # backport https://github.com/google/XNNPACK/commit/5f23827e66cca435fa400b6e221892ac95af0079 diff --git a/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch b/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch index 716cafecd..5f175049b 100644 --- a/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch +++ b/recipe/patches/0001-Force-usage-of-python-3-and-error-without-numpy.patch @@ -1,7 +1,7 @@ From b1493b8712c1fc4ad02b2640c191f3c7f1fc6c9d Mon Sep 17 00:00:00 2001 From: Mark Harfouche Date: Sun, 1 Sep 2024 17:35:40 -0400 -Subject: [PATCH 01/16] Force usage of python 3 and error without numpy +Subject: [PATCH 01/15] Force usage of python 3 and error without numpy --- cmake/Dependencies.cmake | 6 +++--- diff --git a/recipe/patches/0002-Help-find-numpy.patch b/recipe/patches/0002-Help-find-numpy.patch index 26bb6ac87..653c4b5d4 100644 --- a/recipe/patches/0002-Help-find-numpy.patch +++ b/recipe/patches/0002-Help-find-numpy.patch @@ -1,7 +1,7 @@ From e88ebf63cc47b4471e6be3142cda1c2483b4dc9b Mon Sep 17 00:00:00 2001 From: Mark Harfouche Date: Tue, 1 Oct 2024 00:28:40 -0400 -Subject: [PATCH 02/16] Help find numpy +Subject: [PATCH 02/15] Help find numpy --- tools/setup_helpers/cmake.py | 6 ++++++ diff --git 
a/recipe/patches/0003-Update-sympy-version.patch b/recipe/patches/0003-Update-sympy-version.patch index 89a20693f..52df04c9f 100644 --- a/recipe/patches/0003-Update-sympy-version.patch +++ b/recipe/patches/0003-Update-sympy-version.patch @@ -1,7 +1,7 @@ From 3fb6b3704a6359521e186bfd4c6644a56aa08d90 Mon Sep 17 00:00:00 2001 From: Jeongseok Lee Date: Thu, 17 Oct 2024 15:04:05 -0700 -Subject: [PATCH 03/16] Update sympy version +Subject: [PATCH 03/15] Update sympy version --- setup.py | 2 +- diff --git a/recipe/patches/0004-Fix-duplicate-linker-script.patch b/recipe/patches/0004-Fix-duplicate-linker-script.patch index 13aeaf092..8458e4a82 100644 --- a/recipe/patches/0004-Fix-duplicate-linker-script.patch +++ b/recipe/patches/0004-Fix-duplicate-linker-script.patch @@ -1,7 +1,7 @@ From be785be20dab23d5cee88e13adf40150ce9ead3c Mon Sep 17 00:00:00 2001 From: Jeongseok Lee Date: Sun, 3 Nov 2024 01:12:36 -0700 -Subject: [PATCH 04/16] Fix duplicate linker script +Subject: [PATCH 04/15] Fix duplicate linker script --- setup.py | 4 +++- diff --git a/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch b/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch index a349fb8a7..23d83bab0 100644 --- a/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch +++ b/recipe/patches/0005-Allow-overriding-CUDA-related-paths.patch @@ -1,7 +1,7 @@ From e0cb086099287bd51fdbe8e6f847ec2d0646f085 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Wed, 27 Nov 2024 13:47:23 +0100 -Subject: [PATCH 05/16] Allow overriding CUDA-related paths +Subject: [PATCH 05/15] Allow overriding CUDA-related paths --- cmake/Modules/FindCUDAToolkit.cmake | 2 +- diff --git a/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch b/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch index d2672ba5f..2ababacc8 100644 --- a/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch +++ b/recipe/patches/0006-Use-BLAS_USE_CBLAS_DOT-for-OpenBLAS-builds.patch @@ -1,7 +1,7 @@ From 7e7547dab6c26e7fd324fde6cb6aad5d57bebcf9 Mon Sep 17 00:00:00 2001 From: Isuru Fernando Date: Wed, 18 Dec 2024 03:59:00 +0000 -Subject: [PATCH 06/16] Use BLAS_USE_CBLAS_DOT for OpenBLAS builds +Subject: [PATCH 06/15] Use BLAS_USE_CBLAS_DOT for OpenBLAS builds There are two calling conventions for *dotu functions diff --git a/recipe/patches/0007-fix-issue-142484.patch b/recipe/patches/0007-fix-issue-142484.patch index 1b651ad27..30674b101 100644 --- a/recipe/patches/0007-fix-issue-142484.patch +++ b/recipe/patches/0007-fix-issue-142484.patch @@ -1,7 +1,7 @@ From 63f0d3218792d874650a7926f2b956ecbe74eac0 Mon Sep 17 00:00:00 2001 From: "Zheng, Zhaoqiong" Date: Fri, 27 Dec 2024 13:49:36 +0800 -Subject: [PATCH 07/16] fix issue 142484 +Subject: [PATCH 07/15] fix issue 142484 From https://github.com/pytorch/pytorch/pull/143894 --- diff --git a/recipe/patches/0008-Fix-FindOpenBLAS.patch b/recipe/patches/0008-Fix-FindOpenBLAS.patch index 5c6844148..6a4307872 100644 --- a/recipe/patches/0008-Fix-FindOpenBLAS.patch +++ b/recipe/patches/0008-Fix-FindOpenBLAS.patch @@ -1,7 +1,7 @@ From 6e00778c46305f6a36670fa99a326c2426203a42 Mon Sep 17 00:00:00 2001 From: Bas Zalmstra Date: Thu, 16 May 2024 10:46:49 +0200 -Subject: [PATCH 08/16] Fix FindOpenBLAS +Subject: [PATCH 08/15] Fix FindOpenBLAS --- cmake/Modules/FindOpenBLAS.cmake | 15 +++++++++------ diff --git a/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch 
b/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch index 025dd3116..8b898e578 100644 --- a/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch +++ b/recipe/patches/0009-simplify-torch.utils.cpp_extension.include_paths-use.patch @@ -1,7 +1,7 @@ From 12a4473ae7a47da2a30121f329a2c3c8f3f456c5 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 23 Jan 2025 22:46:58 +1100 -Subject: [PATCH 09/16] simplify torch.utils.cpp_extension.include_paths; use +Subject: [PATCH 09/15] simplify torch.utils.cpp_extension.include_paths; use it in cpp_builder The /TH headers have not existed since pytorch 1.11 diff --git a/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch b/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch index 73ffc4614..cace95f62 100644 --- a/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch +++ b/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch @@ -1,24 +1,33 @@ -From 0295752d2c44d86681d0381ef97d42ca0199ca56 Mon Sep 17 00:00:00 2001 +From 8965e3dbd5a2ec0134a0e603e705896ac59b1882 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 23 Jan 2025 22:58:14 +1100 -Subject: [PATCH 10/16] point include paths to $PREFIX/include +Subject: [PATCH 10/15] point include paths to $PREFIX/include --- - torch/utils/cpp_extension.py | 9 +++++++++ - 1 file changed, 9 insertions(+) + torch/utils/cpp_extension.py | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py -index 23e2499903c..f8acd57c62e 100644 +index 23e2499903c..ea5516ba6f6 100644 --- a/torch/utils/cpp_extension.py +++ b/torch/utils/cpp_extension.py -@@ -1208,10 +1208,19 @@ def include_paths(device_type: str = "cpu") -> List[str]: +@@ -1208,10 +1208,28 @@ def include_paths(device_type: str = "cpu") -> List[str]: A list of include path strings. """ lib_include = os.path.join(_TORCH_PATH, 'include') -+ if os.environ.get("CONDA_BUILD", None) is not None: ++ if (os.environ.get("CONDA_BUILD", None) is not None ++ and os.environ.get("CONDA_BUILD_CROSS_COMPILATION", None) is not None): ++ # to avoid problems in cross-compilation, we need to point to the same environment ++ # where the currently running pytorch is -- i.e. the BUILD_PREFIX. 
See ++ # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/349 ++ pieces = [os.environ["BUILD_PREFIX"]] + IS_WINDOWS * ["Library"] + ["include"] ++ lib_include = os.path.join(*pieces) ++ elif os.environ.get("CONDA_BUILD", None) is not None: ++ # regular build (& testing) phase --> PREFIX is set + pieces = [os.environ["PREFIX"]] + IS_WINDOWS * ["Library"] + ["include"] + lib_include = os.path.join(*pieces) + elif os.environ.get("CONDA_PREFIX", None) is not None: ++ # final environment + pieces = [os.environ["CONDA_PREFIX"]] + IS_WINDOWS * ["Library"] + ["include"] + lib_include = os.path.join(*pieces) paths = [ diff --git a/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch b/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch index 92de63a75..d16e40e48 100644 --- a/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch +++ b/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch @@ -1,7 +1,7 @@ -From a9ca43e842b6d550a364d62a286bcb161e1e8f04 Mon Sep 17 00:00:00 2001 +From 7bcc04c68f85133e748165712ba413bc1aad3c54 Mon Sep 17 00:00:00 2001 From: Daniel Petry Date: Tue, 21 Jan 2025 17:45:23 -0600 -Subject: [PATCH 11/16] Add conda prefix to inductor include paths +Subject: [PATCH 11/15] Add conda prefix to inductor include paths Currently inductor doesn't look in conda's includes and libs. This results in errors when it tries to compile, if system versions are being used of diff --git a/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch b/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch index f6fc082ce..7694c2238 100644 --- a/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch +++ b/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch @@ -1,7 +1,7 @@ -From 10ebfd7e5b04d022eab602889b4f06659b12b75a Mon Sep 17 00:00:00 2001 +From 91515b50ac575123275716f735c9efc45ca801cb Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 28 Jan 2025 14:15:34 +1100 -Subject: [PATCH 12/16] make ATEN_INCLUDE_DIR relative to TORCH_INSTALL_PREFIX +Subject: [PATCH 12/15] make ATEN_INCLUDE_DIR relative to TORCH_INSTALL_PREFIX we cannot set CMAKE_INSTALL_PREFIX without the pytorch build complaining, but we can use TORCH_INSTALL_PREFIX, which is set correctly relative to our CMake files already: diff --git a/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch b/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch index 8da32aad6..d34796fe2 100644 --- a/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch +++ b/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch @@ -1,7 +1,7 @@ -From 24eece84ed217e14628983f0941c5ec274717b9f Mon Sep 17 00:00:00 2001 +From cdfb398e25f4082878db4413a298abd90e5ce6fe Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Tue, 28 Jan 2025 10:58:29 +1100 -Subject: [PATCH 13/16] remove `DESTINATION lib` from CMake `install(TARGETS` +Subject: [PATCH 13/15] remove `DESTINATION lib` from CMake `install(TARGETS` directives Suggested-By: Silvio Traversaro diff --git a/recipe/patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch b/recipe/patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch similarity index 99% rename from recipe/patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch rename to recipe/patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch index 8e7feb418..842150cdc 100644 --- a/recipe/patches/0015-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch +++ b/recipe/patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch @@ -1,7 +1,7 @@ -From b5084af57c14cdee26936fdfa6425598e8659eb6 Mon Sep 17 00:00:00 2001 +From 51b4b5b5df27cafd2ba286550764b2b893e7f8b2 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 30 Jan 2025 08:33:44 +1100 -Subject: [PATCH 15/16] avoid deprecated `find_package(CUDA)` in caffe2 CMake +Subject: [PATCH 14/15] avoid deprecated `find_package(CUDA)` in caffe2 CMake metadata vendor the not-available-anymore function torch_cuda_get_nvcc_gencode_flag from CMake diff --git a/recipe/patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch b/recipe/patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch similarity index 94% rename from recipe/patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch rename to recipe/patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch index 7c85e1add..60c8dafe6 100644 --- a/recipe/patches/0016-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch +++ b/recipe/patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch @@ -1,7 +1,7 @@ -From 2869f014b34cc01662a9c3542d11d53dafd0bb0a Mon Sep 17 00:00:00 2001 +From ba332a8f5f5d22892c86b40153e93ed801cceb4f Mon Sep 17 00:00:00 2001 From: Xu Han Date: Wed, 15 Jan 2025 23:43:41 +0000 -Subject: [PATCH 16/16] export AOTI_TORCH_EXPORT on Windows. (#140030) +Subject: [PATCH 15/15] export AOTI_TORCH_EXPORT on Windows. (#140030) Fixes #139954 From d72a0fd9ccaa57e8379a9aa906d10971df5d8cd0 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sat, 8 Feb 2025 14:25:02 +1100 Subject: [PATCH 35/58] Revert "temporary: skip known-passing builds to conserve resources" This reverts commit 1da103764f97f563c5caac1c318d9506dad34445. --- recipe/meta.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 5ab473448..808437048 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -70,8 +70,6 @@ build: number: {{ build }} # cuda 11.8 was dropped due to maintenance effort, see discussion in #177 skip: true # [cuda_compiler_version == "11.8"] - # skip known-passing builds while bringing up GPU builds - skip: true # [cuda_compiler_version == "None" or win or (linux64 and blas_impl != "mkl")] # This logic allows two rc variants to be defined in the conda_build_config, but only one to actually be built. # We want to be able to define two variants in the cbc so we can assign different labels to each in the upload channel # (by zipping is_rc with channel_targets). This prevents rc builds being used unless specifically requested. From 96d395436337636f47db92fc1a29ee84fd669003 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Sat, 8 Feb 2025 14:25:51 +1100 Subject: [PATCH 36/58] Reapply "Unvendor pybind11 and eigen" This reverts commit 93854a615b1087901e676ce61886755604ae36fb. --- recipe/build.sh | 5 ++++- recipe/meta.yaml | 12 ++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/recipe/build.sh b/recipe/build.sh index 51e01898d..d61473635 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -89,6 +89,8 @@ export USE_SYSTEM_SLEEF=1 # use our protobuf export BUILD_CUSTOM_PROTOBUF=OFF rm -rf $PREFIX/bin/protoc +export USE_SYSTEM_PYBIND11=1 +export USE_SYSTEM_EIGEN_INSTALL=1 # prevent six from being downloaded > third_party/NNPACK/cmake/DownloadSix.cmake @@ -242,7 +244,8 @@ case ${PKG_NAME} in mv build/lib.*/torch/bin/* ${PREFIX}/bin/ mv build/lib.*/torch/lib/* ${PREFIX}/lib/ - mv build/lib.*/torch/share/* ${PREFIX}/share/ + # need to merge these now because we're using system pybind11, meaning the destination directory is not empty + rsync -a build/lib.*/torch/share/* ${PREFIX}/share/ mv build/lib.*/torch/include/{ATen,caffe2,tensorpipe,torch,c10} ${PREFIX}/include/ rm ${PREFIX}/lib/libtorch_python.* diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 808437048..7c36cf2c2 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -118,6 +118,7 @@ requirements: - protobuf - make # [linux] - sccache # [win] + - rsync # [unix] host: # GPU requirements - cudnn # [cuda_compiler_version != "None"] @@ -166,8 +167,8 @@ requirements: - libuv - pkg-config # [unix] - typing_extensions - - pybind11 # [win] - - eigen # [win] + - pybind11 + - eigen - zlib run: # GPU requirements without run_exports @@ -312,8 +313,8 @@ outputs: - intel-openmp {{ mkl }} # [win] - libabseil - libprotobuf - - eigen # [win] - - pybind11 # [win] + - pybind11 + - eigen - sleef - libuv - pkg-config # [unix] @@ -340,7 +341,7 @@ outputs: - jinja2 - networkx - optree >=0.13.0 - - pybind11 # [win] + - pybind11 - setuptools # sympy 1.13.2 was reported to result in test failures on Windows and mac # https://github.com/pytorch/pytorch/pull/133235 @@ -526,7 +527,6 @@ about: - LICENSE - NOTICE - third_party/CMake/Copyright.txt - - third_party/pybind11/LICENSE # [unix] summary: PyTorch is an optimized tensor library for deep learning using GPUs and CPUs. description: | PyTorch is a Python package that provides two high-level features: From 051b686919d1b9599ec7291938af78719d4b8613 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sat, 8 Feb 2025 14:33:43 +1100 Subject: [PATCH 37/58] different spelling for handing `-w` to `-Xptxas` --- recipe/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/build.sh b/recipe/build.sh index d61473635..d94dfd852 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -220,7 +220,7 @@ elif [[ ${cuda_compiler_version} != "None" ]]; then export MAGMA_HOME="${PREFIX}" export USE_MAGMA=1 # turn off noisy nvcc warnings - export CMAKE_CUDA_FLAGS="-w -Xptxas=\"-w\"" + export CMAKE_CUDA_FLAGS="-w -Xptxas -w" else if [[ "$target_platform" != *-64 ]]; then # Breakpad seems to not work on aarch64 or ppc64le From 71e319d7ac35ebaea824e30b7a506a91eb2db316 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Sat, 8 Feb 2025 18:50:47 +1100 Subject: [PATCH 38/58] skip test_mutable_custom_op_fixed_layout everywhere --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 7c36cf2c2..42ec6bbe8 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -446,7 +446,7 @@ outputs: {% set skips = skips ~ " or (TestAutograd and test_profiler_propagation)" %} # tests that fail due to resource clean-up issues (non-unique temporary libraries), see # https://github.com/conda-forge/pytorch-cpu-feedstock/pull/318#issuecomment-2620080859 - {% set skips = skips ~ " or test_mutable_custom_op_fixed_layout" %} # [cuda_compiler_version != "None"] + {% set skips = skips ~ " or test_mutable_custom_op_fixed_layout" %} # trivial accuracy problems {% set skips = skips ~ " or test_BCELoss_weights_no_reduce_cuda" %} # [unix and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_ctc_loss_cudnn_tensor_cuda " %} # [unix and cuda_compiler_version != "None"] From de7707c9643e344c06bce1251ac86100d979950e Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Sat, 8 Feb 2025 19:12:36 +1100 Subject: [PATCH 39/58] robustify CONDA_BUILD_CROSS_COMPILATION handling --- .../0010-point-include-paths-to-PREFIX-include.patch | 6 +++--- .../0011-Add-conda-prefix-to-inductor-include-paths.patch | 2 +- ...ke-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch | 2 +- ...move-DESTINATION-lib-from-CMake-install-TARGETS-di.patch | 2 +- ...oid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch | 2 +- .../0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch b/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch index cace95f62..4a03ba8d4 100644 --- a/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch +++ b/recipe/patches/0010-point-include-paths-to-PREFIX-include.patch @@ -1,4 +1,4 @@ -From 8965e3dbd5a2ec0134a0e603e705896ac59b1882 Mon Sep 17 00:00:00 2001 +From 2e9805edf7c26bf7890a8704460047592fff3a79 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 23 Jan 2025 22:58:14 +1100 Subject: [PATCH 10/15] point include paths to $PREFIX/include @@ -8,7 +8,7 @@ Subject: [PATCH 10/15] point include paths to $PREFIX/include 1 file changed, 18 insertions(+) diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py -index 23e2499903c..ea5516ba6f6 100644 +index 23e2499903c..a8caba3c058 100644 --- a/torch/utils/cpp_extension.py +++ b/torch/utils/cpp_extension.py @@ -1208,10 +1208,28 @@ def include_paths(device_type: str = "cpu") -> List[str]: @@ -16,7 +16,7 @@ index 23e2499903c..ea5516ba6f6 100644 """ lib_include = os.path.join(_TORCH_PATH, 'include') + if (os.environ.get("CONDA_BUILD", None) is not None -+ and os.environ.get("CONDA_BUILD_CROSS_COMPILATION", None) is not None): ++ and os.environ.get("CONDA_BUILD_CROSS_COMPILATION", None) not in (None, "", "0")): + # to avoid problems in cross-compilation, we need to point to the same environment + # where the currently running pytorch is -- i.e. the BUILD_PREFIX. 
See + # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/349 diff --git a/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch b/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch index d16e40e48..00619acaf 100644 --- a/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch +++ b/recipe/patches/0011-Add-conda-prefix-to-inductor-include-paths.patch @@ -1,4 +1,4 @@ -From 7bcc04c68f85133e748165712ba413bc1aad3c54 Mon Sep 17 00:00:00 2001 +From e8eef4b33903af5886cbde7b4342ebc2705933ef Mon Sep 17 00:00:00 2001 From: Daniel Petry Date: Tue, 21 Jan 2025 17:45:23 -0600 Subject: [PATCH 11/15] Add conda prefix to inductor include paths diff --git a/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch b/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch index 7694c2238..125face33 100644 --- a/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch +++ b/recipe/patches/0012-make-ATEN_INCLUDE_DIR-relative-to-TORCH_INSTALL_PREF.patch @@ -1,4 +1,4 @@ -From 91515b50ac575123275716f735c9efc45ca801cb Mon Sep 17 00:00:00 2001 +From 7c955a22b748da66317f69f49f2e99d826083a0d Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 28 Jan 2025 14:15:34 +1100 Subject: [PATCH 12/15] make ATEN_INCLUDE_DIR relative to TORCH_INSTALL_PREFIX diff --git a/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch b/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch index d34796fe2..36b52de77 100644 --- a/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch +++ b/recipe/patches/0013-remove-DESTINATION-lib-from-CMake-install-TARGETS-di.patch @@ -1,4 +1,4 @@ -From cdfb398e25f4082878db4413a298abd90e5ce6fe Mon Sep 17 00:00:00 2001 +From c4cc82934faf2e32ab283a2ec2d9266049db9872 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 28 Jan 2025 10:58:29 +1100 Subject: [PATCH 13/15] remove `DESTINATION lib` from CMake `install(TARGETS` diff --git a/recipe/patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch b/recipe/patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch index 842150cdc..9879f1267 100644 --- a/recipe/patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch +++ b/recipe/patches/0014-avoid-deprecated-find_package-CUDA-in-caffe2-CMake-m.patch @@ -1,4 +1,4 @@ -From 51b4b5b5df27cafd2ba286550764b2b893e7f8b2 Mon Sep 17 00:00:00 2001 +From e644304ce9c67c3f4185141dc603f8196e32a7cd Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 30 Jan 2025 08:33:44 +1100 Subject: [PATCH 14/15] avoid deprecated `find_package(CUDA)` in caffe2 CMake diff --git a/recipe/patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch b/recipe/patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch index 60c8dafe6..d5f1cbfa3 100644 --- a/recipe/patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch +++ b/recipe/patches/0015-export-AOTI_TORCH_EXPORT-on-Windows.-140030.patch @@ -1,4 +1,4 @@ -From ba332a8f5f5d22892c86b40153e93ed801cceb4f Mon Sep 17 00:00:00 2001 +From afc5756195b26f0fcbe0ee96a267149db0bbe71c Mon Sep 17 00:00:00 2001 From: Xu Han Date: Wed, 15 Jan 2025 23:43:41 +0000 Subject: [PATCH 15/15] export AOTI_TORCH_EXPORT on Windows. (#140030) From 381fcb865078993f9de474730a1da4e9c513e770 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Sun, 9 Feb 2025 18:53:27 +1100 Subject: [PATCH 40/58] set CUDA_VERSION during build; test `torch.version.cuda` is correct --- recipe/bld.bat | 1 + recipe/build.sh | 1 + recipe/meta.yaml | 1 + 3 files changed, 3 insertions(+) diff --git a/recipe/bld.bat b/recipe/bld.bat index 394d9058e..ac05799c9 100644 --- a/recipe/bld.bat +++ b/recipe/bld.bat @@ -93,6 +93,7 @@ if not "%cuda_compiler_version%" == "None" ( set MAGMA_HOME=%LIBRARY_PREFIX% set "PATH=%CUDA_BIN_PATH%;%PATH%" set CUDNN_INCLUDE_DIR=%LIBRARY_PREFIX%\include + set "CUDA_VERSION=%cuda_compiler_version%" ) else ( set USE_CUDA=0 @REM MKLDNN is an Apache-2.0 licensed library for DNNs and is used diff --git a/recipe/build.sh b/recipe/build.sh index d94dfd852..4438676d5 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -219,6 +219,7 @@ elif [[ ${cuda_compiler_version} != "None" ]]; then export USE_STATIC_CUDNN=0 export MAGMA_HOME="${PREFIX}" export USE_MAGMA=1 + export CUDA_VERSION=$cuda_compiler_version # turn off noisy nvcc warnings export CMAKE_CUDA_FLAGS="-w -Xptxas -w" else diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 42ec6bbe8..f8008f56f 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -403,6 +403,7 @@ outputs: - python -c "import torch; assert torch.backends.cuda.is_built()" # [linux64 and (cuda_compiler_version != "None")] - python -c "import torch; assert torch.backends.cudnn.is_available()" # [linux64 and (cuda_compiler_version != "None")] - python -c "import torch; assert torch.backends.cudnn.enabled" # [linux64 and (cuda_compiler_version != "None")] + - python -c "import torch; assert torch.version.cuda is not None" # [cuda_compiler_version != "None"] # At conda-forge, we target versions of OSX that are too old for MPS support # But if users install a newer version of OSX, they will have MPS support # https://github.com/conda-forge/pytorch-cpu-feedstock/pull/123#issuecomment-1186355073 From 04e143798b7a0ba528997beda512753aa9fb0b32 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Sun, 9 Feb 2025 18:55:27 +1100 Subject: [PATCH 41/58] enable `torch.backends` tests also for win+CUDA --- recipe/meta.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index f8008f56f..a0872e18b 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -399,10 +399,10 @@ outputs: - python -c "import torch; import numpy" - python -c "import numpy; import torch" # distributed support is enabled by default on linux; for mac, we enable it manually in build.sh - - python -c "import torch; assert torch.distributed.is_available()" # [linux or osx] - - python -c "import torch; assert torch.backends.cuda.is_built()" # [linux64 and (cuda_compiler_version != "None")] - - python -c "import torch; assert torch.backends.cudnn.is_available()" # [linux64 and (cuda_compiler_version != "None")] - - python -c "import torch; assert torch.backends.cudnn.enabled" # [linux64 and (cuda_compiler_version != "None")] + - python -c "import torch; assert torch.distributed.is_available()" # [linux or osx] + - python -c "import torch; assert torch.backends.cuda.is_built()" # [cuda_compiler_version != "None"] + - python -c "import torch; assert torch.backends.cudnn.is_available()" # [cuda_compiler_version != "None"] + - python -c "import torch; assert torch.backends.cudnn.enabled" # [cuda_compiler_version != "None"] - python -c "import torch; assert torch.version.cuda is not None" # [cuda_compiler_version != "None"] # At conda-forge, we target versions of OSX that are too old for MPS support # But if users install a newer version of OSX, they will have MPS support From 272621ba8615899dcfcf29dc72db5a705e46199c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Mon, 10 Feb 2025 16:02:28 +0100 Subject: [PATCH 42/58] Build with `-Wl,-z,noexecstack` to fix glibc 2.41 compatibility Explicitly pass `-Wl,-z,noexecstack` to the linker, to ensure that `libpytorch_cpu.so` is compiled without an executable stack. This is necessary because the raw assembly in oneDNN triggers: ``` $BUILD_PREFIX/bin/../lib/gcc/x86_64-conda-linux-gnu/13.3.0/../../../../x86_64-conda-linux-gnu/bin/ld: warning: ittptmark64.S.o: missing .note.GNU-stack section implies executable stack ``` ...and glibc 2.41 no longer permits loading libraries with executable stack. Fixes #350 --- recipe/build.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/recipe/build.sh b/recipe/build.sh index 4438676d5..45bde74c5 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -54,6 +54,10 @@ export _GLIBCXX_USE_CXX11_ABI=1 if [[ "$target_platform" == "osx-64" ]]; then export CXXFLAGS="$CXXFLAGS -DTARGET_OS_OSX=1" export CFLAGS="$CFLAGS -DTARGET_OS_OSX=1" +elif [[ "$target_platform" == linux-* ]]; then + # Explicitly force non-executable stack to fix compatibility with glibc 2.41, due to: + # ittptmark64.S.o: missing .note.GNU-stack section implies executable stack + LDFLAGS="${LDFLAGS} -Wl,-z,noexecstack" fi # Dynamic libraries need to be lazily loaded so that torch From b254812a7d1b8596bbc6cc15616c40902d31fa27 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Tue, 11 Feb 2025 08:30:34 +1100 Subject: [PATCH 43/58] add v2.5.x to abi_migration_branches --- conda-forge.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/conda-forge.yml b/conda-forge.yml index 47ae951d6..d4eae11d6 100644 --- a/conda-forge.yml +++ b/conda-forge.yml @@ -5,6 +5,9 @@ azure: settings_win: variables: CONDA_BLD_PATH: C:\\bld\\ +bot: + abi_migration_branches: + - v2.5.x build_platform: linux_aarch64: linux_64 osx_arm64: osx_64 From 2eb8ef4baeeae20d0b8dbaf9424859289a823e46 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Tue, 11 Feb 2025 14:04:05 +1100 Subject: [PATCH 44/58] actually move `_C.lib` back to `%SP_DIR%\torch\lib` --- recipe/bld.bat | 6 +++--- recipe/meta.yaml | 8 +++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/recipe/bld.bat b/recipe/bld.bat index ac05799c9..9cddfeff5 100644 --- a/recipe/bld.bat +++ b/recipe/bld.bat @@ -204,7 +204,7 @@ if "%PKG_NAME%" == "libtorch" ( @REM Remove the python binary file, that is placed in the site-packages @REM directory by the specific python specific pytorch package. - del %LIBRARY_BIN%\torch_python.* %LIBRARY_LIB%\torch_python.* %LIBRARY_LIB%\_C.lib + del %LIBRARY_BIN%\torch_python.* %LIBRARY_LIB%\torch_python.* if %ERRORLEVEL% neq 0 exit 1 popd @@ -227,8 +227,8 @@ if "%PKG_NAME%" == "libtorch" ( @REM Copy libtorch_python.lib back -- that's much easier than the for loop @REM needed to remove everything else. - robocopy /NP /NFL /NDL /NJH /E %LIBRARY_LIB%\ torch\lib\ torch_python.lib - robocopy /NP /NFL /NDL /NJH /E %LIBRARY_LIB%\ torch\lib\ _C.lib + mkdir %SP_DIR%\torch\lib + robocopy /NP /NFL /NDL /NJH /E /MOV %LIBRARY_LIB%\ %SP_DIR%\torch\lib\ torch_python.lib _C.lib ) @REM Show the sccache stats. diff --git a/recipe/meta.yaml b/recipe/meta.yaml index a0872e18b..0516a2c22 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -382,6 +382,7 @@ outputs: # - pytest-shard imports: - torch + - torch._C source_files: # Only include the source_files if we are actually going to run the tests. - test @@ -410,9 +411,10 @@ outputs: # - python -c "import torch; assert torch.backends.mps.is_available()" # [osx] # python-version-specific library (default location in SP_DIR symlinks back to this) - - test -f $PREFIX/lib/libtorch_python${SHLIB_EXT} # [unix] - - if not exist %LIBRARY_BIN%\torch_python.dll exit 1 # [win] - - if not exist %LIBRARY_LIB%\torch_python.lib exit 1 # [win] + - test -f $PREFIX/lib/libtorch_python${SHLIB_EXT} # [unix] + - if not exist %LIBRARY_BIN%\torch_python.dll exit 1 # [win] + - if not exist %SP_DIR%\torch\lib\torch_python.lib exit 1 # [win] + - if not exist %SP_DIR%\torch\lib\_C.lib exit 1 # [win] # a reasonably safe subset of tests that should run under 15 minutes {% set tests = " ".join([ From 22ee0fe87928e82683bb299759e98591e2e24c93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Wed, 12 Feb 2025 13:30:08 +1100 Subject: [PATCH 45/58] avoid very verbose logs from setuptools Co-Authored-By: H. Vetinari --- recipe/bld.bat | 2 +- recipe/build.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/recipe/bld.bat b/recipe/bld.bat index 9cddfeff5..7f3ce96e4 100644 --- a/recipe/bld.bat +++ b/recipe/bld.bat @@ -161,7 +161,7 @@ if EXIST build ( if %ERRORLEVEL% neq 0 exit 1 ) -%PYTHON% -m pip %PIP_ACTION% . --no-build-isolation --no-deps %PIP_VERBOSITY% --no-clean +%PYTHON% -m pip %PIP_ACTION% . 
--no-build-isolation --no-deps %PIP_VERBOSITY% --no-clean --config-settings=--global-option=-q if %ERRORLEVEL% neq 0 exit 1 @REM Here we split the build into two parts. diff --git a/recipe/build.sh b/recipe/build.sh index 45bde74c5..85d408b2f 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -245,7 +245,7 @@ case ${PKG_NAME} in libtorch) # Call setup.py directly to avoid spending time on unnecessarily # packing and unpacking the wheel. - $PREFIX/bin/python setup.py build + $PREFIX/bin/python setup.py -q build mv build/lib.*/torch/bin/* ${PREFIX}/bin/ mv build/lib.*/torch/lib/* ${PREFIX}/lib/ @@ -258,7 +258,7 @@ case ${PKG_NAME} in cp build/CMakeCache.txt build/CMakeCache.txt.orig ;; pytorch) - $PREFIX/bin/python -m pip install . --no-deps --no-build-isolation -v --no-clean \ + $PREFIX/bin/python -m pip install . --no-deps --no-build-isolation -v --no-clean --config-settings=--global-option=-q \ | sed "s,${CXX},\$\{CXX\},g" \ | sed "s,${PREFIX},\$\{PREFIX\},g" # Keep this in ${PREFIX}/lib so that the library can be found by From 47f7f215c285e2a140a37b0330279af31b0d6891 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Wed, 12 Feb 2025 13:35:57 +1100 Subject: [PATCH 46/58] homogenize pytest invocation; also use -v on unix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested-By: Michał Górny --- recipe/meta.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 0516a2c22..2773f7b66 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -488,8 +488,7 @@ outputs: - export OMP_NUM_THREADS=2 # [unix] # reduced paralellism to avoid OOM; test only one python version on aarch because emulation is super-slow # disable hypothesis because it randomly yields health check errors - - python -m pytest -n 2 {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [unix and (not aarch64 or py==312)] - - python -m pytest -v -s {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [win] + - python -m pytest -n 2 -v {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [not aarch64 or py==312] # regression test for https://github.com/conda-forge/pytorch-cpu-feedstock/issues/329, where we picked up # duplicate `.pyc` files due to newest py-ver (3.13) in the build environment not matching the one in host; From 35d03ed76ca3b6e4ebb770c9d0c36a4b57cdbff3 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Wed, 12 Feb 2025 14:29:10 +1100 Subject: [PATCH 47/58] filter out ptxas advisories --- recipe/build.sh | 5 +++-- recipe/meta.yaml | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/recipe/build.sh b/recipe/build.sh index 85d408b2f..feabd568b 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -224,7 +224,8 @@ elif [[ ${cuda_compiler_version} != "None" ]]; then export MAGMA_HOME="${PREFIX}" export USE_MAGMA=1 export CUDA_VERSION=$cuda_compiler_version - # turn off noisy nvcc warnings + # ptxas advisories do not get ignored correctly, see + # https://github.com/conda-forge/cuda-nvcc-feedstock/issues/60 export CMAKE_CUDA_FLAGS="-w -Xptxas -w" else if [[ "$target_platform" != *-64 ]]; then @@ -245,7 +246,7 @@ case ${PKG_NAME} in libtorch) # Call setup.py directly to avoid spending time on unnecessarily # packing and unpacking the wheel. 
- $PREFIX/bin/python setup.py -q build + $PREFIX/bin/python setup.py -q build | stdbuf -oL grep -vE "Advisory: Modifier '\.sp::ordered_metadata'" mv build/lib.*/torch/bin/* ${PREFIX}/bin/ mv build/lib.*/torch/lib/* ${PREFIX}/lib/ diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 2773f7b66..24d72e618 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -118,6 +118,7 @@ requirements: - protobuf - make # [linux] - sccache # [win] + - grep # [unix] - rsync # [unix] host: # GPU requirements From 6d2a9a6f6f39b8cdcfa234c7b11470c4aded55a0 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Wed, 12 Feb 2025 14:47:37 +1100 Subject: [PATCH 48/58] suppress warning block in pytest results --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 24d72e618..dd4417e7b 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -489,7 +489,7 @@ outputs: - export OMP_NUM_THREADS=2 # [unix] # reduced paralellism to avoid OOM; test only one python version on aarch because emulation is super-slow # disable hypothesis because it randomly yields health check errors - - python -m pytest -n 2 -v {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 # [not aarch64 or py==312] + - pytest -n 2 -v {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 --disable-warnings # [not aarch64 or py==312] # regression test for https://github.com/conda-forge/pytorch-cpu-feedstock/issues/329, where we picked up # duplicate `.pyc` files due to newest py-ver (3.13) in the build environment not matching the one in host; From d31229e0ec0e963539aca87d5430792140135eba Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Wed, 12 Feb 2025 14:53:07 +1100 Subject: [PATCH 49/58] run tests sequentially, but increase OMP_NUM_THREADS again --- recipe/meta.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index dd4417e7b..8c4c5f512 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -376,7 +376,8 @@ outputs: # Required by run_test.py - pytest-flakefinder - pytest-rerunfailures - - pytest-xdist + # disabled because GPU tests might run OOM + # - pytest-xdist # danpetry/TF: Pytorch includes their own edited version of pytest-shard and adding # it into the test deps as well results in the --shard-id option being added twice. # https://github.com/pytorch/pytorch/blob/main/test/pytest_shard_custom.py @@ -486,10 +487,10 @@ outputs: # the whole test suite takes forever, but we should get a good enough coverage # for potential packaging problems by running a fixed subset - - export OMP_NUM_THREADS=2 # [unix] + - export OMP_NUM_THREADS=4 # [unix] # reduced paralellism to avoid OOM; test only one python version on aarch because emulation is super-slow # disable hypothesis because it randomly yields health check errors - - pytest -n 2 -v {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 --disable-warnings # [not aarch64 or py==312] + - pytest -v {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 --disable-warnings # [not aarch64 or py==312] # regression test for https://github.com/conda-forge/pytorch-cpu-feedstock/issues/329, where we picked up # duplicate `.pyc` files due to newest py-ver (3.13) in the build environment not matching the one in host; From 364ad9d18c4c5771cdca682c5e07f4d18dfad191 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Wed, 12 Feb 2025 15:08:40 +1100 Subject: [PATCH 50/58] skip test failures that were not due to OOM --- recipe/meta.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 8c4c5f512..463a6e09f 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -463,12 +463,19 @@ outputs: {% set skips = skips ~ " or test_indirect_device_assert" %} # [linux and cuda_compiler_version != "None"] # test that fails to find temporary resource {% set skips = skips ~ " or (GPUTests and test_scatter_reduce2)" %} # [linux and cuda_compiler_version != "None"] + # ROCM test whose skip doesn't trigger + {% set skips = skips ~ " or test_ck_blas_library_cpu" %} # [linux and cuda_compiler_version != "None"] + # problem with finding output of `torch.cuda.tunable.write_file()` + {% set skips = skips ~ " or test_matmul_offline_tunableop_cuda_float16" %} # [linux and cuda_compiler_version != "None"] + # catastropic accuracy failure in convolution + {% set skips = skips ~ " or test_Conv3d_1x1x1_no_bias_cuda" %} # [linux and cuda_compiler_version != "None"] # MKL problems {% set skips = skips ~ " or (TestLinalgCPU and test_inverse_errors_large_cpu)" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_reentrant_parent_error_on_cpu_cuda" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] # non-MKL problems {% set skips = skips ~ " or test_cross_entropy_loss_2d_out_of_bounds_class_index_cuda" %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_cublas_config_nondeterministic_alert_cuda " %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] + {% set skips = skips ~ " or test_gather_scatter_cpu or test_index_put2_cpu " %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] # these tests are failing with low -n values {% set skips = skips ~ " or test_base_does_not_require_grad_mode_nothing" %} {% set skips = skips ~ " or test_base_does_not_require_grad_mode_warn" %} From f14feb5e3e043b2a59fd6776d504ba72f052e0ff Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Wed, 12 Feb 2025 15:13:31 +1100 Subject: [PATCH 51/58] temporary: restrict jobs to linux64+CUDA --- recipe/meta.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 463a6e09f..feec1ec3f 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -70,6 +70,8 @@ build: number: {{ build }} # cuda 11.8 was dropped due to maintenance effort, see discussion in #177 skip: true # [cuda_compiler_version == "11.8"] + # temporary + skip: true # [not (linux64 and cuda_compiler_version != "None")] # This logic allows two rc variants to be defined in the conda_build_config, but only one to actually be built. # We want to be able to define two variants in the cbc so we can assign different labels to each in the upload channel # (by zipping is_rc with channel_targets). This prevents rc builds being used unless specifically requested. From 2e755104c699ddef12528e7625b220ac00f98a88 Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Thu, 13 Feb 2025 09:41:18 +1100 Subject: [PATCH 52/58] minor skip updates --- recipe/meta.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index feec1ec3f..1e367fb8e 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -462,6 +462,8 @@ outputs: {% set skips = skips ~ " or test_sdpa_inference_mode_aot_compile" %} # [linux and cuda_compiler_version != "None"] {% set skips = skips ~ " or (TestNN and test_grid_sample)" %} # [linux and cuda_compiler_version != "None"] # don't mess with tests that rely on GPU failure handling + {% set skips = skips ~ " or test_cublas_config_nondeterministic_alert_cuda" %} # [linux and cuda_compiler_version != "None"] + {% set skips = skips ~ " or test_cross_entropy_loss_2d_out_of_bounds_class" %} # [linux and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_indirect_device_assert" %} # [linux and cuda_compiler_version != "None"] # test that fails to find temporary resource {% set skips = skips ~ " or (GPUTests and test_scatter_reduce2)" %} # [linux and cuda_compiler_version != "None"] @@ -475,8 +477,6 @@ outputs: {% set skips = skips ~ " or (TestLinalgCPU and test_inverse_errors_large_cpu)" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_reentrant_parent_error_on_cpu_cuda" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] # non-MKL problems - {% set skips = skips ~ " or test_cross_entropy_loss_2d_out_of_bounds_class_index_cuda" %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] - {% set skips = skips ~ " or test_cublas_config_nondeterministic_alert_cuda " %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_gather_scatter_cpu or test_index_put2_cpu " %} # [linux and blas_impl != "mkl" and cuda_compiler_version != "None"] # these tests are failing with low -n values {% set skips = skips ~ " or test_base_does_not_require_grad_mode_nothing" %} From db4e301c42ed596cd2fd6034d0f1041b17bdc423 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 13 Feb 2025 09:58:19 +1100 Subject: [PATCH 53/58] ignore C++ warnings --- recipe/build.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/recipe/build.sh b/recipe/build.sh index feabd568b..7a2d2917d 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -34,6 +34,9 @@ export CXXFLAGS="$(echo $CXXFLAGS | sed 's/-std=c++[0-9][0-9]//g')" # break users' programs export CFLAGS="$(echo $CFLAGS | sed 's/-fvisibility-inlines-hidden//g')" export CXXFLAGS="$(echo $CXXFLAGS | sed 's/-fvisibility-inlines-hidden//g')" +# ignore warnings; blows up the logs for no benefit; they need to be fixed upstream +export CXXFLAGS="$CXXFLAGS -w" + export LDFLAGS="$(echo $LDFLAGS | sed 's/-Wl,--as-needed//g')" # The default conda LDFLAGs include -Wl,-dead_strip_dylibs, which removes all the # MKL sequential, core, etc. libraries, resulting in a "Symbol not found: _mkl_blas_caxpy" From 564024050d36ed111b21539ab0a7f9de93971f4b Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Thu, 13 Feb 2025 10:01:25 +1100 Subject: [PATCH 54/58] skip some tests that take a very long time --- recipe/meta.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 1e367fb8e..a8142eba3 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -473,6 +473,9 @@ outputs: {% set skips = skips ~ " or test_matmul_offline_tunableop_cuda_float16" %} # [linux and cuda_compiler_version != "None"] # catastropic accuracy failure in convolution {% set skips = skips ~ " or test_Conv3d_1x1x1_no_bias_cuda" %} # [linux and cuda_compiler_version != "None"] + # skip some very long-running groups of tests (~30 minutes total) + {% set skips = skips ~ " or (test_gradgrad_nn_Transformer and _cuda_)" %} # [linux and cuda_compiler_version != "None"] + {% set skips = skips ~ " or test_avg_pool3d_backward2" %} # [linux and cuda_compiler_version != "None"] # MKL problems {% set skips = skips ~ " or (TestLinalgCPU and test_inverse_errors_large_cpu)" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] {% set skips = skips ~ " or test_reentrant_parent_error_on_cpu_cuda" %} # [linux and blas_impl == "mkl" and cuda_compiler_version != "None"] From 395bea3054205029ec53a7064c3cc03e07e56bfa Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 13 Feb 2025 10:17:23 +1100 Subject: [PATCH 55/58] Revert "temporary: restrict jobs to linux64+CUDA" This reverts commit f14feb5e3e043b2a59fd6776d504ba72f052e0ff. --- recipe/meta.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index a8142eba3..55f1df0b3 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -70,8 +70,6 @@ build: number: {{ build }} # cuda 11.8 was dropped due to maintenance effort, see discussion in #177 skip: true # [cuda_compiler_version == "11.8"] - # temporary - skip: true # [not (linux64 and cuda_compiler_version != "None")] # This logic allows two rc variants to be defined in the conda_build_config, but only one to actually be built. # We want to be able to define two variants in the cbc so we can assign different labels to each in the upload channel # (by zipping is_rc with channel_targets). This prevents rc builds being used unless specifically requested. From a75ba71388b126c31f9a0d926096e62cf2c5ef1b Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 13 Feb 2025 10:32:24 +1100 Subject: [PATCH 56/58] avoid filter for ptxas advisories on osx --- recipe/build.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/recipe/build.sh b/recipe/build.sh index 7a2d2917d..13d48aa6f 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -249,7 +249,12 @@ case ${PKG_NAME} in libtorch) # Call setup.py directly to avoid spending time on unnecessarily # packing and unpacking the wheel. - $PREFIX/bin/python setup.py -q build | stdbuf -oL grep -vE "Advisory: Modifier '\.sp::ordered_metadata'" + if [[ "$target_platform" == linux-* ]]; then + # filter out extremely noisy ptxas advisories + $PREFIX/bin/python setup.py -q build | stdbuf -oL grep -vE "Advisory: Modifier '\.sp::ordered_metadata'" + else + $PREFIX/bin/python setup.py -q build + fi mv build/lib.*/torch/bin/* ${PREFIX}/bin/ mv build/lib.*/torch/lib/* ${PREFIX}/lib/ From 833b3c83826da0e6f1548cb824251e46f1e8ed1f Mon Sep 17 00:00:00 2001 From: "H. 
Vetinari" Date: Thu, 13 Feb 2025 15:42:15 +1100 Subject: [PATCH 57/58] re-enable parallelism in test suite everywhere except linux64+CUDA --- recipe/meta.yaml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 55f1df0b3..5bcfb10aa 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -376,8 +376,7 @@ outputs: # Required by run_test.py - pytest-flakefinder - pytest-rerunfailures - # disabled because GPU tests might run OOM - # - pytest-xdist + - pytest-xdist # danpetry/TF: Pytorch includes their own edited version of pytest-shard and adding # it into the test deps as well results in the --shard-id option being added twice. # https://github.com/pytorch/pytorch/blob/main/test/pytest_shard_custom.py @@ -498,9 +497,12 @@ outputs: # the whole test suite takes forever, but we should get a good enough coverage # for potential packaging problems by running a fixed subset - export OMP_NUM_THREADS=4 # [unix] - # reduced paralellism to avoid OOM; test only one python version on aarch because emulation is super-slow + # reduced paralellism to avoid OOM for CUDA builds + {% set jobs = "-n 2" %} + {% set jobs = "-n 1" %} # [linux64 and cuda_compiler_version != "None"] + # test only one python version on aarch because emulation is super-slow; # disable hypothesis because it randomly yields health check errors - - pytest -v {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 --disable-warnings # [not aarch64 or py==312] + - pytest -v {{ jobs }} {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 --disable-warnings # [not aarch64 or py==312] # regression test for https://github.com/conda-forge/pytorch-cpu-feedstock/issues/329, where we picked up # duplicate `.pyc` files due to newest py-ver (3.13) in the build environment not matching the one in host; From cdef2760fc213b644aa7d21440a70e064ef1f0a8 Mon Sep 17 00:00:00 2001 From: "H. Vetinari" Date: Thu, 13 Feb 2025 15:45:27 +1100 Subject: [PATCH 58/58] remove test suite verbosity again --- recipe/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipe/meta.yaml b/recipe/meta.yaml index 5bcfb10aa..8f540e3e2 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -502,7 +502,7 @@ outputs: {% set jobs = "-n 1" %} # [linux64 and cuda_compiler_version != "None"] # test only one python version on aarch because emulation is super-slow; # disable hypothesis because it randomly yields health check errors - - pytest -v {{ jobs }} {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 --disable-warnings # [not aarch64 or py==312] + - pytest {{ jobs }} {{ tests }} -k "not ({{ skips }})" -m "not hypothesis" --durations=50 --disable-warnings # [not aarch64 or py==312] # regression test for https://github.com/conda-forge/pytorch-cpu-feedstock/issues/329, where we picked up # duplicate `.pyc` files due to newest py-ver (3.13) in the build environment not matching the one in host;
[README.md diff, flattened during extraction: every row of the "Variant | Status" table is renamed from `..._channel_targetsconda-forge_main..._is_rcFalse` to `..._channel_targetsconda-forge_pytorch_rc..._is_rcTrue` -- covering linux_64 (blas_impl generic/mkl, with and without CUDA 12.6), linux_aarch64 (with and without CUDA 12.6), osx_64 (blas_impl generic/mkl) and osx_arm64 (numpy 2.0 / python 3.9-3.12 and numpy 2 / python 3.13), and win_64 (with and without CUDA 12.6) -- with the corresponding Azure "variant" status-badge links updated to match.]
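Similarly, the prefix-selection logic that patch 0010 (as robustified in PATCH 39/58) adds to `torch/utils/cpp_extension.py` can be read as the following standalone sketch; `IS_WINDOWS` and the fallback default are assumed from the upstream module, and the function name is hypothetical:

```python
import os

IS_WINDOWS = os.name == "nt"  # upstream defines this in torch/utils/cpp_extension.py

def conda_include_dir(default_include):
    """Mirror the patched include-path selection (illustrative, not verbatim)."""
    # CONDA_BUILD_CROSS_COMPILATION may be unset, "", or "0" when not
    # cross-compiling; PATCH 39/58 treats all three as "false".
    cross = os.environ.get("CONDA_BUILD_CROSS_COMPILATION")
    if os.environ.get("CONDA_BUILD") is not None and cross not in (None, "", "0"):
        # cross-compilation: use BUILD_PREFIX, i.e. the environment of the
        # currently running pytorch (conda-forge/pytorch-cpu-feedstock#349)
        base = os.environ["BUILD_PREFIX"]
    elif os.environ.get("CONDA_BUILD") is not None:
        # regular build (& testing) phase --> PREFIX is set
        base = os.environ["PREFIX"]
    elif os.environ.get("CONDA_PREFIX") is not None:
        # final environment
        base = os.environ["CONDA_PREFIX"]
    else:
        return default_include  # outside conda entirely
    # on Windows, conda environments keep headers under Library\include
    pieces = [base] + IS_WINDOWS * ["Library"] + ["include"]
    return os.path.join(*pieces)
```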