Skip to content
Closed
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 0 additions & 3 deletions .scripts/run_osx_build.sh

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions recipe/activate.bat
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
@echo off

@REM Conda activation hook (Windows) for the libtorch/pytorch package.
@REM Publishes the CUDA arch list the package was built with, unless the
@REM user has already defined CF_TORCH_CUDA_ARCH_LIST themselves.  The
@REM BACKUP sentinel tells the matching deactivation hook that this script
@REM owns the variable and may clear it on `conda deactivate`.
@REM @cf_torch_cuda_arch_list@ is a placeholder replaced with the real
@REM value at build time (see the sed call in bld.bat).
if not defined CF_TORCH_CUDA_ARCH_LIST (
set "CF_TORCH_CUDA_ARCH_LIST=@cf_torch_cuda_arch_list@"
set "CF_TORCH_CUDA_ARCH_LIST_BACKUP=NOT_SET"
)

7 changes: 7 additions & 0 deletions recipe/activate.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash
# Conda activation hook for the libtorch/pytorch package.
#
# Publishes the CUDA arch list this package was built with, unless the
# user already set CF_TORCH_CUDA_ARCH_LIST themselves (the `-v` test
# treats a set-but-empty value as set, so a user's explicit empty value
# is respected).  The BACKUP sentinel tells the deactivation hook that
# this script owns the variable and may unset it on `conda deactivate`.
# @cf_torch_cuda_arch_list@ is a placeholder replaced with the real
# value at build time (see the sed call in build.sh).

if ! [[ -v CF_TORCH_CUDA_ARCH_LIST ]]; then
  export CF_TORCH_CUDA_ARCH_LIST='@cf_torch_cuda_arch_list@'
  export CF_TORCH_CUDA_ARCH_LIST_BACKUP='NOT_SET'
fi
14 changes: 14 additions & 0 deletions recipe/bld.bat
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@ if not "%cuda_compiler_version%" == "None" (
@REM MKLDNN is an Apache-2.0 licensed library for DNNs and is used
@REM for CPU builds. Not to be confused with MKL.
set "USE_MKLDNN=1"
set "TORCH_CUDA_ARCH_LIST="

@REM On windows, env vars are case-insensitive and setup.py
@REM passes all env vars starting with CUDA_*, CMAKE_* to
Expand Down Expand Up @@ -221,6 +222,19 @@ if "%PKG_NAME%" == "libtorch" (
@REM Keep the original backed up to sed later
copy build\CMakeCache.txt build\CMakeCache.txt.orig
if %ERRORLEVEL% neq 0 exit 1

@REM Bake the CUDA arch list used for this build into the activation
@REM hook (replacing the @cf_torch_cuda_arch_list@ placeholder), then
@REM install the activation/deactivation scripts into the package so
@REM conda runs them on every activate/deactivate of the environment.
sed -e "s/@cf_torch_cuda_arch_list@/%TORCH_CUDA_ARCH_LIST%/g" ^
%RECIPE_DIR%\activate.bat > %RECIPE_DIR%\activate-replaced.bat
if %ERRORLEVEL% neq 0 exit 1

mkdir %PREFIX%\etc\conda\activate.d
copy %RECIPE_DIR%\activate-replaced.bat %PREFIX%\etc\conda\activate.d\libtorch_activate.bat
if %ERRORLEVEL% neq 0 exit 1

@REM deactivate.bat contains no placeholder, so it is copied verbatim.
mkdir %PREFIX%\etc\conda\deactivate.d
copy %RECIPE_DIR%\deactivate.bat %PREFIX%\etc\conda\deactivate.d\libtorch_deactivate.bat
if %ERRORLEVEL% neq 0 exit 1

) else if "%PKG_NAME%" == "pytorch" (
@REM Move libtorch_python and remove the other directories afterwards.
robocopy /NP /NFL /NDL /NJH /E %SP_DIR%\torch\bin\ %LIBRARY_BIN%\ torch_python.dll
Expand Down
9 changes: 9 additions & 0 deletions recipe/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,7 @@ else
# for CPU builds. Not to be confused with MKL.
export USE_MKLDNN=1
export USE_CUDA=0
export TORCH_CUDA_ARCH_LIST=""
fi

echo '${CXX}'=${CXX}
Expand All @@ -279,6 +280,14 @@ case ${PKG_NAME} in

# Keep the original backed up to sed later
cp build/CMakeCache.txt build/CMakeCache.txt.orig

# Install the conda activation/deactivation hooks, baking the CUDA arch
# list used for this build into the activate script by replacing its
# @cf_torch_cuda_arch_list@ placeholder.  deactivate.sh contains no
# placeholder, so for it the sed is effectively a plain copy.
for CHANGE in "activate" "deactivate"
do
mkdir -p "${PREFIX}/etc/conda/${CHANGE}.d"
sed -e "s/@cf_torch_cuda_arch_list@/${TORCH_CUDA_ARCH_LIST}/g" \
"${RECIPE_DIR}/${CHANGE}.sh" > "${PREFIX}/etc/conda/${CHANGE}.d/libtorch_${CHANGE}.sh"
done

;;
pytorch)
$PREFIX/bin/python -m pip install . --no-deps --no-build-isolation -v --no-clean --config-settings=--global-option=-q \
Expand Down
6 changes: 6 additions & 0 deletions recipe/deactivate.bat
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
@echo off

@REM Conda deactivation hook (Windows) for the libtorch/pytorch package.
@REM Clears CF_TORCH_CUDA_ARCH_LIST only when the matching activation
@REM hook set it (signalled by the NOT_SET sentinel in the BACKUP
@REM variable); a value the user defined themselves is left untouched.
if "%CF_TORCH_CUDA_ARCH_LIST_BACKUP%" == "NOT_SET" (
set "CF_TORCH_CUDA_ARCH_LIST="
set "CF_TORCH_CUDA_ARCH_LIST_BACKUP="
)
7 changes: 7 additions & 0 deletions recipe/deactivate.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash
# Conda deactivation hook for the libtorch/pytorch package.
#
# Removes CF_TORCH_CUDA_ARCH_LIST only when the matching activation hook
# set it — signalled by the NOT_SET sentinel stored in the BACKUP
# variable.  A value the user exported themselves is left untouched.

case "${CF_TORCH_CUDA_ARCH_LIST_BACKUP:-}" in
  NOT_SET)
    unset CF_TORCH_CUDA_ARCH_LIST
    unset CF_TORCH_CUDA_ARCH_LIST_BACKUP
    ;;
esac
2 changes: 1 addition & 1 deletion recipe/meta.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# if you wish to build release candidate number X, append the version string with ".rcX"
{% set version = "2.9.1" %}
{% set build = 1 %}
{% set build = 2 %}

# Use a higher build number for the CUDA variant, to ensure that it's
# preferred by conda's solver, and it's preferentially
Expand Down
Loading