Skip to content

Commit a58a565

Browse files
Revert "[Environment Variable][6/N] Use thread-safe getenv functions (pytorch#140200)"
This reverts commit 7d4f5f7. Reverted pytorch#140200 on behalf of https://github.com/ezyang due to One of these diffs had incorrect downstream optional handling, we must reaudit all of these diffs ([comment](pytorch#140200 (comment)))
1 parent 5dc6b8c commit a58a565

File tree

14 files changed

+80
-80
lines changed

14 files changed

+80
-80
lines changed

aten/src/ATen/core/type.cpp

+2-3
Original file line number · Diff line number · Diff line change
@@ -7,7 +7,6 @@
77
#include <ATen/core/grad_mode.h>
88
#include <ATen/core/jit_type.h>
99
#include <c10/macros/Macros.h>
10-
#include <c10/util/env.h>
1110
#include <c10/util/flat_hash_map.h>
1211
#include <c10/util/irange.h>
1312
#include <array>
@@ -46,9 +45,9 @@ static_assert(
4645
"getTypePtr<std::tuple<int64_t, int64_t>> not returning const ref!");
4746

4847
TypeVerbosity type_verbosity() {
49-
static const auto c_verbosity = c10::utils::get_env("PYTORCH_JIT_TYPE_VERBOSITY");
48+
static const char* c_verbosity = std::getenv("PYTORCH_JIT_TYPE_VERBOSITY");
5049
static TypeVerbosity verbosity = c_verbosity ?
51-
static_cast<TypeVerbosity>(std::stoi(c_verbosity.value())) : TypeVerbosity::Default;
50+
static_cast<TypeVerbosity>(std::stoi(c_verbosity)) : TypeVerbosity::Default;
5251
return verbosity;
5352
}
5453

aten/src/ATen/native/DispatchStub.cpp

+9-10
Original file line number · Diff line number · Diff line change
@@ -3,7 +3,6 @@
33

44
#include <c10/core/DeviceType.h>
55
#include <c10/util/Exception.h>
6-
#include <c10/util/env.h>
76

87
#if !defined(__s390x__) && !defined(__powerpc__)
98
#include <cpuinfo.h>
@@ -25,20 +24,20 @@ static inline bool cpu_has_vxe()
2524
#endif
2625

2726
static CPUCapability compute_cpu_capability() {
28-
const auto envar = c10::utils::get_env("ATEN_CPU_CAPABILITY");
29-
if (envar.has_value()) {
27+
auto envar = std::getenv("ATEN_CPU_CAPABILITY");
28+
if (envar) {
3029
#if defined(HAVE_VSX_CPU_DEFINITION)
31-
if (envar == "vsx") {
30+
if (strcmp(envar, "vsx") == 0) {
3231
return CPUCapability::VSX;
3332
}
3433
#elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
35-
if (envar == "zvector") {
34+
if (strcmp(envar, "zvector") == 0) {
3635
return CPUCapability::ZVECTOR;
3736
}
3837
#elif defined(HAVE_SVE_CPU_DEFINITION)
3938
int sve_vl = cpuinfo_get_max_arm_sve_length(); //Returns maximum SVE VL supported by your HW.
4039
#ifdef HAVE_SVE256_CPU_DEFINITION
41-
if (envar == "sve256") {
40+
if (strcmp(envar, "sve256") == 0) {
4241
if (sve_vl == 256) {
4342
return CPUCapability::SVE256;
4443
}
@@ -48,20 +47,20 @@ static CPUCapability compute_cpu_capability() {
4847
#endif
4948
#else
5049
#ifdef HAVE_AVX512_CPU_DEFINITION
51-
if (envar == "avx512") {
50+
if (strcmp(envar, "avx512") == 0) {
5251
return CPUCapability::AVX512;
5352
}
5453
#endif
5554
#ifdef HAVE_AVX2_CPU_DEFINITION
56-
if (envar == "avx2") {
55+
if (strcmp(envar, "avx2") == 0) {
5756
return CPUCapability::AVX2;
5857
}
5958
#endif
6059
#endif
61-
if (envar == "default") {
60+
if (strcmp(envar, "default") == 0) {
6261
return CPUCapability::DEFAULT;
6362
}
64-
TORCH_WARN("ignoring invalid value for ATEN_CPU_CAPABILITY: ", envar.value());
63+
TORCH_WARN("ignoring invalid value for ATEN_CPU_CAPABILITY: ", envar);
6564
}
6665

6766
#if !defined(__powerpc__) && !defined(__s390x__) && !defined(HAVE_SVE_CPU_DEFINITION)

aten/src/ATen/native/Linear.cpp

+10-2
Original file line number · Diff line number · Diff line change
@@ -40,8 +40,16 @@ namespace at::native {
4040
// Parse environment variable "TORCH_LINEAR_FLATTEN_3D"
4141
static inline bool parseLinearFlatten3d() {
4242
// Uninitialized value
43-
static auto value = c10::utils::check_env("TORCH_LINEAR_FLATTEN_3D");
44-
return value.has_value() && value.value();
43+
static int value = -1;
44+
if (value == -1) {
45+
const char* env_str = std::getenv("TORCH_LINEAR_FLATTEN_3D");
46+
if (env_str != nullptr && strcmp(env_str, "1") == 0) {
47+
value = 1;
48+
} else {
49+
value = 0;
50+
}
51+
}
52+
return bool(value);
4553
}
4654

4755
// `_flatten_nd_linear` flattens all but the last dimension of the input tensor

aten/src/ATen/native/cuda/Blas.cpp

+2-2
Original file line number · Diff line number · Diff line change
@@ -179,8 +179,8 @@ cuda::blas::GEMMAndBiasActivationEpilogue activation_to_gemm_and_blas_arg(Activa
179179
}
180180

181181
static bool getDisableAddmmCudaLt() {
182-
static const auto env_value = c10::utils::get_env("DISABLE_ADDMM_CUDA_LT");
183-
if (env_value == "1") {
182+
static const char* env_value = std::getenv("DISABLE_ADDMM_CUDA_LT");
183+
if (env_value != nullptr && strcmp(env_value, "1") == 0) {
184184
return true;
185185
}
186186
return false;

aten/src/ATen/native/cuda/jit_utils.cpp

+13-13
Original file line number · Diff line number · Diff line change
@@ -1395,40 +1395,40 @@ std::string generate_reduction_code(
13951395
// Acquires (possibly creating) the kernel cache directory
13961396
std::optional<std::string> get_cache_dir() {
13971397
// If the environment variable USE_TORCH_KERNEL_CACHE is set to "0" then no persistent cache is used
1398-
const auto uptkc = c10::utils::get_env("USE_PYTORCH_KERNEL_CACHE");
1399-
const bool use_kernel_cache = (uptkc != "0");
1398+
const char* uptkc = std::getenv("USE_PYTORCH_KERNEL_CACHE");
1399+
const bool use_kernel_cache = (uptkc == nullptr) ? true : std::strcmp(uptkc, "0");
14001400

14011401
if (!use_kernel_cache) {
14021402
return {};
14031403
}
14041404

14051405
// Cache path comes from PYTORCH_KERNEL_CACHE_PATH, then TEMP (Windows) or XDG_CACHE_HOME (Linux), then HOME environment variables
14061406
std::string cache_dir;
1407-
auto ptkcp = c10::utils::get_env("PYTORCH_KERNEL_CACHE_PATH");
1407+
char* ptkcp = std::getenv("PYTORCH_KERNEL_CACHE_PATH");
14081408
// Create kernel_cache_dir if needed as we do not want to create the base directory passed by the user
14091409
std::string kernels_cache_dir = "";
1410-
if (ptkcp.has_value()) {
1411-
cache_dir = ptkcp.value();
1410+
if (ptkcp != nullptr) {
1411+
cache_dir = std::string(ptkcp);
14121412
} else {
14131413
#ifdef _WIN32
1414-
ptkcp = c10::utils::get_env("TEMP");
1414+
ptkcp = std::getenv("TEMP");
14151415
#else
14161416
// USES XDG_CACHE_HOME if it's set
1417-
ptkcp = c10::utils::get_env("XDG_CACHE_HOME");
1417+
ptkcp = std::getenv("XDG_CACHE_HOME");
14181418
#endif
1419-
if (ptkcp.has_value()) {
1419+
if (ptkcp != nullptr) {
14201420
kernels_cache_dir = "/torch/kernels";
1421-
cache_dir = ptkcp.value() + kernels_cache_dir;
1421+
cache_dir = std::string(ptkcp) + kernels_cache_dir;
14221422
} else {
14231423
// Falls back to HOME/.cache
1424-
ptkcp = c10::utils::get_env("HOME");
1425-
if (ptkcp.has_value()) {
1424+
ptkcp = std::getenv("HOME");
1425+
if (ptkcp == nullptr) {
14261426
TORCH_WARN_ONCE("No PYTORCH_KERNEL_CACHE_PATH or HOME environment variable set!",
14271427
" This disables kernel caching.");
14281428
return {};
14291429
} else {
14301430
kernels_cache_dir = "/.cache/torch/kernels";
1431-
cache_dir = ptkcp.value() + kernels_cache_dir;
1431+
cache_dir = std::string(ptkcp) + kernels_cache_dir;
14321432
}
14331433
}
14341434
}
@@ -1437,7 +1437,7 @@ std::optional<std::string> get_cache_dir() {
14371437
const char* p_cache_dir = cache_dir.c_str();
14381438
const bool cache_dir_exists = (access(p_cache_dir, F_OK) == 0);
14391439
if (!cache_dir_exists) {
1440-
std::string s_ptkcp = ptkcp.value();
1440+
std::string s_ptkcp = std::string(ptkcp);
14411441
if (!r_mkdir_with_base(s_ptkcp, kernels_cache_dir)) {
14421442
TORCH_WARN_ONCE("Specified kernel cache directory could not be created! This disables kernel caching.",
14431443
" Specified directory is ", cache_dir, ".",

c10/util/env.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -77,10 +77,10 @@ bool has_env(const char* name) noexcept {
7777
std::optional<bool> check_env(const char* name) {
7878
auto env_opt = get_env(name);
7979
if (env_opt.has_value()) {
80-
if (env_opt == "0") {
80+
if (*env_opt == "0") {
8181
return false;
8282
}
83-
if (env_opt == "1") {
83+
if (*env_opt == "1") {
8484
return true;
8585
}
8686
TORCH_WARN(

torch/csrc/distributed/c10d/Utils.hpp

+3-3
Original file line number · Diff line number · Diff line change
@@ -128,15 +128,15 @@ inline int getCvarInt(const std::vector<std::string>& env, int def) {
128128
* versions of a variable get higher priority than the latter
129129
* versions of the same variable */
130130
for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
131-
const auto val = c10::utils::get_env(env[i].c_str());
132-
if (!val.has_value()) {
131+
char* val = std::getenv(env[i].c_str());
132+
if (val == nullptr) {
133133
continue;
134134
} else if (i) {
135135
WARN_ENV_VAR_ONCE(env[i], env[0]);
136136
}
137137

138138
try {
139-
ret = std::stoi(val.value());
139+
ret = std::stoi(val);
140140
} catch (std::exception&) {
141141
TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
142142
}

torch/csrc/jit/frontend/ir_emitter.cpp

+5-5
Original file line number · Diff line number · Diff line change
@@ -3,7 +3,6 @@
33

44
#include <c10/util/Exception.h>
55
#include <c10/util/StringUtil.h>
6-
#include <c10/util/env.h>
76
#include <c10/util/irange.h>
87
#include <caffe2/serialize/versions.h>
98
#include <torch/csrc/jit/api/function_impl.h>
@@ -48,11 +47,12 @@ bool reportSourceLocation(size_t file_size) {
4847
if (file_size < 512ull * 1024) {
4948
return true;
5049
}
51-
const auto enable_env =
52-
c10::utils::get_env("PYTORCH_JIT_ENABLE_LARGE_SOURCE_LOCATION");
50+
const char* enable_env =
51+
std::getenv("PYTORCH_JIT_ENABLE_LARGE_SOURCE_LOCATION");
5352
bool flag = true;
54-
if (!enable_env.has_value() || enable_env == "0" || enable_env == "FALSE" ||
55-
enable_env == "false") {
53+
if (enable_env == nullptr || std::strcmp(enable_env, "0") == 0 ||
54+
std::strcmp(enable_env, "FALSE") == 0 ||
55+
std::strcmp(enable_env, "false") == 0) {
5656
flag = false;
5757
}
5858
return flag;

torch/csrc/jit/jit_log.cpp

+2-5
Original file line number · Diff line number · Diff line change
@@ -9,7 +9,6 @@
99
#include <ATen/core/function.h>
1010
#include <c10/util/Exception.h>
1111
#include <c10/util/StringUtil.h>
12-
#include <c10/util/env.h>
1312
#include <torch/csrc/jit/api/function_impl.h>
1413
#include <torch/csrc/jit/frontend/error_report.h>
1514
#include <torch/csrc/jit/ir/ir.h>
@@ -33,10 +32,8 @@ class JitLoggingConfig {
3332
std::ostream* out;
3433

3534
JitLoggingConfig() : out(&std::cerr) {
36-
const auto jit_log_level = c10::utils::get_env("PYTORCH_JIT_LOG_LEVEL");
37-
if (jit_log_level.has_value()) {
38-
logging_levels = jit_log_level.value();
39-
}
35+
const char* jit_log_level = std::getenv("PYTORCH_JIT_LOG_LEVEL");
36+
logging_levels.assign(jit_log_level == nullptr ? "" : jit_log_level);
4037

4138
parse();
4239
}

torch/csrc/jit/jit_opt_limit.cpp

+5-4
Original file line number · Diff line number · Diff line change
@@ -1,12 +1,13 @@
11
#include <cstdlib>
2+
#include <iomanip>
23
#include <sstream>
34
#include <string>
45
#include <utility>
6+
#include <vector>
57

68
#include <ATen/core/function.h>
79
#include <c10/util/Exception.h>
810
#include <c10/util/StringUtil.h>
9-
#include <c10/util/env.h>
1011
#include <torch/csrc/jit/api/function_impl.h>
1112
#include <torch/csrc/jit/jit_opt_limit.h>
1213

@@ -48,14 +49,14 @@ static std::unordered_map<std::string, int64_t> parseJITOptLimitOption(
4849
}
4950

5051
bool opt_limit(const char* pass_name) {
51-
static const auto opt_limit = c10::utils::get_env("PYTORCH_JIT_OPT_LIMIT");
52+
static const char* opt_limit = std::getenv("PYTORCH_JIT_OPT_LIMIT");
5253
// if nothing is provided, let's allow everything
53-
if (!opt_limit.has_value()) {
54+
if (!opt_limit) {
5455
return true;
5556
}
5657

5758
static const std::unordered_map<std::string, int64_t> passes_to_opt_limits =
58-
parseJITOptLimitOption(opt_limit->c_str());
59+
parseJITOptLimitOption(opt_limit);
5960
std::string pass{pass_name};
6061
pass = c10::detail::StripBasename(pass);
6162
pass = c10::detail::ExcludeFileExtension(pass);

torch/csrc/jit/passes/tensorexpr_fuser.cpp

+6-6
Original file line number · Diff line number · Diff line change
@@ -155,11 +155,11 @@ void setTensorExprFuserEnabled(bool val) {
155155
}
156156

157157
bool tensorExprFuserEnabled() {
158-
static const auto enable_opt = c10::utils::get_env("PYTORCH_TENSOREXPR");
159-
if (!enable_opt.has_value()) {
158+
static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR");
159+
if (!enable_c_str) {
160160
return texpr_fuser_enabled_;
161161
}
162-
if (enable_opt == "0") {
162+
if (std::string(enable_c_str) == "0") {
163163
return false;
164164
}
165165
return true;
@@ -1293,10 +1293,10 @@ class TensorExprFuser {
12931293
// 'PYTORCH_TENSOREXPR_DONT_FUSE="clamp:mul:add"' disables fusion on
12941294
// aten::clamp, aten::mul and aten::add.
12951295
void parseTENotFuseOption() {
1296-
const auto option = c10::utils::get_env("PYTORCH_TENSOREXPR_DONT_FUSE");
1296+
const char* option = std::getenv("PYTORCH_TENSOREXPR_DONT_FUSE");
12971297
std::stringstream in_ss;
1298-
if (option.has_value()) {
1299-
in_ss << option.value();
1298+
if (option) {
1299+
in_ss << option;
13001300
}
13011301

13021302
std::string line;

torch/csrc/jit/runtime/graph_executor.cpp

+1-1
Original file line number · Diff line number · Diff line change
@@ -864,7 +864,7 @@ bool GraphExecutor::isOptimized() const {
864864

865865
TORCH_API bool IsNewExecutorEnabled() {
866866
static const auto disable_new_executor =
867-
c10::utils::has_env("TORCH_JIT_DISABLE_NEW_EXECUTOR");
867+
std::getenv("TORCH_JIT_DISABLE_NEW_EXECUTOR");
868868
return getExecutorMode() && FLAGS_torch_jit_enable_new_executor &&
869869
!disable_new_executor;
870870
}

torch/csrc/jit/tensorexpr/kernel.cpp

+14-16
Original file line number · Diff line number · Diff line change
@@ -54,49 +54,47 @@ bool setFallbackAllowed(bool value) {
5454
}
5555

5656
bool fallbackAllowed() {
57-
static const auto enable_opt =
58-
c10::utils::get_env("PYTORCH_TENSOREXPR_FALLBACK");
59-
if (!enable_opt.has_value()) {
57+
static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR_FALLBACK");
58+
if (!enable_c_str) {
6059
return fallback_allowed;
6160
}
62-
if (enable_opt == "0") {
61+
if (std::string(enable_c_str) == "0") {
6362
return false;
6463
}
6564
return true;
6665
}
6766

6867
static bool fallbackEnforced() {
69-
static const auto enable_opt =
70-
c10::utils::get_env("PYTORCH_TENSOREXPR_FALLBACK");
68+
static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR_FALLBACK");
7169
if (tensorexpr::getTEGenerateBlockCode()) {
7270
return false;
7371
}
74-
if (!enable_opt.has_value()) {
72+
if (!enable_c_str) {
7573
return fallback_allowed;
7674
}
77-
if (enable_opt == "2") {
75+
if (std::string(enable_c_str) == "2") {
7876
return true;
7977
}
8078
return false;
8179
}
8280

8381
static int64_t randomTransformsRequested() {
84-
const auto enable_opt =
85-
c10::utils::get_env("PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED");
86-
if (!enable_opt.has_value()) {
82+
const char* enable_c_str =
83+
std::getenv("PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED");
84+
if (!enable_c_str) {
8785
return 0;
8886
}
89-
return std::stoi(enable_opt.value());
87+
return std::stoi(std::string(enable_c_str));
9088
}
9189

9290
#ifdef TORCH_ENABLE_LLVM
9391
static bool dontUseLLVMFlag() {
94-
static const auto enable_opt =
95-
c10::utils::get_env("PYTORCH_TENSOREXPR_DONT_USE_LLVM");
96-
if (!enable_opt) {
92+
static const char* enable_c_str =
93+
std::getenv("PYTORCH_TENSOREXPR_DONT_USE_LLVM");
94+
if (!enable_c_str) {
9795
return false;
9896
}
99-
return enable_opt == "1";
97+
return std::string(enable_c_str) == "1";
10098
}
10199
#endif
102100

0 commit comments

Comments (0)