Skip to content

Commit 2675ef8

Browse files
Revert " [Environment Variable][5/N] Use thread-safe getenv functions (pytorch#139762)"
This reverts commit 43f0fe6. Reverted pytorch#139762 on behalf of https://github.com/malfet due to: One of these diffs had incorrect downstream optional handling; we must re-audit all of these diffs ([comment](pytorch#139762 (comment)))
1 parent 3d61801 commit 2675ef8

File tree

6 files changed

+45
-48
lines changed

6 files changed

+45
-48
lines changed

aten/src/ATen/core/dispatch/Dispatcher.cpp

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
1-
#include <ATen/core/PythonOpRegistrationTrampoline.h>
21
#include <ATen/core/dispatch/Dispatcher.h>
2+
#include <ATen/core/PythonOpRegistrationTrampoline.h>
3+
#include <chrono>
34
#include <list>
5+
#include <sstream>
46
#include <utility>
57

6-
#include <c10/util/env.h>
78
#ifdef FBCODE_CAFFE2
89
#include <c10/util/static_tracepoint.h>
910
#endif
@@ -16,18 +17,18 @@ TORCH_SDT_DEFINE_SEMAPHORE(operator_end)
1617
#endif
1718

1819
bool show_dispatch_trace() {
19-
static const auto envar = c10::utils::get_env("TORCH_SHOW_DISPATCH_TRACE");
20+
static auto envar = std::getenv("TORCH_SHOW_DISPATCH_TRACE");
2021

21-
if (envar.has_value()) {
22-
if (envar == "0") {
22+
if (envar) {
23+
if (strcmp(envar, "0") == 0) {
2324
return false;
2425
}
25-
if (envar == "1") {
26+
if (strcmp(envar, "1") == 0) {
2627
return true;
2728
}
2829
TORCH_WARN(
2930
"ignoring invalid value for TORCH_SHOW_DISPATCH_TRACE: ",
30-
envar.value(),
31+
envar,
3132
" valid values are 0 or 1.");
3233
}
3334

aten/src/ATen/cuda/tunable/Tunable.cpp

Lines changed: 27 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
#include <ATen/cuda/tunable/Tunable.h>
1414
#include <c10/util/Exception.h>
1515
#include <c10/util/StringUtil.h>
16-
#include <c10/util/env.h>
1716
#include <torch/version.h>
1817

1918
#ifndef _WIN32
@@ -434,8 +433,8 @@ void TuningContext::EnableTunableOp(bool value) {
434433
}
435434

436435
bool TuningContext::IsTunableOpEnabled() const {
437-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_ENABLED");
438-
if (env == "1") {
436+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_ENABLED");
437+
if (env != nullptr && strcmp(env, "1") == 0) {
439438
return true;
440439
}
441440
return enable_;
@@ -461,25 +460,25 @@ void TuningContext::EnableRecordUntuned(bool value) {
461460
}
462461

463462
bool TuningContext::IsTuningEnabled() const {
464-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_TUNING");
465-
if (env == "0") {
463+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_TUNING");
464+
if (env != nullptr && strcmp(env, "0") == 0) {
466465
return false;
467466
}
468467
return tuning_enable_;
469468
}
470469

471470
bool TuningContext::IsRecordUntunedEnabled() const {
472-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_RECORD_UNTUNED");
473-
if (env == "1") {
471+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_RECORD_UNTUNED");
472+
if (env != nullptr && strcmp(env, "1") == 0) {
474473
return true;
475474
}
476475
return record_untuned_enable_;
477476
}
478477

479478
std::ofstream& TuningContext::GetUntunedFile(){
480479
if (!untuned_file_.is_open()) {
481-
const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_UNTUNED_FILENAME");
482-
std::string filename = (!env.has_value()) ? "tunableop_untuned.csv" : env.value();
480+
const char *env = std::getenv("PYTORCH_TUNABLEOP_UNTUNED_FILENAME");
481+
std::string filename = (env == nullptr) ? "tunableop_untuned.csv" : env;
483482

484483
std::string device = c10::str(int(c10::cuda::current_device()));
485484
std::size_t found = filename.rfind('.');
@@ -516,9 +515,9 @@ void TuningContext::SetMaxTuningDurationMs(int max_duration_ms) {
516515
}
517516

518517
int TuningContext::GetMaxTuningDurationMs() const {
519-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS");
520-
if (env.has_value()) {
521-
int val = stoi(env.value());
518+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_TUNING_DURATION_MS");
519+
if (env != nullptr) {
520+
int val = atoi(env);
522521
return val < 0 ? 0 : val;
523522
}
524523
return max_tuning_duration_ms_;
@@ -529,9 +528,9 @@ void TuningContext::SetMaxTuningIterations(int max_iter) {
529528
}
530529

531530
int TuningContext::GetMaxTuningIterations() const {
532-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_MAX_TUNING_ITERATIONS");
533-
if (env.has_value()) {
534-
int val = stoi(env.value());
531+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_TUNING_ITERATIONS");
532+
if (env != nullptr) {
533+
int val = atoi(env);
535534
return val < 0 ? 0 : val;
536535
}
537536
return max_tuning_iterations_;
@@ -542,9 +541,9 @@ void TuningContext::SetMaxWarmupDurationMs(int max_duration_ms) {
542541
}
543542

544543
int TuningContext::GetMaxWarmupDurationMs() const {
545-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_MAX_WARMUP_DURATION_MS");
546-
if (env.has_value()) {
547-
int val = stoi(env.value());
544+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_WARMUP_DURATION_MS");
545+
if (env != nullptr) {
546+
int val = atoi(env);
548547
return val < 0 ? 0 : val;
549548
}
550549
return max_warmup_duration_ms_;
@@ -555,9 +554,9 @@ void TuningContext::SetMaxWarmupIterations(int max_iter) {
555554
}
556555

557556
int TuningContext::GetMaxWarmupIterations() const {
558-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_MAX_WARMUP_ITERATIONS");
559-
if (env.has_value()) {
560-
int val = stoi(env.value());
557+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_MAX_WARMUP_ITERATIONS");
558+
if (env != nullptr) {
559+
int val = atoi(env);
561560
return val < 0 ? 0 : val;
562561
}
563562
return max_warmup_iterations_;
@@ -568,8 +567,8 @@ void TuningContext::EnableICacheFlush(bool value) {
568567
}
569568

570569
bool TuningContext::IsICacheFlushEnabled() const {
571-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_ICACHE_FLUSH_ENABLED");
572-
if (env == "0") {
570+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_ICACHE_FLUSH_ENABLED");
571+
if (env != nullptr && strcmp(env, "0") == 0) {
573572
return false;
574573
}
575574
return icache_flush_;
@@ -580,10 +579,10 @@ void TuningContext::SetRotatingBufferSize(int size) {
580579
}
581580

582581
int TuningContext::GetRotatingBufferSize() const {
583-
static const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_ROTATING_BUFFER_SIZE");
584-
if (env.has_value()) {
582+
static const char *env = std::getenv("PYTORCH_TUNABLEOP_ROTATING_BUFFER_SIZE");
583+
if (env != nullptr) {
585584
constexpr int MB = 1024 * 1024;
586-
int val = stoi(env.value());
585+
int val = atoi(env);
587586
return val < 0 ? 0 : val * MB; // env var is specified as MB, returned as bytes
588587
}
589588
else {
@@ -603,8 +602,8 @@ TuningResultsManager& TuningContext::GetTuningResultsManager() {
603602
manager_initialized_ = true;
604603
if (GetFilename().empty()) {
605604
// if SetFilename() was not already called, call it now with the default or env var
606-
const auto env = c10::utils::get_env("PYTORCH_TUNABLEOP_FILENAME");
607-
std::string filename = (!env.has_value()) ? "tunableop_results.csv" : env.value();
605+
const char *env = std::getenv("PYTORCH_TUNABLEOP_FILENAME");
606+
std::string filename = (env == nullptr) ? "tunableop_results.csv" : env;
608607
SetFilename(filename, true);
609608
}
610609
auto filename = GetFilename();

aten/src/ATen/native/LinearAlgebra.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1364,8 +1364,8 @@ static inline int64_t get_mkldnn_matmul_min_dim() {
13641364
//it's enabled on all Neoverse cpus.
13651365
return is_arm_neoverse() ? 8 : 0;
13661366
}();
1367-
const auto ptr = c10::utils::get_env("TORCH_MKLDNN_MATMUL_MIN_DIM");
1368-
return ptr.has_value() ? std::stoi(ptr.value()) : default_min_dim;
1367+
const char* ptr = std::getenv("TORCH_MKLDNN_MATMUL_MIN_DIM");
1368+
return ptr != nullptr ? std::atoi(ptr) : default_min_dim;
13691369
}();
13701370
return value;
13711371
}
@@ -1378,8 +1378,8 @@ static inline int64_t get_mkldnn_matmul_min_size() {
13781378
// it's enabled on all Neoverse cpus.
13791379
return is_arm_neoverse() ? 8 * 1024 : 0;
13801380
}();
1381-
const auto ptr = c10::utils::get_env("TORCH_MKLDNN_MATMUL_MIN_SIZE");
1382-
return ptr.has_value() ? std::stoi(ptr.value()) : default_min_size;
1381+
const char* ptr = std::getenv("TORCH_MKLDNN_MATMUL_MIN_SIZE");
1382+
return ptr != nullptr ? std::atoi(ptr) : default_min_size;
13831383
}();
13841384
return value;
13851385
}

torch/csrc/lazy/core/config.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
#include <c10/util/env.h>
21
#include <torch/csrc/lazy/core/config.h>
32

43
C10_DEFINE_bool(torch_lazy_ir_debug, false, "Enable lazy tensor IR debugging")
@@ -77,9 +76,9 @@ namespace torch::lazy {
7776
std::string& getLTCForceFallback() {
7877
static std::string config;
7978
static bool _ignore = [&]() {
80-
auto env = c10::utils::get_env("LTC_FORCE_FALLBACK");
81-
if (env) {
82-
config = std::move(env.value());
79+
char* envptr = std::getenv("LTC_FORCE_FALLBACK");
80+
if (envptr) {
81+
config = std::string(envptr);
8382
}
8483
return true;
8584
}();

torch/csrc/lazy/core/debug_util.cpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
#include <c10/util/env.h>
21
#include <c10/util/irange.h>
32
#include <torch/csrc/lazy/core/debug_util.h>
43

@@ -18,8 +17,8 @@ namespace torch::lazy {
1817
namespace {
1918

2019
std::string GetEnvString(const char* name, const std::string& defval) {
21-
const auto env = c10::utils::get_env(name);
22-
return env.has_value() ? env.value() : defval;
20+
const char* env = std::getenv(name);
21+
return env != nullptr ? env : defval;
2322
}
2423

2524
DebugUtil::GraphFormat DefaultGraphFormat() {

torch/csrc/lazy/core/shape.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
#include <c10/util/env.h>
21
#include <c10/util/irange.h>
32
#include <torch/csrc/lazy/core/shape.h>
43
#include <torch/csrc/lazy/core/tensor.h>
@@ -59,7 +58,7 @@ Shape Shape::with_symbolic_dims(
5958
}
6059

6160
bool symbolicShapeEnabled() {
62-
static const bool enabled = c10::utils::has_env("LTC_ENABLE_SYMBOLIC_SHAPES");
61+
static bool enabled = std::getenv("LTC_ENABLE_SYMBOLIC_SHAPES") != nullptr;
6362
return enabled || FLAGS_ltc_enable_symbolic_shapes;
6463
}
6564

0 commit comments

Comments (0)