
Commit 38f3d1f

Yangqing authored and facebook-github-bot committed
move flags to c10 (pytorch#12144)
Summary: still in flux.
Pull Request resolved: pytorch#12144
Reviewed By: smessmer
Differential Revision: D10140176
Pulled By: Yangqing
fbshipit-source-id: 1a313abed022039333e3925d19f8b3ef2d95306c
1 parent c9f7d7b commit 38f3d1f

180 files changed, +1342 -1260 lines
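
The change is mechanical across the tree: the flag macros move from caffe2/core/flags.h to c10/util/Flags.h, CAFFE2_DEFINE_*/CAFFE2_DECLARE_* become C10_DEFINE_*/C10_DECLARE_*, and the generated FLAGS_* globals move from the caffe2 namespace to c10. A minimal before/after sketch of the pattern, based on the diffs below (the flag name example_flag is hypothetical, used only for illustration):

// Before this commit:
//   #include "caffe2/core/flags.h"
//   CAFFE2_DEFINE_int(example_flag, 10, "A hypothetical flag.");
//   int v = caffe2::FLAGS_example_flag;
//
// After this commit:
#include "c10/util/Flags.h"

C10_DEFINE_int(example_flag, 10, "A hypothetical flag.");

int read_example_flag() {
  return c10::FLAGS_example_flag;  // FLAGS_* now live in namespace c10
}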


aten/src/ATen/core/C++17.h (-1)

@@ -251,5 +251,4 @@ template<class T> inline std::string to_string(T value) {
   return detail::to_string_<T>::call(value);
 }
 
-
 }}

aten/src/ATen/core/TensorImpl.h (+11 -8)

@@ -11,9 +11,10 @@
 #include <ATen/core/context_base.h>
 #include <ATen/core/optional.h>
 
+#include "c10/util/Flags.h"
+
 #include "caffe2/core/allocator.h"
 #include "caffe2/core/common.h"
-#include "caffe2/core/flags.h"
 #include "caffe2/core/logging.h"
 
 // A global boolean variable to control whether we free memory when a Tensor
@@ -23,14 +24,13 @@
 // This parameter is respected "upper-case" methods which call Resize()
 // (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_
 // or ShrinkTo, both of which guarantee to never to free memory.
-CAFFE2_DECLARE_bool(caffe2_keep_on_shrink);
+C10_DECLARE_bool(caffe2_keep_on_shrink);
 
 // Since we can have high variance in blob memory allocated across different
 // inputs in the same run, we will shrink the blob only if the memory gain
 // is larger than this flag in bytes. This only applies to functions which
 // respect caffe2_keep_on_shrink.
-CAFFE2_DECLARE_int64(caffe2_max_keep_on_shrink_memory);
-
+C10_DECLARE_int64(caffe2_max_keep_on_shrink_memory);
 
 namespace caffe2 {
 
@@ -604,10 +604,13 @@ struct CAFFE2_API TensorImpl : public c10::intrusive_ptr_target {
       // is smaller than new size
       reset_tensor = storage_.capacity() < (storage_offset_ + numel_) * storage_.itemsize();
     } else {
-      reset_tensor = storage_.capacity() < (storage_offset_ + numel_) * storage_.itemsize() ||
-          !caffe2::FLAGS_caffe2_keep_on_shrink ||
-          storage_.capacity() - (storage_offset_ + numel_) * storage_.itemsize() >
-          static_cast<size_t>(caffe2::FLAGS_caffe2_max_keep_on_shrink_memory);
+      reset_tensor = storage_.capacity() <
+              (storage_offset_ + numel_) * storage_.itemsize() ||
+          !c10::FLAGS_caffe2_keep_on_shrink ||
+          storage_.capacity() -
+              (storage_offset_ + numel_) * storage_.itemsize() >
+          static_cast<size_t>(
+              c10::FLAGS_caffe2_max_keep_on_shrink_memory);
     }
 
     if (reset_tensor && !is_init) {
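
For readers unfamiliar with these macros, the declare/define split works the same way under c10 as it did under caffe2: headers declare a flag, exactly one translation unit defines it, and callers read c10::FLAGS_*. A sketch of how the two shrink flags above fit together (sketch only: the default values here are placeholders, not the defaults caffe2 actually uses, and the real definitions live elsewhere in the tree):

#include <cstddef>
#include "c10/util/Flags.h"

// Placeholder defaults, for illustration only.
C10_DEFINE_bool(caffe2_keep_on_shrink, true, "Keep memory when tensors shrink.");
C10_DEFINE_int64(
    caffe2_max_keep_on_shrink_memory,
    -1,
    "Max bytes wasted before a shrinking tensor is reallocated.");

// Mirrors the reset_tensor condition shown in the hunk above.
bool should_reset(size_t capacity, size_t needed_bytes) {
  return capacity < needed_bytes ||
      !c10::FLAGS_caffe2_keep_on_shrink ||
      capacity - needed_bytes >
          static_cast<size_t>(c10::FLAGS_caffe2_max_keep_on_shrink_memory);
}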

aten/src/ATen/mkl/README.md (+1 -1)

@@ -1,4 +1,4 @@
 All files living in this directory are written with the assumption that MKL is available,
 which means that these code are not guarded by `#if AT_MKL_ENABLED()`. Therefore, whenever
 you need to use definitions from here, please guard the `#include<ATen/mkl/*.h>` and
-definition usages with `#if AT_MKL_ENABLED()` macro, e.g. [SpectralOps.cpp](native/mkl/SpectralOps.cpp).
+definition usages with `#if AT_MKL_ENABLED()` macro, e.g. [SpectralOps.cpp](native/mkl/SpectralOps.cpp).
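
As a concrete illustration of the guideline above, a caller outside this directory would guard both the include and the use. A minimal sketch (the specific header and function names below are hypothetical):

#include <ATen/Config.h>  // assumed to provide the AT_MKL_ENABLED() macro

#if AT_MKL_ENABLED()
#include <ATen/mkl/Descriptors.h>  // hypothetical ATen/mkl/*.h header
#endif

void run_mkl_path_if_available() {
#if AT_MKL_ENABLED()
  // ... use MKL-backed definitions here, as SpectralOps.cpp does ...
#else
  // fall back, or report that ATen was built without MKL
#endif
}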

aten/src/TH/generic/simd/convolve.h (+1 -1)

@@ -1 +1 @@
-void convolve_5x5(float* output, float* input, float* kernel, int64_t outRows, int64_t outCols, int64_t inCols);
+void convolve_5x5(float* output, float* input, float* kernel, int64_t outRows, int64_t outCols, int64_t inCols);

aten/src/TH/generic/simd/convolve5x5_avx.cpp (+1 -1)

@@ -211,4 +211,4 @@ void convolve_5x5_avx(float* output, float* input, float* kernel, int64_t outRow
   CLEAR_AVX();
   convolve_5x5_sse(&output[procCols], &input[procCols], kernel, outRows, remCols, outStride, inCols);
 }
-}
+}

aten/src/THCUNN/generic/SpatialFullConvolution.cu (+1 -1)

@@ -58,4 +58,4 @@ void THNN_(SpatialFullConvolution_accGradParameters)(
     kW, kH, dW, dH, padW, padH, 1, 1, adjW, adjH, scale_);
 }
 
-#endif
+#endif

binaries/caffe2_benchmark.cc (+30 -33)

@@ -9,69 +9,66 @@ using std::map;
 using std::string;
 using std::vector;
 
-CAFFE2_DEFINE_string(
+C10_DEFINE_string(
     backend,
     "builtin",
     "The backend to use when running the model. The allowed "
     "backend choices are: builtin, default, nnpack, eigen, mkl, cuda");
 
-CAFFE2_DEFINE_string(
-    init_net,
-    "",
-    "The given net to initialize any parameters.");
-CAFFE2_DEFINE_string(
+C10_DEFINE_string(init_net, "", "The given net to initialize any parameters.");
+C10_DEFINE_string(
     input,
     "",
     "Input that is needed for running the network. If "
     "multiple input needed, use comma separated string.");
-CAFFE2_DEFINE_string(
+C10_DEFINE_string(
     input_dims,
     "",
     "Alternate to input_files, if all inputs are simple "
     "float TensorCPUs, specify the dimension using comma "
     "separated numbers. If multiple input needed, use "
    "semicolon to separate the dimension of different "
     "tensors.");
-CAFFE2_DEFINE_string(
+C10_DEFINE_string(
     input_file,
     "",
     "Input file that contain the serialized protobuf for "
     "the input blobs. If multiple input needed, use comma "
     "separated string. Must have the same number of items "
     "as input does.");
-CAFFE2_DEFINE_string(
+C10_DEFINE_string(
     input_type,
     "float",
     "Input type when specifying the input dimension."
     "The supported types are float, uint8_t.");
-CAFFE2_DEFINE_int(iter, 10, "The number of iterations to run.");
-CAFFE2_DEFINE_string(net, "", "The given net to benchmark.");
-CAFFE2_DEFINE_string(
+C10_DEFINE_int(iter, 10, "The number of iterations to run.");
+C10_DEFINE_string(net, "", "The given net to benchmark.");
+C10_DEFINE_string(
     output,
     "",
     "Output that should be dumped after the execution "
     "finishes. If multiple outputs are needed, use comma "
     "separated string. If you want to dump everything, pass "
     "'*' as the output value.");
-CAFFE2_DEFINE_string(
+C10_DEFINE_string(
     output_folder,
     "",
     "The folder that the output should be written to. This "
     "folder must already exist in the file system.");
-CAFFE2_DEFINE_bool(
+C10_DEFINE_bool(
     run_individual,
     false,
     "Whether to benchmark individual operators.");
-CAFFE2_DEFINE_int(
+C10_DEFINE_int(
     sleep_before_run,
     0,
     "The seconds to sleep before starting the benchmarking.");
-CAFFE2_DEFINE_bool(
+C10_DEFINE_bool(
     text_output,
     false,
     "Whether to write out output in text format for regression purpose.");
-CAFFE2_DEFINE_int(warmup, 0, "The number of iterations to warm up.");
-CAFFE2_DEFINE_bool(
+C10_DEFINE_int(warmup, 0, "The number of iterations to warm up.");
+C10_DEFINE_bool(
     wipe_cache,
     false,
     "Whether to evict the cache before running network.");
@@ -81,19 +78,19 @@ int main(int argc, char** argv) {
   benchmark(
       argc,
       argv,
-      caffe2::FLAGS_backend,
-      caffe2::FLAGS_init_net,
-      caffe2::FLAGS_input,
-      caffe2::FLAGS_input_dims,
-      caffe2::FLAGS_input_file,
-      caffe2::FLAGS_input_type,
-      caffe2::FLAGS_iter,
-      caffe2::FLAGS_net,
-      caffe2::FLAGS_output,
-      caffe2::FLAGS_output_folder,
-      caffe2::FLAGS_run_individual,
-      caffe2::FLAGS_sleep_before_run,
-      caffe2::FLAGS_text_output,
-      caffe2::FLAGS_warmup,
-      caffe2::FLAGS_wipe_cache);
+      c10::FLAGS_backend,
+      c10::FLAGS_init_net,
+      c10::FLAGS_input,
+      c10::FLAGS_input_dims,
+      c10::FLAGS_input_file,
+      c10::FLAGS_input_type,
+      c10::FLAGS_iter,
+      c10::FLAGS_net,
+      c10::FLAGS_output,
+      c10::FLAGS_output_folder,
+      c10::FLAGS_run_individual,
+      c10::FLAGS_sleep_before_run,
+      c10::FLAGS_text_output,
+      c10::FLAGS_warmup,
+      c10::FLAGS_wipe_cache);
 }
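
These flags are ordinary process-wide globals: caffe2::GlobalInit parses them out of argv, after which the binary reads c10::FLAGS_* directly, as the main() above does. A stripped-down sketch of that flow (the init.h include path is my assumption about where GlobalInit is declared):

#include "c10/util/Flags.h"
#include "caffe2/core/init.h"  // assumed location of caffe2::GlobalInit

C10_DEFINE_int(iter, 10, "The number of iterations to run.");

int main(int argc, char** argv) {
  // Parses --iter (and every other registered flag) from the command line.
  caffe2::GlobalInit(&argc, &argv);
  for (int i = 0; i < c10::FLAGS_iter; ++i) {
    // run one benchmark iteration here
  }
  return 0;
}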

binaries/convert_caffe_image_db.cc (+8 -8)

@@ -20,11 +20,11 @@
 #include "caffe2/proto/caffe2_legacy.pb.h"
 #include "caffe2/core/logging.h"
 
-CAFFE2_DEFINE_string(input_db, "", "The input db.");
-CAFFE2_DEFINE_string(input_db_type, "", "The input db type.");
-CAFFE2_DEFINE_string(output_db, "", "The output db.");
-CAFFE2_DEFINE_string(output_db_type, "", "The output db type.");
-CAFFE2_DEFINE_int(batch_size, 1000, "The write batch size.");
+C10_DEFINE_string(input_db, "", "The input db.");
+C10_DEFINE_string(input_db_type, "", "The input db type.");
+C10_DEFINE_string(output_db, "", "The output db.");
+C10_DEFINE_string(output_db_type, "", "The output db type.");
+C10_DEFINE_int(batch_size, 1000, "The write batch size.");
 
 using caffe2::db::Cursor;
 using caffe2::db::DB;
@@ -37,9 +37,9 @@ int main(int argc, char** argv) {
   caffe2::GlobalInit(&argc, &argv);
 
   std::unique_ptr<DB> in_db(caffe2::db::CreateDB(
-      caffe2::FLAGS_input_db_type, caffe2::FLAGS_input_db, caffe2::db::READ));
+      c10::FLAGS_input_db_type, c10::FLAGS_input_db, caffe2::db::READ));
   std::unique_ptr<DB> out_db(caffe2::db::CreateDB(
-      caffe2::FLAGS_output_db_type, caffe2::FLAGS_output_db, caffe2::db::NEW));
+      c10::FLAGS_output_db_type, c10::FLAGS_output_db, caffe2::db::NEW));
   std::unique_ptr<Cursor> cursor(in_db->NewCursor());
   std::unique_ptr<Transaction> transaction(out_db->NewTransaction());
   int count = 0;
@@ -80,7 +80,7 @@ int main(int argc, char** argv) {
       data->set_byte_data(buffer, datum.data().size());
     }
     transaction->Put(cursor->key(), protos.SerializeAsString());
-    if (++count % caffe2::FLAGS_batch_size == 0) {
+    if (++count % c10::FLAGS_batch_size == 0) {
       transaction->Commit();
       LOG(INFO) << "Converted " << count << " items so far.";
     }

binaries/convert_db.cc (+8 -8)

@@ -19,11 +19,11 @@
 #include "caffe2/proto/caffe2_pb.h"
 #include "caffe2/core/logging.h"
 
-CAFFE2_DEFINE_string(input_db, "", "The input db.");
-CAFFE2_DEFINE_string(input_db_type, "", "The input db type.");
-CAFFE2_DEFINE_string(output_db, "", "The output db.");
-CAFFE2_DEFINE_string(output_db_type, "", "The output db type.");
-CAFFE2_DEFINE_int(batch_size, 1000, "The write batch size.");
+C10_DEFINE_string(input_db, "", "The input db.");
+C10_DEFINE_string(input_db_type, "", "The input db type.");
+C10_DEFINE_string(output_db, "", "The output db.");
+C10_DEFINE_string(output_db_type, "", "The output db type.");
+C10_DEFINE_int(batch_size, 1000, "The write batch size.");
 
 using caffe2::db::Cursor;
 using caffe2::db::DB;
@@ -33,15 +33,15 @@ int main(int argc, char** argv) {
   caffe2::GlobalInit(&argc, &argv);
 
   std::unique_ptr<DB> in_db(caffe2::db::CreateDB(
-      caffe2::FLAGS_input_db_type, caffe2::FLAGS_input_db, caffe2::db::READ));
+      c10::FLAGS_input_db_type, c10::FLAGS_input_db, caffe2::db::READ));
   std::unique_ptr<DB> out_db(caffe2::db::CreateDB(
-      caffe2::FLAGS_output_db_type, caffe2::FLAGS_output_db, caffe2::db::NEW));
+      c10::FLAGS_output_db_type, c10::FLAGS_output_db, caffe2::db::NEW));
   std::unique_ptr<Cursor> cursor(in_db->NewCursor());
   std::unique_ptr<Transaction> transaction(out_db->NewTransaction());
   int count = 0;
   for (; cursor->Valid(); cursor->Next()) {
     transaction->Put(cursor->key(), cursor->value());
-    if (++count % caffe2::FLAGS_batch_size == 0) {
+    if (++count % c10::FLAGS_batch_size == 0) {
       transaction->Commit();
       LOG(INFO) << "Converted " << count << " items so far.";
     }

binaries/convert_encoded_to_raw_leveldb.cc (+27 -24)

@@ -16,9 +16,9 @@
 
 // This script converts an image dataset to leveldb.
 //
-// caffe2::FLAGS_input_folder is the root folder that holds all the images, and
-// caffe2::FLAGS_list_file should be a list of files as well as their labels, in the
-// format as
+// c10::FLAGS_input_folder is the root folder that holds all the images, and
+// c10::FLAGS_list_file should be a list of files as well as their labels, in
+// the format as
 // subfolder1/file1.JPEG 7
 // ....
 
@@ -35,14 +35,15 @@
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
 
-CAFFE2_DEFINE_string(input_db_name, "", "The input image file name.");
-CAFFE2_DEFINE_string(output_db_name, "", "The output training leveldb name.");
-CAFFE2_DEFINE_bool(color, true, "If set, load images in color.");
-CAFFE2_DEFINE_int(scale, 256,
-    "If caffe2::FLAGS_raw is set, scale all the images' shorter edge to the given "
+C10_DEFINE_string(input_db_name, "", "The input image file name.");
+C10_DEFINE_string(output_db_name, "", "The output training leveldb name.");
+C10_DEFINE_bool(color, true, "If set, load images in color.");
+C10_DEFINE_int(
+    scale,
+    256,
+    "If c10::FLAGS_raw is set, scale all the images' shorter edge to the given "
     "value.");
-CAFFE2_DEFINE_bool(warp, false, "If warp is set, warp the images to square.");
-
+C10_DEFINE_bool(warp, false, "If warp is set, warp the images to square.");
 
 namespace caffe2 {
 
@@ -92,7 +93,7 @@ void ConvertToRawDataset(
   data->set_data_type(TensorProto::BYTE);
   data->add_dims(0);
   data->add_dims(0);
-  if (caffe2::FLAGS_color) {
+  if (c10::FLAGS_color) {
     data->add_dims(3);
   }
   string value;
@@ -107,28 +108,30 @@ void ConvertToRawDataset(
     const string& encoded_image = input_protos.protos(0).string_data(0);
     int encoded_size = encoded_image.size();
     cv::Mat img = cv::imdecode(
-        cv::Mat(1, &encoded_size, CV_8UC1,
-            const_cast<char*>(encoded_image.data())),
-        caffe2::FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
+        cv::Mat(
+            1, &encoded_size, CV_8UC1, const_cast<char*>(encoded_image.data())),
+        c10::FLAGS_color ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
     cv::Mat resized_img;
     int scaled_width, scaled_height;
-    if (caffe2::FLAGS_warp) {
-      scaled_width = caffe2::FLAGS_scale;
-      scaled_height = caffe2::FLAGS_scale;
+    if (c10::FLAGS_warp) {
+      scaled_width = c10::FLAGS_scale;
+      scaled_height = c10::FLAGS_scale;
     } else if (img.rows > img.cols) {
-      scaled_width = caffe2::FLAGS_scale;
-      scaled_height = static_cast<float>(img.rows) * caffe2::FLAGS_scale / img.cols;
+      scaled_width = c10::FLAGS_scale;
+      scaled_height =
+          static_cast<float>(img.rows) * c10::FLAGS_scale / img.cols;
     } else {
-      scaled_height = caffe2::FLAGS_scale;
-      scaled_width = static_cast<float>(img.cols) * caffe2::FLAGS_scale / img.rows;
+      scaled_height = c10::FLAGS_scale;
+      scaled_width = static_cast<float>(img.cols) * c10::FLAGS_scale / img.rows;
     }
     cv::resize(img, resized_img, cv::Size(scaled_width, scaled_height), 0, 0,
                cv::INTER_LINEAR);
     data->set_dims(0, scaled_height);
     data->set_dims(1, scaled_width);
     DCHECK(resized_img.isContinuous());
-    data->set_byte_data(resized_img.ptr(),
-        scaled_height * scaled_width * (caffe2::FLAGS_color ? 3 : 1));
+    data->set_byte_data(
+        resized_img.ptr(),
+        scaled_height * scaled_width * (c10::FLAGS_color ? 3 : 1));
     output_protos.SerializeToString(&value);
     // Put in db
     batch->Put(iter->key(), value);
@@ -151,6 +154,6 @@ void ConvertToRawDataset(
 int main(int argc, char** argv) {
   caffe2::GlobalInit(&argc, &argv);
   caffe2::ConvertToRawDataset(
-      caffe2::FLAGS_input_db_name, caffe2::FLAGS_output_db_name);
+      c10::FLAGS_input_db_name, c10::FLAGS_output_db_name);
   return 0;
 }
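
The resize logic above scales the shorter image edge to FLAGS_scale while preserving aspect ratio, unless FLAGS_warp forces a square. A small standalone sketch of the same rule with a worked example (the values are chosen only for illustration):

#include <cstdio>

// Same rule as in ConvertToRawDataset: the shorter edge becomes `scale`,
// the other edge is scaled proportionally, unless `warp` forces a square.
void scaled_size(int rows, int cols, int scale, bool warp,
                 int* out_w, int* out_h) {
  if (warp) {
    *out_w = scale;
    *out_h = scale;
  } else if (rows > cols) {
    *out_w = scale;
    *out_h = static_cast<float>(rows) * scale / cols;
  } else {
    *out_h = scale;
    *out_w = static_cast<float>(cols) * scale / rows;
  }
}

int main() {
  int w, h;
  // A 640x480 (rows x cols) image with --scale=256: the 480-pixel edge
  // becomes 256 and the height becomes 640 * 256 / 480 = 341 (truncated).
  scaled_size(/*rows=*/640, /*cols=*/480, /*scale=*/256, /*warp=*/false, &w, &h);
  std::printf("%d x %d\n", w, h);  // prints "256 x 341"
  return 0;
}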
