
Commit 947b253

alekstheod authored and tensorflower-gardener committed
PR #33414: [ROCm] Add missing keepalive timeout for rbe builds in rocm
Imported from GitHub PR openxla/xla#33414

📝 Summary of Changes
Set the keepalive time to prevent the Bazel client's connection from being dropped during RBE builds.

🎯 Justification
Set the keepalive time to prevent the Bazel client's connection from being dropped during RBE builds.

🚀 Kind of Contribution
🐛 Bug Fix

📊 Benchmark (for Performance Improvements): Not relevant
🧪 Unit Tests: Not relevant
🧪 Execution Tests: Not relevant

Copybara import of the project:

--
76a26b318dac967b17a64abfbbbb00ca26980b5a by Alexandros Theodoridis <[email protected]>:

Add missing keepalive timeout for rbe builds in rocm

Merging this change closes #33414

FUTURE_COPYBARA_INTEGRATE_REVIEW=openxla/xla#33414 from ROCm:ci_add_missing_keepalive_timeout_for_rbe_builds 76a26b318dac967b17a64abfbbbb00ca26980b5a
PiperOrigin-RevId: 827818530
1 parent ee6b915 commit 947b253
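The build-configuration change that actually adds the keepalive setting is not among the file diffs excerpted below (most of the 73 changed files are hidden). As a hedged illustration only, Bazel's remote-execution client exposes gRPC keepalive options that a ROCm RBE configuration could set in a bazelrc; the flag names below are Bazel's generic options, and the "rbe" config name and the values are placeholders, not taken from this PR:

# Illustrative bazelrc sketch, not the actual change from this commit.
# Periodic gRPC keepalive pings stop idle remote-execution connections from
# being dropped during long-running remote actions, which is the connection
# drop described in the PR summary.
build:rbe --grpc_keepalive_time=30s
build:rbe --grpc_keepalive_timeout=60s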

Some content is hidden

Large commits have some content hidden by default; only a subset of the 73 changed files is shown below.

73 files changed: +409 / -399 lines changed

tensorflow/compiler/tf2xla/kernels/all_reduce_op.cc

Lines changed: 3 additions & 3 deletions
@@ -94,9 +94,9 @@ class CollectiveReduceV2Op : public XlaOpKernel {

  private:
   DataType dtype_ = DT_INVALID;
-  string merge_op_name_;
-  string final_op_name_;
-  string communication_hint_;
+  std::string merge_op_name_;
+  std::string final_op_name_;
+  std::string communication_hint_;

   CollectiveReduceV2Op(const CollectiveReduceV2Op&) = delete;
   void operator=(const CollectiveReduceV2Op&) = delete;
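The substitution above is the pattern that repeats through the rest of the commit: TensorFlow's unqualified type aliases (string, int32, int64, uint16) are written out as their standard-library counterparts. A minimal before/after sketch of that pattern, using a hypothetical class rather than code from this diff, and assuming the aliases resolve to std::string and the <cstdint> fixed-width integer types (which is what the one-to-one replacements imply):

// Hypothetical example, not taken from this commit.
#include <cstdint>  // int32_t, int64_t, uint16_t
#include <string>   // std::string

class ExampleOp {
 private:
  // Before: aliases pulled in transitively through TensorFlow platform headers.
  //   string merge_op_name_;
  //   int32 branch_index_;
  // After: the standard types spelled out, so these declarations compile with
  // just the two standard headers above.
  std::string merge_op_name_;
  int32_t branch_index_ = 0;
};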

tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc

Lines changed: 3 additions & 3 deletions
@@ -48,7 +48,7 @@ class FusedBatchNormOp : public XlaOpKernel {
     OP_REQUIRES_OK(ctx, ctx->GetAttr("is_training", &is_training_));
     OP_REQUIRES_OK(
         ctx, ctx->GetAttr("exponential_avg_factor", &exponential_avg_factor_));
-    string data_format_str;
+    std::string data_format_str;
     OP_REQUIRES_OK(ctx, ctx->GetAttr("data_format", &data_format_str));
     OP_REQUIRES(
         ctx, FormatFromString(data_format_str, &data_format_),
@@ -61,7 +61,7 @@ class FusedBatchNormOp : public XlaOpKernel {
                 errors::InvalidArgument(
                     "FusedBatchNormEx supports at most 1 side input."));
     add_side_input_ = (num_side_inputs == 1);
-    string activation_mode;
+    std::string activation_mode;
     OP_REQUIRES_OK(ctx, ctx->GetAttr("activation_mode", &activation_mode));
     OP_REQUIRES(ctx,
                 activation_mode == "Identity" || activation_mode == "Relu",
@@ -249,7 +249,7 @@ class FusedBatchNormGradOp : public XlaOpKernel {
   explicit FusedBatchNormGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
     OP_REQUIRES_OK(ctx, ctx->GetAttr("epsilon", &epsilon_));
     OP_REQUIRES_OK(ctx, ctx->GetAttr("is_training", &is_training_));
-    string data_format_str;
+    std::string data_format_str;
     OP_REQUIRES_OK(ctx, ctx->GetAttr("data_format", &data_format_str));
     OP_REQUIRES(
         ctx, FormatFromString(data_format_str, &data_format_),

tensorflow/compiler/tf2xla/kernels/bcast_ops.cc

Lines changed: 6 additions & 4 deletions
@@ -66,9 +66,11 @@ class BCastArgsOp : public XlaOpKernel {
     Tensor output(val_type, TensorShape({len}));
     for (int64_t i = 0; i < len; ++i) {
       if (val_type == DT_INT32) {
-        output.flat<int32>()(i) = static_cast<int32>(bcast.output_shape()[i]);
+        output.flat<int32_t>()(i) =
+            static_cast<int32_t>(bcast.output_shape()[i]);
       } else {
-        output.flat<int64>()(i) = static_cast<int64>(bcast.output_shape()[i]);
+        output.flat<int64_t>()(i) =
+            static_cast<int64_t>(bcast.output_shape()[i]);
       }
     }
     ctx->SetConstantOutput(0, output);
@@ -129,9 +131,9 @@ class BCastGradArgsOp : public XlaOpKernel {
     Tensor constant(val_type, TensorShape({len}));
     for (int64_t i = 0; i < len; ++i) {
       if (val_type == DT_INT32) {
-        constant.flat<int32>()(i) = static_cast<int32>(v[i]);
+        constant.flat<int32_t>()(i) = static_cast<int32_t>(v[i]);
       } else {
-        constant.flat<int64>()(i) = static_cast<int64>(v[i]);
+        constant.flat<int64_t>()(i) = static_cast<int64_t>(v[i]);
       }
     }
     ctx->SetConstantOutput(idx, constant);

tensorflow/compiler/tf2xla/kernels/bias_ops.cc

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ namespace {
 class BiasOp : public XlaOpKernel {
  public:
   explicit BiasOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
-    string data_format;
+    std::string data_format;
     if (ctx->GetAttr("data_format", &data_format).ok()) {
       OP_REQUIRES(ctx, FormatFromString(data_format, &data_format_),
                   errors::InvalidArgument("Invalid data format"));

tensorflow/compiler/tf2xla/kernels/bucketize_op.cc

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ class BucketizeOp : public XlaOpKernel {
                            /*broadcast_dimensions=*/{0}),
         xla::S32);
     xla::XlaOp buckets = xla::Reduce(
-        comparison, /*init_value=*/xla::ConstantR0<int32>(builder, 0),
+        comparison, /*init_value=*/xla::ConstantR0<int32_t>(builder, 0),
         /*computation=*/xla::CreateScalarAddComputation(xla::S32, builder),
         /*dimensions_to_reduce=*/{0});
     context->SetOutput(0, buckets);

tensorflow/compiler/tf2xla/kernels/case_op.cc

Lines changed: 4 additions & 3 deletions
@@ -66,7 +66,7 @@ XlaCaseOp::GetPrunedBranchesAndIndex(XlaOpKernelContext* ctx) {
     return {unpruned_branches_, ctx->Input(0)};
   }

-  int32_t branch_index = branch_index_literal.Get<int32>({});
+  int32_t branch_index = branch_index_literal.Get<int32_t>({});
   if (branch_index < 0 || branch_index >= unpruned_branches_.size()) {
     branch_index = unpruned_branches_.size() - 1;
   }
@@ -187,7 +187,8 @@ void XlaCaseOp::Compile(XlaOpKernelContext* ctx) {

     // Add any TensorArray gradients touched by the then/else computation to
     // the enclosing graph.
-    for (const string& grad_source : update.tensor_array_gradients_accessed) {
+    for (const std::string& grad_source :
+         update.tensor_array_gradients_accessed) {
       VLOG(5) << "TensorArray " << resource->name() << " accessed gradient "
               << grad_source;
       XlaResource* gradient;
@@ -289,7 +290,7 @@ void XlaCaseOp::Compile(XlaOpKernelContext* ctx) {
   // Set token input for this "case" op.
   std::vector<xla::XlaOp> token_inputs;
   token_inputs.reserve(token_input_nodes_.size());
-  for (const string& node_name : token_input_nodes_) {
+  for (const std::string& node_name : token_input_nodes_) {
     auto token_or = compiler->GetNodeToken(node_name);
     OP_REQUIRES_OK(ctx, token_or.status());
     token_inputs.push_back(token_or.value());

tensorflow/compiler/tf2xla/kernels/case_op.h

Lines changed: 2 additions & 2 deletions
@@ -65,8 +65,8 @@ class XlaCaseOp : public XlaOpKernel {
   DataTypeVector input_types_;
   DataTypeVector output_types_;
   bool has_token_input_output_;
-  std::vector<string> token_input_nodes_;
-  string original_node_name_;
+  std::vector<std::string> token_input_nodes_;
+  std::string original_node_name_;
   // Whether to propagate compile time consts into the cond branches.
   // This is not supported by default now since it may cause HBM memory
   // overheads.

tensorflow/compiler/tf2xla/kernels/categorical_op.cc

Lines changed: 1 addition & 1 deletion
@@ -185,7 +185,7 @@ class StatelessCategoricalOp : public CategoricalOp {

  private:
   DataType dtype_;
-  string device_type_string_;
+  std::string device_type_string_;

   StatelessCategoricalOp(const StatelessCategoricalOp&) = delete;
   void operator=(const StatelessCategoricalOp&) = delete;

tensorflow/compiler/tf2xla/kernels/const_op.cc

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ template <typename DstT,
                             std::is_same<DstT, bfloat16>::value>::type* =
               nullptr>
 DstT CastTo(int32_t src) {
-  return absl::bit_cast<DstT>(static_cast<uint16>(src));
+  return absl::bit_cast<DstT>(static_cast<uint16_t>(src));
 }

 // Returns scalar constant with the value in the tensor, if the given proto has

tensorflow/compiler/tf2xla/kernels/conv_op_helpers.cc

Lines changed: 4 additions & 4 deletions
@@ -163,8 +163,8 @@ absl::Status CheckConvAttrs(const ConvOpAttrs& attrs) {
 absl::Status ConvBackpropComputeDimensionsV2XlaShapes(
     absl::string_view label, int num_spatial_dims,
     const xla::Shape& input_shape, const xla::Shape& filter_shape,
-    const xla::Shape& out_backprop_shape, absl::Span<const int32> dilations,
-    const std::vector<int32>& strides, Padding padding,
+    const xla::Shape& out_backprop_shape, absl::Span<const int32_t> dilations,
+    const std::vector<int32_t>& strides, Padding padding,
     TensorFormat data_format, ConvBackpropDimensions* dims,
     absl::Span<const int64_t> explicit_paddings) {
   TensorShape input_tensor_shape, filter_tensor_shape,
@@ -203,7 +203,7 @@ absl::StatusOr<ConvOpAttrs> ConvOpAttrs::Create(int num_spatial_dims,
         ctx->GetAttr("explicit_paddings", &attrs.explicit_paddings));
   }

-  string data_format;
+  std::string data_format;
   TF_RETURN_IF_ERROR(ctx->GetAttr("data_format", &data_format));
   if (!FormatFromString(data_format, &attrs.data_format)) {
     return errors::InvalidArgument("Invalid data format: ", data_format);
@@ -231,7 +231,7 @@ absl::StatusOr<ConvNDOpAttrs> ConvNDOpAttrs::Create(OpKernelConstruction* ctx) {
         ctx->GetAttr("explicit_paddings", &attrs.explicit_paddings));
   }

-  string data_format_str;
+  std::string data_format_str;
   TF_RETURN_IF_ERROR(ctx->GetAttr("data_format", &data_format_str));
   if (!(data_format_str == "CHANNELS_LAST" ||
         data_format_str == "CHANNELS_FIRST")) {
