Skip to content

Commit 581da43

Browse files
Automated Code Change
PiperOrigin-RevId: 691706707
1 parent 166dbe2 commit 581da43

14 files changed

+56
-53
lines changed

Diff for: tensorflow_serving/servables/tensorflow/multi_inference.cc

+2-2
Original file line number | Diff line number | Diff line change
@@ -30,7 +30,7 @@ limitations under the License.
3030
namespace tensorflow {
3131
namespace serving {
3232

33-
Status TensorFlowMultiInferenceRunner::Infer(
33+
absl::Status TensorFlowMultiInferenceRunner::Infer(
3434
const RunOptions& run_options, const MultiInferenceRequest& request,
3535
MultiInferenceResponse* response) {
3636
TRACELITERAL("TensorFlowMultiInferenceRunner::Infer");
@@ -126,7 +126,7 @@ Status TensorFlowMultiInferenceRunner::Infer(
126126
return absl::OkStatus();
127127
}
128128

129-
Status RunMultiInference(
129+
absl::Status RunMultiInference(
130130
const RunOptions& run_options, const MetaGraphDef& meta_graph_def,
131131
const absl::optional<int64_t>& servable_version, Session* session,
132132
const MultiInferenceRequest& request, MultiInferenceResponse* response,

Diff for: tensorflow_serving/servables/tensorflow/multi_inference_helper.cc

+2-2
Original file line number | Diff line number | Diff line change
@@ -34,7 +34,7 @@ const ModelSpec& GetModelSpecFromRequest(const MultiInferenceRequest& request) {
3434

3535
} // namespace
3636

37-
Status RunMultiInferenceWithServerCore(
37+
absl::Status RunMultiInferenceWithServerCore(
3838
const RunOptions& run_options, ServerCore* core,
3939
const tensorflow::thread::ThreadPoolOptions& thread_pool_options,
4040
const MultiInferenceRequest& request, MultiInferenceResponse* response) {
@@ -43,7 +43,7 @@ Status RunMultiInferenceWithServerCore(
4343
request, response);
4444
}
4545

46-
Status RunMultiInferenceWithServerCoreWithModelSpec(
46+
absl::Status RunMultiInferenceWithServerCoreWithModelSpec(
4747
const RunOptions& run_options, ServerCore* core,
4848
const tensorflow::thread::ThreadPoolOptions& thread_pool_options,
4949
const ModelSpec& model_spec, const MultiInferenceRequest& request,

Diff for: tensorflow_serving/servables/tensorflow/multi_inference_helper_test.cc

+4-3
Original file line number | Diff line number | Diff line change
@@ -61,7 +61,8 @@ class MultiInferenceTest : public ::testing::Test {
6161
static void TearDownTestSuite() { server_core_.reset(); }
6262

6363
protected:
64-
static Status CreateServerCore(std::unique_ptr<ServerCore>* server_core) {
64+
static absl::Status CreateServerCore(
65+
std::unique_ptr<ServerCore>* server_core) {
6566
ModelServerConfig config;
6667
auto model_config = config.mutable_model_config_list()->add_config();
6768
model_config->set_name(kTestModelName);
@@ -127,8 +128,8 @@ void PopulateTask(const string& signature_name, const string& method_name,
127128
task->set_method_name(method_name);
128129
}
129130

130-
void ExpectStatusError(const Status& status,
131-
const tensorflow::errors::Code expected_code,
131+
void ExpectStatusError(const absl::Status& status,
132+
const absl::StatusCode expected_code,
132133
const string& message_substring) {
133134
ASSERT_EQ(expected_code, status.code());
134135
EXPECT_THAT(status.message(), ::testing::HasSubstr(message_substring));

Diff for: tensorflow_serving/servables/tensorflow/predict_impl.cc

+9-10
Original file line number | Diff line number | Diff line change
@@ -30,24 +30,23 @@ limitations under the License.
3030
namespace tensorflow {
3131
namespace serving {
3232

33-
Status TensorflowPredictor::Predict(const RunOptions& run_options,
34-
ServerCore* core,
35-
const PredictRequest& request,
36-
PredictResponse* response) {
33+
absl::Status TensorflowPredictor::Predict(const RunOptions& run_options,
34+
ServerCore* core,
35+
const PredictRequest& request,
36+
PredictResponse* response) {
3737
if (!request.has_model_spec()) {
38-
return tensorflow::Status(
38+
return absl::Status(
3939
static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
4040
"Missing ModelSpec");
4141
}
4242
return PredictWithModelSpec(run_options, core, request.model_spec(), request,
4343
response);
4444
}
4545

46-
Status TensorflowPredictor::PredictWithModelSpec(const RunOptions& run_options,
47-
ServerCore* core,
48-
const ModelSpec& model_spec,
49-
const PredictRequest& request,
50-
PredictResponse* response) {
46+
absl::Status TensorflowPredictor::PredictWithModelSpec(
47+
const RunOptions& run_options, ServerCore* core,
48+
const ModelSpec& model_spec, const PredictRequest& request,
49+
PredictResponse* response) {
5150
ServableHandle<SavedModelBundle> bundle;
5251
TF_RETURN_IF_ERROR(core->GetServableHandle(model_spec, &bundle));
5352
return internal::RunPredict(

Diff for: tensorflow_serving/servables/tensorflow/predict_impl_test.cc

+2-2
Original file line number | Diff line number | Diff line change
@@ -63,8 +63,8 @@ class PredictImplTest : public ::testing::Test {
6363
}
6464

6565
protected:
66-
static Status CreateServerCore(const string& model_path,
67-
std::unique_ptr<ServerCore>* server_core) {
66+
static absl::Status CreateServerCore(
67+
const string& model_path, std::unique_ptr<ServerCore>* server_core) {
6868
ModelServerConfig config;
6969
auto model_config = config.mutable_model_config_list()->add_config();
7070
model_config->set_name(kTestModelName);

Diff for: tensorflow_serving/servables/tensorflow/regression_service.cc

+3-3
Original file line number | Diff line number | Diff line change
@@ -25,13 +25,13 @@ limitations under the License.
2525
namespace tensorflow {
2626
namespace serving {
2727

28-
Status TensorflowRegressionServiceImpl::Regress(
28+
absl::Status TensorflowRegressionServiceImpl::Regress(
2929
const RunOptions& run_options, ServerCore* core,
3030
const thread::ThreadPoolOptions& thread_pool_options,
3131
const RegressionRequest& request, RegressionResponse* response) {
3232
// Verify Request Metadata and create a ServableRequest
3333
if (!request.has_model_spec()) {
34-
return tensorflow::Status(
34+
return absl::Status(
3535
static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
3636
"Missing ModelSpec");
3737
}
@@ -40,7 +40,7 @@ Status TensorflowRegressionServiceImpl::Regress(
4040
request.model_spec(), request, response);
4141
}
4242

43-
Status TensorflowRegressionServiceImpl::RegressWithModelSpec(
43+
absl::Status TensorflowRegressionServiceImpl::RegressWithModelSpec(
4444
const RunOptions& run_options, ServerCore* core,
4545
const thread::ThreadPoolOptions& thread_pool_options,
4646
const ModelSpec& model_spec, const RegressionRequest& request,

Diff for: tensorflow_serving/servables/tensorflow/saved_model_bundle_factory_test.cc

+6-6
Original file line number | Diff line number | Diff line change
@@ -60,10 +60,10 @@ enum class ModelType { kTfModel, kTfLiteModel };
6060
Loader::Metadata CreateMetadata() { return {ServableId{"name", 42}}; }
6161

6262
// Creates a new session based on the config and export path.
63-
Status CreateBundleFromPath(const CreationType creation_type,
64-
const SessionBundleConfig& config,
65-
const string& path,
66-
std::unique_ptr<SavedModelBundle>* bundle) {
63+
absl::Status CreateBundleFromPath(const CreationType creation_type,
64+
const SessionBundleConfig& config,
65+
const string& path,
66+
std::unique_ptr<SavedModelBundle>* bundle) {
6767
std::unique_ptr<SavedModelBundleFactory> factory;
6868
auto config_with_session_hook = config;
6969
config_with_session_hook.set_session_target(
@@ -119,8 +119,8 @@ class SavedModelBundleFactoryTest
119119
virtual ~SavedModelBundleFactoryTest() = default;
120120

121121
protected:
122-
Status CreateSession(const SessionBundleConfig& config,
123-
std::unique_ptr<Session>* session) const override {
122+
absl::Status CreateSession(const SessionBundleConfig& config,
123+
std::unique_ptr<Session>* session) const override {
124124
std::unique_ptr<SavedModelBundle> bundle;
125125
TF_RETURN_IF_ERROR(CreateBundleFromPath(GetParam().creation_type, config,
126126
export_dir_, &bundle));

Diff for: tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.cc

+4-4
Original file line number | Diff line number | Diff line change
@@ -33,7 +33,7 @@ limitations under the License.
3333
namespace tensorflow {
3434
namespace serving {
3535

36-
Status SavedModelBundleSourceAdapter::Create(
36+
absl::Status SavedModelBundleSourceAdapter::Create(
3737
const SavedModelBundleSourceAdapterConfig& config,
3838
std::unique_ptr<SavedModelBundleSourceAdapter>* adapter) {
3939
std::unique_ptr<SavedModelBundleFactory> bundle_factory;
@@ -86,8 +86,8 @@ SavedModelBundleSourceAdapter::GetServableCreator(
8686
};
8787
}
8888

89-
Status SavedModelBundleSourceAdapter::Convert(const StoragePath& path,
90-
std::unique_ptr<Loader>* loader) {
89+
absl::Status SavedModelBundleSourceAdapter::Convert(
90+
const StoragePath& path, std::unique_ptr<Loader>* loader) {
9191
std::shared_ptr<SavedModelBundleFactory> bundle_factory = bundle_factory_;
9292
auto servable_creator = GetServableCreator(bundle_factory, path);
9393
auto resource_estimator = [bundle_factory,
@@ -125,7 +125,7 @@ Status SavedModelBundleSourceAdapter::Convert(const StoragePath& path,
125125
// Register the source adapter.
126126
class SavedModelBundleSourceAdapterCreator {
127127
public:
128-
static Status Create(
128+
static absl::Status Create(
129129
const SavedModelBundleSourceAdapterConfig& config,
130130
std::unique_ptr<SourceAdapter<StoragePath, std::unique_ptr<Loader>>>*
131131
adapter) {

Diff for: tensorflow_serving/servables/tensorflow/saved_model_config.cc

+2-2
Original file line number | Diff line number | Diff line change
@@ -27,7 +27,7 @@ limitations under the License.
2727
namespace tensorflow {
2828
namespace serving {
2929

30-
Status LoadSavedModelConfig(
30+
absl::Status LoadSavedModelConfig(
3131
const std::string& export_dir, tensorflow::GraphOptions& graph_options,
3232
tensorflow::tfrt_stub::RuntimeConfig& runtime_config) {
3333
absl::StatusOr<SavedModelConfig> model_config =
@@ -52,7 +52,7 @@ Status LoadSavedModelConfig(
5252
}
5353
}
5454

55-
return Status();
55+
return absl::Status();
5656
}
5757

5858
} // namespace serving

Diff for: tensorflow_serving/servables/tensorflow/saved_model_warmup.cc

+8-6
Original file line number | Diff line number | Diff line change
@@ -36,9 +36,10 @@ namespace serving {
3636

3737
namespace {
3838

39-
Status RunWarmupRequest(const PredictionLog& warmup_record,
40-
const RunOptions& run_options,
41-
const MetaGraphDef& meta_graph_def, Session* session) {
39+
absl::Status RunWarmupRequest(const PredictionLog& warmup_record,
40+
const RunOptions& run_options,
41+
const MetaGraphDef& meta_graph_def,
42+
Session* session) {
4243
switch (warmup_record.log_type_case()) {
4344
case PredictionLog::kRegressLog: {
4445
RegressionResponse response;
@@ -78,9 +79,10 @@ Status RunWarmupRequest(const PredictionLog& warmup_record,
7879

7980
} // namespace
8081

81-
Status RunSavedModelWarmup(const ModelWarmupOptions& model_warmup_options,
82-
const RunOptions& run_options,
83-
const string& export_dir, SavedModelBundle* bundle) {
82+
absl::Status RunSavedModelWarmup(const ModelWarmupOptions& model_warmup_options,
83+
const RunOptions& run_options,
84+
const string& export_dir,
85+
SavedModelBundle* bundle) {
8486
return internal::RunSavedModelWarmup(
8587
model_warmup_options, export_dir, [&](PredictionLog prediction_log) {
8688
return RunWarmupRequest(prediction_log, run_options,

Diff for: tensorflow_serving/servables/tensorflow/saved_model_warmup_test.cc

+4-4
Original file line number | Diff line number | Diff line change
@@ -133,8 +133,8 @@ TEST(SavedModelBundleWarmupTest, UnsupportedLogType_SessionRun) {
133133
saved_model_bundle.session.reset(mock);
134134
EXPECT_CALL(*mock, Run(_, _, _, _, _, _, _))
135135
.WillRepeatedly(Return(absl::OkStatus()));
136-
const Status status = RunSavedModelWarmup(ModelWarmupOptions(), RunOptions(),
137-
base_path, &saved_model_bundle);
136+
const absl::Status status = RunSavedModelWarmup(
137+
ModelWarmupOptions(), RunOptions(), base_path, &saved_model_bundle);
138138
ASSERT_FALSE(status.ok());
139139
EXPECT_EQ(::tensorflow::error::UNIMPLEMENTED, status.code()) << status;
140140
EXPECT_THAT(status.ToString(),
@@ -159,8 +159,8 @@ TEST(SavedModelBundleWarmupTest, UnsupportedLogType_PredictStreamed) {
159159
saved_model_bundle.session.reset(mock);
160160
EXPECT_CALL(*mock, Run(_, _, _, _, _, _, _))
161161
.WillRepeatedly(Return(absl::OkStatus()));
162-
const Status status = RunSavedModelWarmup(ModelWarmupOptions(), RunOptions(),
163-
base_path, &saved_model_bundle);
162+
const absl::Status status = RunSavedModelWarmup(
163+
ModelWarmupOptions(), RunOptions(), base_path, &saved_model_bundle);
164164
ASSERT_FALSE(status.ok());
165165
EXPECT_EQ(::tensorflow::error::UNIMPLEMENTED, status.code()) << status;
166166
EXPECT_THAT(status.ToString(),

Diff for: tensorflow_serving/servables/tensorflow/serving_session.cc

+3-3
Original file line number | Diff line number | Diff line change
@@ -22,15 +22,15 @@ limitations under the License.
2222
namespace tensorflow {
2323
namespace serving {
2424

25-
Status ServingSession::Create(const GraphDef& graph) {
25+
absl::Status ServingSession::Create(const GraphDef& graph) {
2626
return errors::PermissionDenied("State changes denied via ServingSession");
2727
}
2828

29-
Status ServingSession::Extend(const GraphDef& graph) {
29+
absl::Status ServingSession::Extend(const GraphDef& graph) {
3030
return errors::PermissionDenied("State changes denied via ServingSession");
3131
}
3232

33-
Status ServingSession::Close() {
33+
absl::Status ServingSession::Close() {
3434
return errors::PermissionDenied("State changes denied via ServingSession");
3535
}
3636

Diff for: tensorflow_serving/servables/tensorflow/simple_servers.cc

+3-3
Original file line number | Diff line number | Diff line change
@@ -44,7 +44,7 @@ namespace {
4444
// directories. Upon finding these, it provides the target with the new version
4545
// (a directory). The servable_name param simply allows this source to create
4646
// all AspiredVersions for the target with the same servable_name.
47-
Status CreateStoragePathSource(
47+
absl::Status CreateStoragePathSource(
4848
const string& base_path, const string& servable_name,
4949
std::unique_ptr<Source<StoragePath>>* path_source) {
5050
FileSystemStoragePathSourceConfig config;
@@ -66,7 +66,7 @@ Status CreateStoragePathSource(
6666
// 'CreateSingleTFModelManagerFromBasePath' method, with the
6767
// FileSystemStoragePathSource as the Source and the SavedModelBundleSource as
6868
// the Target.
69-
Status CreateSavedModelBundleSource(
69+
absl::Status CreateSavedModelBundleSource(
7070
std::unique_ptr<SavedModelBundleSourceAdapter>* source) {
7171
SavedModelBundleSourceAdapterConfig config;
7272
TF_RETURN_IF_ERROR(SavedModelBundleSourceAdapter::Create(config, source));
@@ -76,7 +76,7 @@ Status CreateSavedModelBundleSource(
7676

7777
} // namespace
7878

79-
Status CreateSingleTFModelManagerFromBasePath(
79+
absl::Status CreateSingleTFModelManagerFromBasePath(
8080
const string& base_path, std::unique_ptr<Manager>* const manager) {
8181
std::unique_ptr<SavedModelBundleSourceAdapter> bundle_source;
8282
TF_RETURN_IF_ERROR(CreateSavedModelBundleSource(&bundle_source));

Diff for: tensorflow_serving/servables/tensorflow/simple_servers_test.cc

+4-3
Original file line number | Diff line number | Diff line change
@@ -73,16 +73,17 @@ class SimpleServersTest : public ::testing::Test {
7373

7474
TEST_F(SimpleServersTest, Basic) {
7575
std::unique_ptr<Manager> manager;
76-
const Status status = simple_servers::CreateSingleTFModelManagerFromBasePath(
77-
test_data_path_, &manager);
76+
const absl::Status status =
77+
simple_servers::CreateSingleTFModelManagerFromBasePath(test_data_path_,
78+
&manager);
7879
TF_CHECK_OK(status);
7980
// We wait until the manager starts serving the servable.
8081
// TODO(b/25545570): Use the waiter api when it's ready.
8182
while (manager->ListAvailableServableIds().empty()) {
8283
Env::Default()->SleepForMicroseconds(1000);
8384
}
8485
ServableHandle<SavedModelBundle> bundle;
85-
const Status handle_status =
86+
const absl::Status handle_status =
8687
manager->GetServableHandle(ServableRequest::Latest("default"), &bundle);
8788
TF_CHECK_OK(handle_status);
8889
TestSingleRequest(*bundle);

0 commit comments

Comments (0)