Skip to content

Commit a3f8c28

Browse files
zufangzhu and Dboyqiao
authored and committed
Update tensorflow-serving docker (#2509)
1 parent 552e9cd commit a3f8c28

File tree

8 files changed

+101
-31
lines changed

8 files changed

+101
-31
lines changed

docker/tensorflow-serving/README.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,16 @@ To build the docker container, enter into [docker/tensorflow-serving](./) folder
1010

1111
### I. Binaries Preparation
1212

13-
Refer to [Install for Tensorflow Serving](../../docs/guide/tensorflow_serving.md) to build the TensorFlow Serving binary, and refer to [Install for CPP](../../docs/install/install_for_cpp.md) to build the Intel® Extension for TensorFlow* CC library from source. Then package and copy these binaries into the `./models/binaries` directory, as shown below.
13+
Refer to [Install for Tensorflow Serving](../../docs/guide/tf_serving_install.md) to build the TensorFlow Serving binary, and refer to [Install for CPP](../../docs/install/install_for_cpp.md) to build the Intel® Extension for TensorFlow* CC library from source. Then package and copy these binaries into the `./models/binaries` directory, as shown below.
1414

1515
```bash
1616
mkdir -p ./models/binaries
1717

1818
# Package and copy Intel® Extension for TensorFlow* CC library
1919
mkdir -p itex-bazel-bin/
2020
cp -r <path_to_itex>/bazel-out/k8-opt-ST-*/bin/ itex-bazel-bin/
21+
# if you build with threadpool
22+
cp -r <path_to_itex>/bazel-out/k8-opt-ST-*/bin/ itex-bazel-bin/bin_threadpool/
2123
tar cvfh itex-bazel-bin.tar itex-bazel-bin/
2224
cp itex-bazel-bin.tar ./models/binaries/
2325

@@ -61,3 +63,4 @@ docker run -v <your-local-dir>:/workspace \
6163
-it \
6264
$IMAGE_NAME
6365
```
66+
NOTE: If you want to run docker with threadpool, you should add `-e ITEX_OMP_THREADPOOL=0`

docker/tensorflow-serving/build.sh

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,12 +22,12 @@ IMAGE_NAME=intel-extension-for-tensorflow:serving-$IMAGE_TYPE
2222
if [ $IMAGE_TYPE == "gpu" ]
2323
then
2424
docker build --no-cache --build-arg UBUNTU_VERSION=22.04 \
25-
--build-arg ICD_VER=23.17.26241.33-647~22.04 \
26-
--build-arg LEVEL_ZERO_GPU_VER=1.3.26241.33-647~22.04 \
27-
--build-arg LEVEL_ZERO_VER=1.11.0-647~22.04 \
28-
--build-arg LEVEL_ZERO_DEV_VER=1.11.0-647~22.04 \
29-
--build-arg DPCPP_VER=2023.2.0-49495 \
30-
--build-arg MKL_VER=2023.2.0-49495 \
25+
--build-arg ICD_VER=23.30.26918.50-736~22.04 \
26+
--build-arg LEVEL_ZERO_GPU_VER=1.3.26918.50-736~22.04 \
27+
--build-arg LEVEL_ZERO_VER=1.13.1-719~22.04 \
28+
--build-arg LEVEL_ZERO_DEV_VER=1.13.1-719~22.04 \
29+
--build-arg DPCPP_VER=2024.0.0-49819 \
30+
--build-arg MKL_VER=2024.0.0-49656 \
3131
--build-arg TF_SERVING_BINARY=tensorflow_model_server \
3232
--build-arg TF_PLUGIN_TAR=itex-bazel-bin.tar \
3333
-t $IMAGE_NAME \

docker/tensorflow-serving/itex-serving-cpu.Dockerfile

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,10 +57,16 @@ RUN mkdir -p ${MODEL_BASE_PATH}
5757
ENV MODEL_NAME=my_model
5858
RUN mkdir -p ${MODEL_BASE_PATH}/${MODEL_NAME}
5959

60+
ENV ITEX_OMP_THREADPOOL=1
6061
RUN echo '#!/bin/bash \n\n\
62+
if [ ${ITEX_OMP_THREADPOOL} == 1 ]; then \n\
63+
DIR=/itex/itex-bazel-bin/bin/itex \n\
64+
else \n\
65+
DIR=/itex/itex-bazel-bin/bin_threadpool/itex \n\
66+
fi \n\
6167
/usr/local/bin/tensorflow_model_server --port=8500 --rest_api_port=8501 \
6268
--model_name=${MODEL_NAME} --model_base_path=${MODEL_BASE_PATH}/${MODEL_NAME} \
63-
--tensorflow_plugins=/itex/itex-bazel-bin/bin/itex \
69+
--tensorflow_plugins=${DIR} \
6470
"$@"' > /usr/bin/tf_serving_entrypoint.sh \
6571
&& chmod +x /usr/bin/tf_serving_entrypoint.sh
6672

docker/tensorflow-serving/itex-serving-gpu.Dockerfile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,9 +37,9 @@ RUN apt-get update && \
3737
apt-get clean && \
3838
rm -rf /var/lib/apt/lists/*
3939

40-
RUN wget -qO - https://repositories.intel.com/graphics/intel-graphics.key | \
40+
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
4141
gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg
42-
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy max" | \
42+
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy unified" | \
4343
tee /etc/apt/sources.list.d/intel-gpu-jammy.list
4444

4545
ARG ICD_VER

docs/guide/tf_serving_install.md

Lines changed: 3 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ The generated `libitex_cpu_cc.so` or `libitex_gpu_cc.so` binary are found in the
4242
git clone https://github.com/tensorflow/tensorflow
4343
4444
# checkout specific commit id
45-
cd tensorflow
45+
cd tensorflow
4646
git checkout xxxxx
4747
```
4848
- Add `alwayslink=1` for `kernels_experimental` library in local `tensorflow/tensorflow/c/BUILD` file:
@@ -72,15 +72,8 @@ The generated `libitex_cpu_cc.so` or `libitex_gpu_cc.so` binary are found in the
7272
- Patch TensorFlow Serving
7373
```
7474
cd serving
75-
patch -p1 -i ../intel-extension-for-tensorflow/third_party/tf_serving/serving_plugin.patch
76-
```
77-
- Update `serving/WORKSPACE` to use local TensorFlow
78-
Replace L24-L29 with below code to use local TensorFlow: https://github.com/tensorflow/serving/blob/master/WORKSPACE#L24
79-
```
80-
local_repository(
81-
name= "org_tensorflow",
82-
path = "path to local tensorflow source code",
83-
)
75+
git checkout r2.14
76+
git apply ../intel-extension-for-tensorflow/third_party/tf_serving/serving_plugin.patch
8477
```
8578
8679
- Build TensorFlow Serving

docs/install/install_for_cpp.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -181,6 +181,12 @@ For CPU support
181181
$ bazel build -c opt --config=cpu //itex:libitex_cpu_cc.so
182182
```
183183

184+
If you want to build with threadpool, you should add the build option `--define=build_with_threadpool=true` and set the environment variable `ITEX_OMP_THREADPOOL=0`
185+
186+
```bash
187+
$ bazel build -c opt --config=cpu --define=build_with_threadpool=true //itex:libitex_cpu_cc.so
188+
```
189+
184190
CC library location: `<Path to intel-extension-for-tensorflow>/bazel-bin/itex/libitex_cpu_cc.so`
185191

186192
NOTE: `libitex_cpu_cc.so` depends on `libiomp5.so`, so `libiomp5.so` should be copied to the same directory as `libitex_cpu_cc.so`

docs/install/install_for_xpu.md

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -52,11 +52,11 @@ To use Intel® Optimization for Horovod* with the Intel® oneAPI Collective Comm
5252
```
5353
$ docker pull intel/intel-extension-for-tensorflow:xpu
5454
$ docker run -it -p 8888:8888 --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path --ipc=host intel/intel-extension-for-tensorflow:xpu
55-
$ export LD_LIBRARY_PATH=/opt/intel/oneapi/lib:/opt/intel/oneapi/lib/intel64:/opt/intel/oneapi/lib/intel64/libfabric:$LD_LIBRARY_PATH
56-
$ export PATH=/opt/intel/oneapi/lib/intel64/bin:$PATH
57-
$ export I_MPI_ROOT=/opt/intel/oneapi/lib/intel64/
58-
$ export CCL_ROOT=/opt/intel/oneapi/lib/intel64/
59-
$ export FI_PROVIDER_PATH=/opt/intel/oneapi/lib/intel64/libfabric/
55+
$ export LD_LIBRARY_PATH=/opt/intel/oneapi/redist/opt/mpi/libfabric/lib:$LD_LIBRARY_PATH
56+
$ export PATH=/opt/intel/oneapi/redist/bin:$PATH
57+
$ export I_MPI_ROOT=/opt/intel/oneapi/redist/lib
58+
$ export CCL_ROOT=/opt/intel/oneapi/redist
59+
$ export FI_PROVIDER_PATH=/opt/intel/oneapi/redist/opt/mpi/libfabric/lib/prov
6060
```
6161

6262
Then go to your browser on http://localhost:8888/

third_party/tf_serving/serving_plugin.patch

Lines changed: 68 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -28,17 +28,19 @@ index 7a017679..270d594e 100644
2828
+ visibility = ["//visibility:public"],
2929
+)
3030
diff --git a/tensorflow_serving/model_servers/BUILD b/tensorflow_serving/model_servers/BUILD
31-
index 2809e2af..3045c42f 100644
31+
index 616d887a..0387aaee 100644
3232
--- a/tensorflow_serving/model_servers/BUILD
3333
+++ b/tensorflow_serving/model_servers/BUILD
34-
@@ -1,5 +1,6 @@
35-
# Description: Model Server
34+
@@ -2,6 +2,8 @@
3635

36+
# Placeholder: load py_test
37+
# Placeholder: load py_binary
38+
+
3739
+load("//tensorflow_serving:serving.bzl", "if_with_plugins_support")
3840
load("//tensorflow_serving:tensorflow_version.bzl", "if_not_v2", "if_v2")
3941
load("@rules_pkg//:pkg.bzl", "pkg_deb", "pkg_tar")
4042
load("@org_tensorflow//tensorflow:tensorflow.bzl", "if_google", "if_libtpu", "if_with_tpu_support")
41-
@@ -417,7 +418,11 @@ cc_library(
43+
@@ -421,7 +423,11 @@ cc_library(
4244
"@org_tensorflow//tensorflow/core:protos_all_cc",
4345
"@org_tensorflow//tensorflow/core:tensorflow",
4446
"@org_tensorflow//tensorflow/core/profiler/rpc:profiler_service_impl",
@@ -51,15 +53,15 @@ index 2809e2af..3045c42f 100644
5153
)
5254

5355
cc_library(
54-
@@ -435,7 +440,6 @@ cc_library(
56+
@@ -439,7 +445,6 @@ cc_library(
5557
],
5658
deps = [
5759
":server_lib",
5860
- "@org_tensorflow//tensorflow/c:c_api",
5961
"@org_tensorflow//tensorflow/compiler/jit:xla_cpu_jit",
6062
"@org_tensorflow//tensorflow/core:lib",
6163
"@org_tensorflow//tensorflow/core/platform/cloud:gcs_file_system",
62-
@@ -452,6 +456,14 @@ cc_library(
64+
@@ -456,6 +461,14 @@ cc_library(
6365

6466
cc_binary(
6567
name = "tensorflow_model_server",
@@ -151,6 +153,66 @@ index 03467d6a..26dfdb99 100644
151153

152154
Options();
153155
};
156+
diff --git a/tensorflow_serving/model_servers/test_util/BUILD b/tensorflow_serving/model_servers/test_util/BUILD
157+
index dcc97948..95d2ac7f 100644
158+
--- a/tensorflow_serving/model_servers/test_util/BUILD
159+
+++ b/tensorflow_serving/model_servers/test_util/BUILD
160+
@@ -31,6 +31,7 @@ cc_library(
161+
"//visibility:public",
162+
],
163+
deps = [
164+
+ "//tensorflow_serving/apis:logging_cc_proto",
165+
"//tensorflow_serving/apis:model_cc_proto",
166+
"//tensorflow_serving/config:model_server_config_cc_proto",
167+
"//tensorflow_serving/config:platform_config_cc_proto",
168+
diff --git a/tensorflow_serving/model_servers/test_util/mock_server_core.h b/tensorflow_serving/model_servers/test_util/mock_server_core.h
169+
index ecde432a..64675eee 100644
170+
--- a/tensorflow_serving/model_servers/test_util/mock_server_core.h
171+
+++ b/tensorflow_serving/model_servers/test_util/mock_server_core.h
172+
@@ -19,6 +19,7 @@ limitations under the License.
173+
174+
#include <memory>
175+
#include <string>
176+
+#include <utility>
177+
178+
#include "base/logging.h"
179+
#include "google/protobuf/any.pb.h"
180+
@@ -56,7 +57,9 @@ class MockServerCore : public ServerCore {
181+
return platform_config_map;
182+
}
183+
184+
- static Options GetOptions(const PlatformConfigMap& platform_config_map) {
185+
+ static Options GetOptions(
186+
+ const PlatformConfigMap& platform_config_map,
187+
+ std::unique_ptr<ServerRequestLogger> server_request_logger) {
188+
Options options;
189+
options.platform_config_map = platform_config_map;
190+
options.servable_state_monitor_creator =
191+
@@ -71,13 +74,21 @@ class MockServerCore : public ServerCore {
192+
UniquePtrWithDeps<AspiredVersionsManager>* manager) -> Status {
193+
return Status();
194+
};
195+
- TF_CHECK_OK(
196+
- ServerRequestLogger::Create(nullptr, &options.server_request_logger));
197+
+ if (server_request_logger != nullptr) {
198+
+ options.server_request_logger = std::move(server_request_logger);
199+
+ } else {
200+
+ TF_CHECK_OK(
201+
+ ServerRequestLogger::Create(nullptr, &options.server_request_logger));
202+
+ }
203+
return options;
204+
}
205+
206+
explicit MockServerCore(const PlatformConfigMap& platform_config_map)
207+
- : ServerCore(GetOptions(platform_config_map)) {}
208+
+ : MockServerCore(platform_config_map, nullptr) {}
209+
+ MockServerCore(const PlatformConfigMap& platform_config_map,
210+
+ std::unique_ptr<ServerRequestLogger> server_request_logger)
211+
+ : ServerCore(GetOptions(platform_config_map,
212+
+ std::move(server_request_logger))) {}
213+
214+
MOCK_METHOD(ServableStateMonitor*, servable_state_monitor, (),
215+
(const, override));
154216
diff --git a/tensorflow_serving/model_servers/tf_c_api_exported_symbols.lds b/tensorflow_serving/model_servers/tf_c_api_exported_symbols.lds
155217
new file mode 100644
156218
index 00000000..b5e82a09

0 commit comments

Comments
 (0)