diff --git a/Classify/classifier.py b/Classify/classifier.py index 82a9d71..396a14e 100644 --- a/Classify/classifier.py +++ b/Classify/classifier.py @@ -4,12 +4,52 @@ import numpy as np import tensorflow as tf from swiftclient.service import Connection +import urllib.request app = flask.Flask(__name__) app.debug = False graph = tf.Graph() labels = [] +modelNeedsToBeLoaded = True +@app.route('/classify', methods=['GET']) +def classify(): + global modelNeedsToBeLoaded + try: + if (modelNeedsToBeLoaded == True): + modelNeedsToBeLoaded = False + init() + + imageUrl = flask.request.args.get('image-url', '') + file_name, headers = urllib.request.urlretrieve(imageUrl) + file_reader = tf.read_file(file_name, "file_reader") + + except Exception as err: + response = flask.jsonify({'error': 'Issue with Object Storage credentials or with image URL'}) + response.status_code = 400 + return response + + image_reader = tf.image.decode_jpeg(file_reader, channels=3, name='jpeg_reader') + float_caster = tf.cast(image_reader, tf.float32) + dims_expander = tf.expand_dims(float_caster, 0) + resized = tf.image.resize_bilinear(dims_expander, [224, 224]) + normalized = tf.divide(tf.subtract(resized, [128]), [128]) + input_operation = graph.get_operation_by_name("import/input") + output_operation = graph.get_operation_by_name("import/final_result") + tf_picture = tf.Session().run(normalized) + + with tf.Session(graph=graph) as sess: + results = np.squeeze(sess.run(output_operation.outputs[0], {input_operation.outputs[0]: tf_picture})) + index = results.argsort() + answer = {} + + for i in index: + answer[labels[i]] = float(results[i]) + + response = flask.jsonify(answer) + response.status_code = 200 + + return response @app.route('/init', methods=['POST']) def init(): diff --git a/Classify/tensorflow-model-classifier.yaml b/Classify/tensorflow-model-classifier.yaml new file mode 100644 index 0000000..cdc3fae --- /dev/null +++ b/Classify/tensorflow-model-classifier.yaml @@ -0,0 +1,30 @@ 
+apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: classifier-deployment +spec: + replicas: 1 + template: + metadata: + labels: + app: classifier-server + spec: + containers: + - name: classifier-container + image: nheidloff/tensorflow-kubernetes-classify + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + run: classifier-service + name: classifier-service +spec: + ports: + - port: 8080 + targetPort: 8080 + selector: + app: classifier-server + type: NodePort \ No newline at end of file diff --git a/README_KUBE_CLASSIFY.md b/README_KUBE_CLASSIFY.md new file mode 100644 index 0000000..768f117 --- /dev/null +++ b/README_KUBE_CLASSIFY.md @@ -0,0 +1,39 @@ +# Deploying TensorFlow Models to Kubernetes on the IBM Cloud + +As documented in [Image Recognition with Tensorflow classification on OpenWhisk](https://ansi.23-5.eu/2017/11/image-recognition-tensorflow-classification-openwhisk/), the MobileNet model can be used to classify images in OpenWhisk functions. The same Docker image can also be deployed to Kubernetes on the IBM Cloud. This is useful when models are too big to be deployed to OpenWhisk, for example the Inception model rather than MobileNet. + +Before the Docker image can be built, you need to create an instance of IBM Object Storage on the IBM Cloud. Check out the article [Accessing IBM Object Store from Python](https://ansi.23-5.eu/2017/11/accessing-ibm-object-store-python/) for details. Paste the values of 'region', 'projectId', 'userId' and 'password' in [classifier.py](Classify/classifier.py). + +After this, upload the model (retrained_graph.pb and retrained_labels.txt) into your Object Storage instance in a bucket named 'tensorflow'. + +In order to build the image, run these commands: + +```sh +$ cd Classify +$ docker build -t $USER/tensorflow-kubernetes-classify:latest . 
+``` + +In order to test the image, run these commands: + +```sh +$ docker run -d -p 8080:8080 $USER/tensorflow-kubernetes-classify:latest +$ curl "http://localhost:8080/classify?image-url=http://heidloff.net/wp-content/uploads/2017/10/codetalks17-6.jpg" +``` + +In order to deploy the image to Kubernetes, run the following commands after you've updated your user name in [tensorflow-model-classifier.yaml](Classify/tensorflow-model-classifier.yaml): + +```sh +$ docker push $USER/tensorflow-kubernetes-classify:latest +$ bx plugin install container-service -r Bluemix +$ bx login -a https://api.eu-de.bluemix.net +$ bx cs cluster-config mycluster +$ export KUBECONFIG=/Users/nheidlo..... +$ cd Classify +$ kubectl create -f tensorflow-model-classifier.yaml +$ bx cs workers mycluster +$ kubectl describe service classifier-service +``` + +In order to test the classifier, open the following URL after you've replaced 'Public IP' and 'NodePort' with the values from the previous two commands: + +http://169.51.19.8:32441/classify?image-url=http://heidloff.net/wp-content/uploads/2017/10/codetalks17-6.jpg \ No newline at end of file