# tutorial from http://machinelearningmastery.com/tutorial-first-neural-network-python-keras/
# Model saving is at https://gist.github.com/ismaeIfm/eeb24fad2623dfb69ca81bb0f254543f
# A good TF/Keras interoperability post https://blog.keras.io/keras-as-a-simplified-interface-to-tensorflow-tutorial.html

from keras.models import Sequential
from keras.layers import Dense
import numpy
import tensorflow as tf
from keras.models import model_from_json
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
from keras import backend as K


# fix random seed for reproducibility
numpy.random.seed(7)

# create TF session and set it in Keras
sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(1)
22

# load pima indians dataset
dataset = numpy.loadtxt("../data/pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:, 0:8]
Y = dataset[:, 8]

# create model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
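
# Optional sanity check (not part of the original tutorial): print the layer summary.
# Keras auto-generates the layer names (dense_1, dense_2, dense_3 in a fresh session),
# and those names become part of the TensorFlow node names referenced further down,
# so it is worth confirming them here.
model.summary()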
34

# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Fit the model
model.fit(X, Y, epochs=150, batch_size=10)

print('Done training!')

modelInput = model.input
modelOutput = model.output
print("input", modelInput.name)
print("output", modelOutput.name)

print("Model's parameters")
print(model.get_weights())
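
# Optional helper (not in the original script): list the op names in the session graph.
# The node names hard-coded below for freezing and optimizing ("dense_1_input_1",
# "dense_3_1/Sigmoid") depend on how many times the model has been built in this
# session, so printing the actual names makes it easier to pick the right ones.
for op in sess.graph.get_operations():
    if "dense" in op.name:
        print(op.name)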
50


K.set_learning_phase(0)  # all new operations will be in test mode from now on

# serialize the model and get its weights, for quick re-building
config = model.to_json()
weights = model.get_weights()

# re-build a model where the learning phase is now hard-coded to 0
new_model = model_from_json(config)
new_model.set_weights(weights)
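
# Optional (not in the original script): the model-saving gist linked at the top also
# keeps a pure Keras copy of the model on disk; the file names below are illustrative
# and save_weights requires h5py to be installed.
with open("KerasModel_config.json", "w") as json_file:
    json_file.write(config)
new_model.save_weights("KerasModel_weights.h5")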
61

# create the saver
# Saver op to save and restore all the variables
saver = tf.train.Saver()

# Save produced model
model_path = "/Users/boris/Projects/TensorFlowPython/models/"
model_name = "KerasModel"
save_path = saver.save(sess, model_path + model_name + ".ckpt")
print("Saved model at", save_path)
graph_path = tf.train.write_graph(sess.graph_def, model_path, model_name + ".pb", as_text=True)
print("Saved graph at:", graph_path)
73

# Now freeze the graph (put variables into graph)

input_saver_def_path = ""
input_binary = False
output_node_names = "dense_3_1/Sigmoid"  # Model result node
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_frozen_graph_name = model_path + 'frozen_' + model_name + '.pb'
clear_devices = True


freeze_graph.freeze_graph(graph_path, input_saver_def_path,
                          input_binary, save_path, output_node_names,
                          restore_op_name, filename_tensor_name,
                          output_frozen_graph_name, clear_devices, "")
print("Model is frozen")
90

# optimizing graph

input_graph_def = tf.GraphDef()
# the frozen graph is a binary protobuf, so read it in binary mode
with tf.gfile.Open(output_frozen_graph_name, "rb") as f:
    data = f.read()
    input_graph_def.ParseFromString(data)


output_graph_def = optimize_for_inference_lib.optimize_for_inference(
    input_graph_def,
    ["dense_1_input_1"],  # an array of the input node(s)
    ["dense_3_1/Sigmoid"],  # an array of output nodes
    tf.float32.as_datatype_enum)

# Save the optimized graph

tf.train.write_graph(output_graph_def, model_path, "optimized_" + model_name + ".pb", as_text=False)
tf.train.write_graph(output_graph_def, model_path, "optimized_text_" + model_name + ".pb", as_text=True)
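
# Optional check (not part of the original script): load the optimized graph back into a
# fresh graph and run one prediction with plain TensorFlow. The tensor names assume the
# node names used above ("dense_1_input_1", "dense_3_1/Sigmoid"); adjust them to match
# what your graph actually contains.
with tf.Graph().as_default() as optimized_graph:
    optimized_graph_def = tf.GraphDef()
    with tf.gfile.Open(model_path + "optimized_" + model_name + ".pb", "rb") as f:
        optimized_graph_def.ParseFromString(f.read())
    tf.import_graph_def(optimized_graph_def, name="")
    input_tensor = optimized_graph.get_tensor_by_name("dense_1_input_1:0")
    output_tensor = optimized_graph.get_tensor_by_name("dense_3_1/Sigmoid:0")
    with tf.Session(graph=optimized_graph) as test_sess:
        print("optimized graph prediction for the first row:",
              test_sess.run(output_tensor, feed_dict={input_tensor: X[:1]}))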
109


# evaluate the model
#scores = model.evaluate(X, Y)
#print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

# calculate predictions
#predictions = model.predict(X)
# round predictions
# rounded = [round(x[0]) for x in predictions]
# print(rounded)