from __future__ import print_function

import matplotlib.pyplot as plt
import numpy
import tensorflow as tf

from tensorflow.python.tools import freeze_graph

# Example from https://blog.altoros.com/using-linear-regression-in-tensorflow.html
# As a training set for the tutorial, we use house prices in Portland, Oregon,
# where X (the predictor variable) is the house size and Y (the criterion
# variable) is the house price. The data set contains 47 examples.

# Exporting is based on
# https://medium.com/@hamedmp/exporting-trained-tensorflow-models-to-c-the-right-way-cf24b609d183#.rhjmkrdln
# and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph_test.py

# Training split: 47 paired observations (house size in sq ft, sale price in USD)
# from the Portland, Oregon housing data set.
size_data = numpy.asarray([
    2104, 1600, 2400, 1416, 3000, 1985, 1534, 1427, 1380, 1494,
    1940, 2000, 1890, 4478, 1268, 2300, 1320, 1236, 2609, 3031,
    1767, 1888, 1604, 1962, 3890, 1100, 1458, 2526, 2200, 2637,
    1839, 1000, 2040, 3137, 1811, 1437, 1239, 2132, 4215, 2162,
    1664, 2238, 2567, 1200, 852, 1852, 1203,
])
price_data = numpy.asarray([
    399900, 329900, 369000, 232000, 539900, 299900, 314900, 198999, 212000, 242500,
    239999, 347000, 329999, 699900, 259900, 449900, 299900, 199900, 499998, 599000,
    252900, 255000, 242900, 259900, 573900, 249900, 464500, 469000, 475000, 299900,
    349900, 169900, 314900, 579900, 285900, 249900, 229900, 345000, 549000, 287000,
    368500, 329900, 314000, 299000, 179900, 299900, 239500,
])

# Held-out test split: 6 paired observations.
size_data_test = numpy.asarray([1600, 1494, 1236, 1100, 3137, 2238])
price_data_test = numpy.asarray([329900, 242500, 199900, 249900, 579900, 329900])

# Feature scaling: standardizing inputs helps gradient descent converge,
# especially for (multivariate) linear regression.
def normalize(array):
    """Return *array* rescaled to zero mean and unit standard deviation."""
    mean = array.mean()
    std = array.std()
    return (array - mean) / std

# Standardize every split so all features reach gradient descent on a
# comparable scale.
size_data_n = normalize(size_data)
price_data_n = normalize(price_data)

size_data_test_n = normalize(size_data_test)
price_data_test_n = normalize(price_data_test)

# Destination used by the Saver below when writing checkpoints.
checkpoint_path = "models/saved_checkpoint"
checkpoint_state_name = "saved_checkpoint"


# Plot of the raw training samples (disabled):
# plt.plot(size_data, price_data, 'ro', label='Samples data')
# plt.legend()
# plt.draw()

samples_number = price_data_n.size

# TF graph input: scalar placeholders fed one (size, price) example at a time.
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Model weights, randomly initialized.
W = tf.Variable(numpy.random.randn(), name="weight")
b = tf.Variable(numpy.random.randn(), name="bias")

# Training hyperparameters.
learning_rate = 0.1
training_iteration = 200

# Linear model: predicted price = W * size + b.
model = tf.add(tf.multiply(X, W), b, name="model")

# L2 loss: sum of squared errors over the training set, halved per convention.
cost_function = tf.reduce_sum(tf.pow(model - Y, 2)) / (2 * samples_number)

# Gradient descent step minimizing the loss.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    cost_function, name='optimizer')

# Variable initializer op.
# FIX: tf.initialize_all_variables() was deprecated (and later removed);
# tf.global_variables_initializer() is the supported drop-in replacement
# with identical behavior.
init = tf.global_variables_initializer()

# 'Saver' op to save and restore all the variables.
saver = tf.train.Saver()

# Launch the graph: train, evaluate on the test split, and export the model.
with tf.Session() as sess:
    sess.run(init)

    display_step = 20

    # Fit the training data one example at a time (stochastic gradient descent).
    for iteration in range(training_iteration):
        for (x, y) in zip(size_data_n, price_data_n):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Log progress every `display_step` iterations.
        if iteration % display_step == 0:
            print("Iteration:", '%04d' % (iteration + 1),
                  "cost=", "{:.9f}".format(
                      sess.run(cost_function,
                               feed_dict={X: size_data_n, Y: price_data_n})),
                  "W=", sess.run(W), "b=", sess.run(b))

    # Final training cost.
    # BUG FIX: size_data_n / price_data_n are already normalized above; the
    # original code normalized them a second time here, so the reported
    # tuning cost was computed on differently-scaled data than the model
    # was trained (and logged) on.
    tuning_cost = sess.run(cost_function,
                           feed_dict={X: size_data_n, Y: price_data_n})

    print("Tuning completed:", "cost=", "{:.9f}".format(tuning_cost),
          "W=", sess.run(W), "b=", sess.run(b))

    # Validate the tuned model against the held-out test set.
    testing_cost = sess.run(cost_function,
                            feed_dict={X: size_data_test_n, Y: price_data_test_n})

    print("Testing data cost:", testing_cost)

    # Plot of the fitted line (disabled):
    # plt.figure()
    # plt.plot(size_data_n, price_data_n, 'ro', label='Normalized samples')
    # plt.plot(size_data_test_n, price_data_test_n, 'go', label='Normalized testing samples')
    # plt.plot(size_data_n, sess.run(W) * size_data_n + sess.run(b), label='Fitted line')
    # plt.legend()
    # plt.show()

    # Save model weights (checkpoint) and the graph definition to disk.
    save_path = saver.save(sess, checkpoint_path, global_step=0,
                           latest_filename=checkpoint_state_name)
    print("Save path:", save_path)
    graph_path = tf.train.write_graph(sess.graph_def, 'models/', 'graph.pb',
                                      as_text=True)
    print("Graph path:", graph_path)

    # Merge graph with weights (freeze_graph export), kept for reference:
    # input_saver_def_path = ""
    # input_binary = False
    # output_node_names = "weight/read"
    # restore_op_name = "save/restore_all"
    # filename_tensor_name = "save/Const:0"
    # clear_devices = False
    # output_graph_path = "models/graphwithdata.pb"
    #
    # freeze_graph.freeze_graph(graph_path, input_saver_def_path,
    #                           input_binary, save_path, output_node_names,
    #                           filename_tensor_name, filename_tensor_name,
    #                           output_graph_path, clear_devices, "")