
Commit 580d441

Author: blublinsky
Committed: Initial commit
0 parents, commit 580d441

24 files changed, +4496 -0 lines

Diff for: .gitignore (+5)

@@ -0,0 +1,5 @@
target/
# IntelliJ
.idea/
.idea_modules/
*.iml

Diff for: CustomModelLearn.py (+34)

@@ -0,0 +1,34 @@
import numpy as np
import tensorflow as tf

# Declare the list of features; we only have one real-valued feature
def model(features, labels, mode):
    # Build a linear model and predict values
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W * features['x'] + b
    # Loss sub-graph
    loss = tf.reduce_sum(tf.square(y - labels))
    # Training sub-graph
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = tf.group(optimizer.minimize(loss),
                     tf.assign_add(global_step, 1))
    # ModelFnOps connects the subgraphs we built to the
    # appropriate functionality.
    return tf.contrib.learn.ModelFnOps(
        mode=mode, predictions=y,
        loss=loss,
        train_op=train)

estimator = tf.contrib.learn.Estimator(model_fn=model)

# define our data set
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, 4, num_epochs=1000)

# train
estimator.fit(input_fn=input_fn, steps=1000)
# evaluate our model
print(estimator.evaluate(input_fn=input_fn, steps=10))

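tf.contrib.learn was deprecated over the course of TF 1.x. As a hedged sketch only (assuming TF 1.4+, where tf.estimator and tf.estimator.inputs.numpy_input_fn are available), the same custom model could be expressed against the core Estimator API roughly like this:

import numpy as np
import tensorflow as tf

def model_fn(features, labels, mode):
    # Same linear model as above, returned as an EstimatorSpec
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W * features['x'] + b
    loss = tf.reduce_sum(tf.square(y - labels))
    train = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, predictions=y, loss=loss, train_op=train)

estimator = tf.estimator.Estimator(model_fn=model_fn)
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": np.array([1., 2., 3., 4.])}, np.array([0., -1., -2., -3.]),
    batch_size=4, num_epochs=None, shuffle=True)
estimator.train(input_fn=input_fn, steps=1000)
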
Diff for: LinearRegression.py (+28)

@@ -0,0 +1,28 @@
import tensorflow as tf
import numpy as np

# Model parameters
W = tf.Variable([.3], dtype=tf.float32)  # dtype must be a keyword; positionally it would bind to 'trainable'
b = tf.Variable([-.3], dtype=tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y))  # sum of the squares
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to the (incorrect) initial guesses
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})

# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))

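Since y_train is exactly -1 * x_train + 1, gradient descent should converge to W of about -1 and b of about 1 with a loss near zero. A quick closed-form cross-check (an illustrative snippet, not part of the original file):

import numpy as np
# Fit a degree-1 polynomial by least squares and compare with the trained values
W_ls, b_ls = np.polyfit([1, 2, 3, 4], [0, -1, -2, -3], 1)
print(W_ls, b_ls)  # expect approximately -1.0 and 1.0
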
Diff for: LinearRegressionLearn.py (+138)

@@ -0,0 +1,138 @@
from __future__ import print_function  # keeps print() usable under Python 2.7

import tensorflow as tf
import numpy
import matplotlib.pyplot as plt

from tensorflow.python.tools import freeze_graph

# Example from https://blog.altoros.com/using-linear-regression-in-tensorflow.html
# As a training set for the tutorial, we use house prices in Portland, Oregon,
# where X (the predictor variable) is the house size and Y (the criterion variable) is the house price.
# The data set contains 47 examples.

# Exporting is based on https://medium.com/@hamedmp/exporting-trained-tensorflow-models-to-c-the-right-way-cf24b609d183#.rhjmkrdln
# and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph_test.py

# Training data set
size_data = numpy.asarray([2104, 1600, 2400, 1416, 3000, 1985, 1534, 1427,
                           1380, 1494, 1940, 2000, 1890, 4478, 1268, 2300,
                           1320, 1236, 2609, 3031, 1767, 1888, 1604, 1962,
                           3890, 1100, 1458, 2526, 2200, 2637, 1839, 1000,
                           2040, 3137, 1811, 1437, 1239, 2132, 4215, 2162,
                           1664, 2238, 2567, 1200, 852, 1852, 1203])
price_data = numpy.asarray([399900, 329900, 369000, 232000, 539900, 299900, 314900, 198999,
                            212000, 242500, 239999, 347000, 329999, 699900, 259900, 449900,
                            299900, 199900, 499998, 599000, 252900, 255000, 242900, 259900,
                            573900, 249900, 464500, 469000, 475000, 299900, 349900, 169900,
                            314900, 579900, 285900, 249900, 229900, 345000, 549000, 287000,
                            368500, 329900, 314000, 299000, 179900, 299900, 239500])

# Test data set
size_data_test = numpy.asarray([1600, 1494, 1236, 1100, 3137, 2238])
price_data_test = numpy.asarray([329900, 242500, 199900, 249900, 579900, 329900])

# Normalizing the data helps to improve the performance of gradient descent,
# especially in the case of multivariate linear regression.
def normalize(array):
    return (array - array.mean()) / array.std()

# Normalize the data sets
size_data_n = normalize(size_data)
price_data_n = normalize(price_data)

size_data_test_n = normalize(size_data_test)
price_data_test_n = normalize(price_data_test)

checkpoint_path = "models/saved_checkpoint"
checkpoint_state_name = "saved_checkpoint"

# Display a plot
# plt.plot(size_data, price_data, 'ro', label='Samples data')
# plt.legend()
# plt.draw()

samples_number = price_data_n.size

# TF graph input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Create a model

# Set model weights
W = tf.Variable(numpy.random.randn(), name="weight")
b = tf.Variable(numpy.random.randn(), name="bias")

# Set parameters
learning_rate = 0.1
training_iteration = 200

# Construct a linear model
model = tf.add(tf.multiply(X, W), b, name="model")

# Minimize squared errors
cost_function = tf.reduce_sum(tf.pow(model - Y, 2)) / (2 * samples_number)  # L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function, name='optimizer')  # Gradient descent

# Initialize variables (tf.initialize_all_variables is deprecated in favor of this)
init = tf.global_variables_initializer()

# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    display_step = 20

    # Fit all training data
    for iteration in range(training_iteration):
        for (x, y) in zip(size_data_n, price_data_n):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Display logs per iteration step
        if iteration % display_step == 0:
            print("Iteration:", '%04d' % (iteration + 1), "cost=",
                  "{:.9f}".format(sess.run(cost_function, feed_dict={X: size_data_n, Y: price_data_n})),
                  "W=", sess.run(W), "b=", sess.run(b))

    tuning_cost = sess.run(cost_function, feed_dict={X: normalize(size_data_n), Y: normalize(price_data_n)})

    print("Tuning completed:", "cost=", "{:.9f}".format(tuning_cost), "W=", sess.run(W), "b=", sess.run(b))

    # Validate the tuned model
    testing_cost = sess.run(cost_function, feed_dict={X: size_data_test_n, Y: price_data_test_n})

    print("Testing data cost:", testing_cost)

    # Display a plot
    # plt.figure()
    # plt.plot(size_data_n, price_data_n, 'ro', label='Normalized samples')
    # plt.plot(size_data_test_n, price_data_test_n, 'go', label='Normalized testing samples')
    # plt.plot(size_data_n, sess.run(W) * size_data_n + sess.run(b), label='Fitted line')
    # plt.legend()
    # plt.show()

    # Save model weights and graph to disk
    save_path = saver.save(sess, checkpoint_path, global_step=0, latest_filename=checkpoint_state_name)
    print("Save path:", save_path)
    graph_path = tf.train.write_graph(sess.graph_def, 'models/', 'graph.pb', as_text=True)
    print("Graph path:", graph_path)

    # Merge the graph with the weights
    # input_saver_def_path = ""
    # input_binary = False
    # output_node_names = "weight/read"
    # restore_op_name = "save/restore_all"
    # filename_tensor_name = "save/Const:0"
    # clear_devices = False
    # output_graph_path = "models/graphwithdata.pb"

    # Note: restore_op_name comes before filename_tensor_name in the argument list
    # freeze_graph.freeze_graph(graph_path, input_saver_def_path,
    #                           input_binary, save_path, output_node_names,
    #                           restore_op_name, filename_tensor_name,
    #                           output_graph_path, clear_devices, "")

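The commented-out freeze_graph block above would bake the trained weight into a single self-contained GraphDef. A minimal sketch of reading that frozen graph back (assuming the freeze step was actually run with output_node_names="weight/read" and wrote models/graphwithdata.pb):

import tensorflow as tf

with tf.gfile.GFile("models/graphwithdata.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    w = graph.get_tensor_by_name("weight/read:0")  # the node the graph was frozen at
    with tf.Session(graph=graph) as sess:
        print(sess.run(w))  # the trained weight, now stored as a constant
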
Diff for: LogisticRegression.py (+36)

@@ -0,0 +1,36 @@
# From https://github.com/nlintz/TensorFlow-Tutorials/blob/master/02_logistic_regression.py

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w):
    # Notice we use the same model as linear regression: TF provides a baked-in
    # cost function that performs softmax and cross entropy on these logits.
    return tf.matmul(X, w)


mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

X = tf.placeholder("float", [None, 784])  # create symbolic variables
Y = tf.placeholder("float", [None, 10])

w = init_weights([784, 10])  # like in linear regression, we need a shared variable weight matrix for logistic regression

py_x = model(X, w)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))  # compute mean cross entropy (softmax is applied internally)
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)  # construct optimizer
predict_op = tf.argmax(py_x, 1)  # at predict time, evaluate the argmax of the logistic regression

# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()

    for i in range(100):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX) + 1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
        print(i, np.mean(np.argmax(teY, axis=1) == sess.run(predict_op, feed_dict={X: teX})))

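For intuition about that baked-in cost function: tf.nn.softmax_cross_entropy_with_logits applies softmax to the logits and then takes cross entropy against the one-hot labels, fused into one op. A plain-numpy sketch of the underlying math (for illustration only; TF's real implementation is the fused, numerically stabilized version):

import numpy as np

def softmax_cross_entropy(logits, labels):
    e = np.exp(logits - logits.max(axis=1, keepdims=True))  # shift for numerical stability
    probs = e / e.sum(axis=1, keepdims=True)                # softmax over classes
    return -np.sum(labels * np.log(probs), axis=1)          # cross entropy per example

logits = np.array([[2.0, 1.0, 0.1]])
labels = np.array([[1.0, 0.0, 0.0]])
print(softmax_cross_entropy(logits, labels))  # approximately [0.417]
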
Diff for: MNIST_data/t10k-images-idx3-ubyte.gz (1.57 MB) - binary file not shown

Diff for: MNIST_data/t10k-labels-idx1-ubyte.gz (4.44 KB) - binary file not shown

Diff for: MNIST_data/train-images-idx3-ubyte.gz (9.45 MB) - binary file not shown

Diff for: MNIST_data/train-labels-idx1-ubyte.gz (28.2 KB) - binary file not shown

Diff for: README.md (+12)

@@ -0,0 +1,12 @@
Tensorflow Python
===========

This is my simple playground for TensorFlow in Python.
Follow https://www.tensorflow.org/install/install_mac for installation on a Mac. I am using Python 2.7
with the latest TensorFlow version.
Most of the examples are my attempts to learn TensorFlow, and each one specifies where it comes from.
The most useful ones are in the Tensorflow tutorial folder; they show how to export a graph
reflecting the training results.
Additionally, the TensorflowBoard and Polynomial regression examples are integrated with
TensorBoard, a Python application that is very useful for viewing the graph and the results
of training.

Diff for: SavingRestoringModel.py (+130)

@@ -0,0 +1,130 @@
# From https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/save_restore_model.py

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Parameters
learning_rate = 0.001
batch_size = 100
display_step = 1
model_path = "/Users/boris/Projects/TensorFlowPython/models/MNISTNueralNet.ckpt"

# Network Parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 256  # 2nd layer number of features
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])


# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()

# Running the first session - learning
print("Starting 1st session...")
with tf.Session() as sess:
    # Initialize variables
    sess.run(init)

    # Training cycle
    for epoch in range(3):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                  "{:.9f}".format(avg_cost))
    print("First Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    # Save model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)

# Running a new session - continue training
print("Starting 2nd session...")
with tf.Session() as sess:
    # Initialize variables
    sess.run(init)

    # Restore model weights from the previously saved model
    saver.restore(sess, model_path)
    print("Model restored from file: %s" % model_path)

    # Resume training
    for epoch in range(7):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                  "{:.9f}".format(avg_cost))
    print("Second Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval(
        {x: mnist.test.images, y: mnist.test.labels}))

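To verify what the checkpoint actually contains, the saved variables can be listed with TF 1.x's checkpoint reader (a minimal sketch using the model_path from the script above):

import tensorflow as tf

reader = tf.train.NewCheckpointReader(
    "/Users/boris/Projects/TensorFlowPython/models/MNISTNueralNet.ckpt")
# Print every saved variable's name and shape
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print(name, shape)
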