
Commit 580d441

Author: blublinsky
Initial commit
0 parents, commit 580d441

24 files changed: +4496 −0 lines

.gitignore

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
target/
# IntelliJ
.idea/
.idea_modules/
*.iml

CustomModelLearn.py

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
import numpy as np
import tensorflow as tf

# Declare the list of features; we only have one real-valued feature
def model(features, labels, mode):
    # Build a linear model and predict values
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W * features['x'] + b
    # Loss sub-graph
    loss = tf.reduce_sum(tf.square(y - labels))
    # Training sub-graph
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = tf.group(optimizer.minimize(loss),
                     tf.assign_add(global_step, 1))
    # ModelFnOps connects the subgraphs we built to the
    # appropriate functionality.
    return tf.contrib.learn.ModelFnOps(
        mode=mode, predictions=y,
        loss=loss,
        train_op=train)

estimator = tf.contrib.learn.Estimator(model_fn=model)

# Define our data set
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, 4, num_epochs=1000)

# Train
estimator.fit(input_fn=input_fn, steps=1000)
# Evaluate our model
print(estimator.evaluate(input_fn=input_fn, steps=10))
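For completeness, a minimal hedged sketch of querying the fitted estimator for new predictions, assuming the (now deprecated) tf.contrib.learn API used above; the input values and shuffle=False are illustrative assumptions, not part of the original file:

# Minimal sketch: reuse numpy_input_fn for inference (assumed contrib.learn API).
new_x = np.array([5., 6., 7.])
predict_fn = tf.contrib.learn.io.numpy_input_fn({"x": new_x}, num_epochs=1, shuffle=False)
# The fit approaches y = -x + 1, so values near [-4., -5., -6.] are expected.
print(list(estimator.predict(input_fn=predict_fn)))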

LinearRegression.py

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
import tensorflow as tf
import numpy as np

# Model parameters (dtype passed as a keyword; positionally, tf.Variable's
# second argument is `trainable`, not the dtype)
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# Loss: sum of the squared errors
loss = tf.reduce_sum(tf.square(linear_model - y))
# Optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# Training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# Training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset variables to their (incorrect) initial values
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})

# Evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))

LinearRegressionLearn.py

Lines changed: 138 additions & 0 deletions
@@ -0,0 +1,138 @@
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt

from tensorflow.python.tools import freeze_graph

# Example from https://blog.altoros.com/using-linear-regression-in-tensorflow.html
# As a training set for the tutorial, we use house prices in Portland, Oregon,
# where X (the predictor variable) is the house size and Y (the criterion variable) is the house price.
# The data set contains 47 examples.

# Exporting is based on https://medium.com/@hamedmp/exporting-trained-tensorflow-models-to-c-the-right-way-cf24b609d183#.rhjmkrdln
# and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph_test.py

# Training data set

size_data = numpy.asarray([ 2104, 1600, 2400, 1416, 3000, 1985, 1534, 1427,
                            1380, 1494, 1940, 2000, 1890, 4478, 1268, 2300,
                            1320, 1236, 2609, 3031, 1767, 1888, 1604, 1962,
                            3890, 1100, 1458, 2526, 2200, 2637, 1839, 1000,
                            2040, 3137, 1811, 1437, 1239, 2132, 4215, 2162,
                            1664, 2238, 2567, 1200,  852, 1852, 1203 ])
price_data = numpy.asarray([ 399900, 329900, 369000, 232000, 539900, 299900, 314900, 198999,
                             212000, 242500, 239999, 347000, 329999, 699900, 259900, 449900,
                             299900, 199900, 499998, 599000, 252900, 255000, 242900, 259900,
                             573900, 249900, 464500, 469000, 475000, 299900, 349900, 169900,
                             314900, 579900, 285900, 249900, 229900, 345000, 549000, 287000,
                             368500, 329900, 314000, 299000, 179900, 299900, 239500 ])

# Test data set

size_data_test = numpy.asarray([ 1600, 1494, 1236, 1100, 3137, 2238 ])
price_data_test = numpy.asarray([ 329900, 242500, 199900, 249900, 579900, 329900 ])

# Normalizing the data helps to improve the performance of gradient descent,
# especially in the case of multivariate linear regression.
def normalize(array):
    return (array - array.mean()) / array.std()

# Normalize the data sets

size_data_n = normalize(size_data)
price_data_n = normalize(price_data)

size_data_test_n = normalize(size_data_test)
price_data_test_n = normalize(price_data_test)

checkpoint_path = "models/saved_checkpoint"
checkpoint_state_name = "saved_checkpoint"

# Display a plot
# plt.plot(size_data, price_data, 'ro', label='Samples data')
# plt.legend()
# plt.draw()

samples_number = price_data_n.size

# TF graph input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Create a model

# Set model weights
W = tf.Variable(numpy.random.randn(), name="weight")
b = tf.Variable(numpy.random.randn(), name="bias")

# Set parameters
learning_rate = 0.1
training_iteration = 200

# Construct a linear model
model = tf.add(tf.multiply(X, W), b, name="model")

# Minimize squared errors
cost_function = tf.reduce_sum(tf.pow(model - Y, 2)) / (2 * samples_number)  # L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function, name='optimizer')  # Gradient descent

# Initialize variables (initialize_all_variables is deprecated)
init = tf.global_variables_initializer()

# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()

# Launch a graph
with tf.Session() as sess:
    sess.run(init)

    display_step = 20

    # Fit all training data
    for iteration in range(training_iteration):
        for (x, y) in zip(size_data_n, price_data_n):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Display logs per iteration step
        if iteration % display_step == 0:
            print("Iteration: %04d cost= %.9f W= %s b= %s" %
                  (iteration + 1,
                   sess.run(cost_function, feed_dict={X: size_data_n, Y: price_data_n}),
                   sess.run(W), sess.run(b)))

    # The data is already normalized, so feed it directly
    # (the original normalized the normalized arrays a second time)
    tuning_cost = sess.run(cost_function, feed_dict={X: size_data_n, Y: price_data_n})

    print("Tuning completed: cost= %.9f W= %s b= %s" % (tuning_cost, sess.run(W), sess.run(b)))

    # Validate the tuned model

    testing_cost = sess.run(cost_function, feed_dict={X: size_data_test_n, Y: price_data_test_n})

    print("Testing data cost: %s" % testing_cost)

    # Display a plot
    # plt.figure()
    # plt.plot(size_data_n, price_data_n, 'ro', label='Normalized samples')
    # plt.plot(size_data_test_n, price_data_test_n, 'go', label='Normalized testing samples')
    # plt.plot(size_data_n, sess.run(W) * size_data_n + sess.run(b), label='Fitted line')
    # plt.legend()

    # plt.show()

    # Save model weights and graph to disk
    save_path = saver.save(sess, checkpoint_path, global_step=0, latest_filename=checkpoint_state_name)
    print("Save path: %s" % save_path)
    graph_path = tf.train.write_graph(sess.graph_def, 'models/', 'graph.pb', as_text=True)
    print("Graph path: %s" % graph_path)

    # Merge graph with weights
    # input_saver_def_path = ""
    # input_binary = False
    # output_node_names = "weight/read"
    # restore_op_name = "save/restore_all"
    # filename_tensor_name = "save/Const:0"
    # clear_devices = False
    # output_graph_path = "models/graphwithdata.pb"

    # (The original passed filename_tensor_name twice and never used
    # restore_op_name; the argument order below fixes that.)
    # freeze_graph.freeze_graph(graph_path, input_saver_def_path,
    #                           input_binary, save_path, output_node_names,
    #                           restore_op_name, filename_tensor_name,
    #                           output_graph_path, clear_devices, "")
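A hedged sketch of what the commented-out freeze step could look like if enabled, mirroring the TF 1.x positional signature of tensorflow.python.tools.freeze_graph. The checkpoint path (saver.save with global_step=0 should yield "models/saved_checkpoint-0") and the output node name "model" (the name given to the tf.add op above) are assumptions, not verified values:

# Illustrative sketch only: fold the checkpointed variable values into the GraphDef.
# Assumes models/graph.pb and the checkpoint written above exist.
from tensorflow.python.tools import freeze_graph

freeze_graph.freeze_graph("models/graph.pb",            # input_graph, written as text above
                          "",                           # input_saver: no SaverDef file
                          False,                        # input_binary: graph.pb is text
                          "models/saved_checkpoint-0",  # input_checkpoint (assumed path)
                          "model",                      # output_node_names (assumed)
                          "save/restore_all",           # restore_op_name
                          "save/Const:0",               # filename_tensor_name
                          "models/graphwithdata.pb",    # output_graph
                          False,                        # clear_devices
                          "")                           # initializer_nodes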

LogisticRegression.py

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
# From https://github.com/nlintz/TensorFlow-Tutorials/blob/master/02_logistic_regression.py

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w):
    # Notice we use the same model as linear regression: there is a baked-in
    # cost function below which performs softmax and cross entropy.
    return tf.matmul(X, w)

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

X = tf.placeholder("float", [None, 784])  # create symbolic variables
Y = tf.placeholder("float", [None, 10])

w = init_weights([784, 10])  # like in linear regression, we need a shared variable weight matrix for logistic regression

py_x = model(X, w)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))  # compute mean cross entropy (softmax is applied internally)
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)  # construct optimizer
predict_op = tf.argmax(py_x, 1)  # at predict time, evaluate the argmax of the logistic regression

# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()

    for i in range(100):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX) + 1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
        print(i, np.mean(np.argmax(teY, axis=1) == sess.run(predict_op, feed_dict={X: teX})))
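The zip over two offset ranges is what slices the training set into consecutive 128-example mini-batches; a small illustration of the (start, end) pairs it yields (the 512 bound is arbitrary, chosen for display):

# Illustration only: the mini-batch boundaries produced by the loop above.
print(list(zip(range(0, 512, 128), range(128, 512 + 1, 128))))
# -> [(0, 128), (128, 256), (256, 384), (384, 512)]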

MNIST_data/t10k-images-idx3-ubyte.gz

1.57 MB
Binary file not shown.

MNIST_data/t10k-labels-idx1-ubyte.gz

4.44 KB
Binary file not shown.

MNIST_data/train-images-idx3-ubyte.gz

9.45 MB
Binary file not shown.

MNIST_data/train-labels-idx1-ubyte.gz

28.2 KB
Binary file not shown.

README.md

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
Tensorflow Python
===========

This is my simple playground for Tensorflow Python.
Follow https://www.tensorflow.org/install/install_mac for installation on Mac. I am using Python 2.7
with the latest Tensorflow version.
Most of the examples are my attempts to learn Tensorflow, and I specify where they come from.
The most useful ones are in the Tensorflow tutorial folder and show how to export a graph
reflecting the training results.
Additionally, the TensorflowBoard and Polynomial regression examples are integrated with
TensorBoard, a Python application that is very useful for viewing the graph and the results
of training.
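Since the README highlights TensorBoard for inspecting graphs and training results, here is a minimal hedged sketch of wiring a graph into it; the logs/ directory name is an arbitrary assumption:

# Minimal sketch (TF 1.x): dump the default graph so TensorBoard can render it.
import tensorflow as tf

writer = tf.summary.FileWriter('logs/', tf.get_default_graph())
writer.close()
# Then launch with: tensorboard --logdir logs/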
