-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLinearRegression.py
More file actions
90 lines (62 loc) · 2.16 KB
/
LinearRegression.py
File metadata and controls
90 lines (62 loc) · 2.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import numpy as np
def normalize(feature):
    """Mean-center *feature* and scale it by its value range.

    Returns (feature - mean) / (max - min), element-wise.
    """
    centre = np.mean(feature)
    spread = max(feature) - min(feature)
    return (feature - centre) / spread
def computeCost(X, y, theta):
    """Return the squared-error cost J(theta) for linear regression.

    X     -- (m, n) design matrix (bias column included)
    y     -- (m,) target vector
    theta -- (n, 1) parameter column vector
    """
    m = len(y)
    # flatten (m, 1) predictions to (m,) so subtraction against y is element-wise
    predictions = np.dot(X, theta).reshape(-1)
    residuals = predictions - y
    return sum(residuals ** 2) / (2 * m)
def gradientDescent(X, y, theta, alpha, epochs):
    """Run batch gradient descent, updating *theta* in place.

    X      -- (m, n) design matrix (bias column included)
    y      -- (m,) target vector
    theta  -- (n, 1) initial parameters; mutated in place each epoch
    alpha  -- learning rate
    epochs -- number of full-batch update steps

    Returns (theta, J_hist) where J_hist[e] is the cost after epoch e.
    """
    m = len(y)
    parameters = theta.shape[0]
    # BUG FIX: the original used np.array([0 for i in range(epochs)]), an
    # *integer* array, so every stored cost was truncated (any J < 1 became 0).
    # A float array preserves the actual cost history.
    J_hist = np.zeros(epochs)
    tmp = [0 for _ in range(parameters)]
    for epoch in range(epochs):
        hx = np.dot(X, theta).reshape(-1)
        err = hx - y
        # Simultaneous update: compute every new parameter from the OLD theta
        # before writing any of them back.
        for i in range(parameters):
            tmp[i] = theta[i] - alpha * (1 / m) * sum(err * X[:, i])
        for i in range(parameters):
            theta[i] = tmp[i]
        J_hist[epoch] = computeCost(X, y, theta)
    return theta, J_hist
def make_predictions(theta):
    """Prompt for a population value and print the model's profit estimate.

    theta -- fitted parameters; theta[0] is the intercept, theta[1] the slope.
    The printed figure is scaled by 10000 (dataset units are in 10k).
    """
    population = float(input("Enter population: "))
    estimate = theta[0] + theta[1] * population
    print("Predicted profit: ", estimate * 10000)
# --- Script entry: load data, fit a univariate linear model, then predict ---
data = np.genfromtxt('ex1data1.txt', delimiter=',')
X = data[:, 0]  # input feature (population)
y = data[:, 1]  # target (profit)
# BUG FIX: the original called see_plot(X, y) here, but no such function is
# defined or imported anywhere in this file, so the script crashed with a
# NameError before training. Re-add the call once see_plot exists.
m = len(y)  # number of training examples
# X = normalize(X)  # optional feature scaling (single feature, left disabled)
x0 = np.ones((m, 1))
X = X.reshape((X.shape[0], 1))  # column vector of the raw feature
X = np.concatenate((x0, X), axis=1)  # prepend bias column -> (m, 2) design matrix
theta = np.zeros((2, 1))
iterations = 1500
alpha = 0.01
J = computeCost(X, y, theta)
print("Initial cost on zero theta values = ", J)
print("Performing Gradient Descent for {} ITERATIONS".format(iterations))
print("Learning Rate: {}".format(alpha))
(theta, J_vals) = gradientDescent(X, y, theta, alpha, iterations)
print("After Gradient descent, found theta to be: ", theta)
# Interactive prediction loop; 'Q'/'q' quits, anything else prompts a prediction.
while True:
    halt = input("Our Model is ready! Press Q when you want to stop making predictions")
    if halt == 'Q' or halt == 'q':
        break
    make_predictions(theta)