-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbinary_classifier.py
More file actions
59 lines (50 loc) · 2.05 KB
/
binary_classifier.py
File metadata and controls
59 lines (50 loc) · 2.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# ====================================================================================
# This is a simple neural network (really just a binary linear logistic classifier)
# with no frills, meant to train a vector of weights (no bias in this one!),
# used in the Astrophysical Machine Learning course at the University of Iowa
# https://astrophysicalmachinelearning.wordpress.com/ taught by Shea Brown
# Written by Shea Brown, [email protected], https://sheabrownastro.wordpress.com/
# =====================================================================================
import numpy as np
import sys
# sigmoid function
# -----------------------
def sigmoid(x, deriv=False):
    """Logistic sigmoid, or its derivative.

    Parameters
    ----------
    x : float or ndarray
        Input value(s). When ``deriv=True``, ``x`` is assumed to already be
        a sigmoid *output* (the usual shortcut: s'(z) = s(z)*(1 - s(z))).
    deriv : bool, optional
        If True, return the derivative evaluated at the activation ``x``
        instead of the sigmoid itself.

    Returns
    -------
    float or ndarray
        ``1 / (1 + exp(-x))``, or ``x * (1 - x)`` when ``deriv`` is True.
    """
    # idiomatic truthiness check instead of `if(deriv==True)`
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
# Function to train the neural net
# --------------------------------
def train(X, y, N=10000, alpha=0.01):
    """Train a bias-free binary logistic classifier by full-batch gradient descent.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Training inputs.
    y : ndarray, shape (n_samples, 1)
        Binary target labels (0 or 1).
    N : int, optional
        Number of gradient-descent epochs.
    alpha : float, optional
        Learning rate.

    Returns
    -------
    ndarray, shape (n_features, 1)
        The learned weight vector.
    """
    # Fixed seed so repeated runs reproduce the same initial weights.
    np.random.seed(1)
    # initialize weights randomly with mean 0, in [-1, 1)
    W = 2 * np.random.random((len(X[0]), 1)) - 1
    # Start gradient descent
    # ---------------------------------------------------
    # NOTE: `range` (not Python-2-only `xrange`); loop variable renamed so
    # it no longer shadows the `iter` builtin.
    for epoch in range(N):
        sys.stdout.write("\rProcessing epoch %i" % epoch)
        sys.stdout.flush()
        # Forward Propagation (make a guess)
        # We do this for all our training examples
        # in one go.
        l1 = sigmoid(np.dot(X, W))
        # how much did we miss?
        l1_error = y - l1
        # multiply how much we missed by the
        # slope of the sigmoid at the values in l1
        # times a learning rate
        # -------------------------------------------------
        l1_delta = alpha * l1_error * sigmoid(l1, True)
        # update weights by multiplying l1_delta by the
        # transpose of the data (this is what you get
        # if you take the gradient of a loss function
        # that is Loss=0.5*(y-l1)**2, which is an L2
        # norm loss function, or least squares error
        # -------------------------------------------
        W += np.dot(X.T, l1_delta)
    return W
# Function to make a prediction with user-defined weights W
# ----------------------------------------------------------
def predict(data, W):
    """Return sigmoid activations (probabilities) for `data` under weights `W`.

    `data` has shape (n_samples, n_features) and `W` has shape
    (n_features, 1); the result has shape (n_samples, 1).
    """
    return sigmoid(np.dot(data, W))