//
// BatchDescentLearner.cpp
// Vasco
//
// Created by Jamil Dhanani on 3/20/2014.
// Copyright (c) 2014 Jamil Dhanani. All rights reserved.
//
#include "BatchDescentLearner.h"
#define BATCH_TOLERANCE 0.001
#pragma mark - Constructor
BatchDescentLearner::BatchDescentLearner(int numParams, vector<SupervisedData*> data, float learningRate) : Learner(numParams, data, learningRate) {
}
BatchDescentLearner::BatchDescentLearner(int numParams, vector<SupervisedData*> data, float learningRate, float (*hypothesis)(float y)) : Learner(numParams, data, learningRate, hypothesis) {
}
void BatchDescentLearner::update() {
    updateUntilConvergence();
}
#pragma mark - Hypothesis
double BatchDescentLearner::getHypothesisForData(Data* data) {
    // LMS REGRESSION
    // h_{\theta}(x) = \theta^{T}x
    double hyp = 0;
    for (int i = 0; i < _M; i++) {
        hyp += data->features()[i] * _parameterValues[i];
    }
    if (_hypothesis)
        hyp = _hypothesis(hyp);
    return hyp;
}
#pragma mark - Update Rule
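// Batch gradient descent for the LMS objective: for every parameter j, take the step
// \theta_j := \theta_j + \alpha \sum_{i} (y^{(i)} - h_{\theta}(x^{(i)})) x_j^{(i)}
// summing the error over the entire training set before each update.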
void BatchDescentLearner::updateUntilConvergence() {
    float maxUpdate;
    int numExamples = (int)_data.size();
    do {
        maxUpdate = 0;
        // for each parameter
        for (int j = 0; j < _M; j++) {
            // accumulate the error over every training example (batch update)
            float sum_err = 0;
            for (int i = 0; i < numExamples; i++) {
                float hyp = getHypothesisForData(_data.at(i));
                float actual = _data.at(i)->supervisedValue();
                sum_err += (actual - hyp) * _data.at(i)->features()[j];
            }
            float update_size = sum_err * _learningRate;
            // update the parameter and track the largest step taken this pass
            _parameterValues[j] += update_size;
            if (fabs(update_size) > maxUpdate)
                maxUpdate = fabs(update_size);
            LOG("Update size: %f, New Value: %f\n", update_size, _parameterValues[j]);
        }
        // converged once no parameter moved by more than the tolerance
    } while (maxUpdate > BATCH_TOLERANCE);
}