#ifndef LIBDL_LEAKYRELU_H
#define LIBDL_LEAKYRELU_H

#include <cstdint>
#include <memory>
#include <optional>
#include "CNode.h"
#include "../Utils.h"

/*
 * Autograd node for the leaky ReLU operation: the forward pass is created via
 * LeakyRelu::leakyRelu, and computeGradients routes the incoming gradient back to x.
 * */
template <typename D, std::int64_t R>
class LeakyRelu : public CNode<D, R> {
public:
    LeakyRelu(
            const std::shared_ptr<Tensor<D, R>> &x,
            const std::shared_ptr<Tensor<D, R>> &result,
            D negativeSlope)
            : CNode<D, R>(Utils::removeOption<std::shared_ptr<CNodeBase>>({x->gradFn}), result),
              x(x->data),
              cx(x->gradFn),
              negativeSlope(negativeSlope) {}

    /*
     * \brief applies the leaky ReLU function elementwise
     *
     * \param x tensor of any shape
     * \param negativeSlope factor by which negative values are scaled
     *
     * \return a new tensor with the same shape as x in which all negative values are scaled by negativeSlope
     * */
    static std::shared_ptr<Tensor<D, R>> leakyRelu(
            const std::shared_ptr<Tensor<D, R>> &x,
            D negativeSlope) {
        // elementwise mask: 1 where x >= 0, negativeSlope where x < 0
        auto mask = (*x->data >= x->data->constant(0)).select(x->data->constant(1), x->data->constant(negativeSlope));
        auto result = std::make_shared<Tensor<D, R>>(*x->data * mask, x->data->dimensions());
        // register the backward node unless gradients are disabled
        if (x->needsGradient() && !CNodeBase::noGrad)
            result->setGradFn(std::make_shared<LeakyRelu<D, R>>(x, result, negativeSlope));
        return result;
    }
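
    /*
     * Illustrative sketch (not part of the class API): the forward mask above is
     * plain Eigen tensor arithmetic. With a standalone Eigen::Tensor<float, 1> t
     * holding {-2, 3} and negativeSlope = 0.01:
     *
     *   Eigen::Tensor<float, 1> t(2);
     *   t.setValues({-2.f, 3.f});
     *   Eigen::Tensor<float, 1> mask =
     *           (t >= t.constant(0)).select(t.constant(1), t.constant(0.01f));
     *   Eigen::Tensor<float, 1> out = t * mask;   // {-0.02, 3}, i.e. leaky ReLU of t
     * */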
    void computeGradients() override {
        if (cx.has_value()) {
            // d/dx leakyRelu(x) is 1 for x >= 0 and negativeSlope otherwise,
            // so the upstream gradient is scaled by the same mask as in the forward pass
            auto mask = (*x >= x->constant(0)).select(x->constant(1), x->constant(negativeSlope));
            cx.value()->addGrad(mask * *CNode<D, R>::grad);
        }
        CNode<D, R>::finishComputeGradient();
    }

private:
    std::shared_ptr<Eigen::Tensor<D, R>> x;          // forward input, kept for the backward mask
    std::optional<std::shared_ptr<CNode<D, R>>> cx;  // gradient node of x, if x requires gradients
    D negativeSlope;
};

#endif //LIBDL_LEAKYRELU_H
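
A minimal usage sketch follows, for orientation only. It assumes a Tensor<D, R> that can be constructed from an Eigen tensor expression plus its dimensions, mirroring the construction inside leakyRelu above; the library's actual tensor-creation and backward-pass entry points may differ and are not shown in this header.

#include <memory>
#include <unsupported/Eigen/CXX11/Tensor>
#include "LeakyRelu.h"

int main() {
    // Small 2x3 input with random entries.
    Eigen::Tensor<float, 2> raw(2, 3);
    raw.setRandom();

    // Assumed constructor shape: (Eigen expression, dimensions), as used in leakyRelu.
    auto x = std::make_shared<Tensor<float, 2>>(raw, raw.dimensions());

    // Negative entries are scaled by 0.01; non-negative entries pass through unchanged.
    auto y = LeakyRelu<float, 2>::leakyRelu(x, 0.01f);

    // If x had been marked as requiring gradients (API not visible in this header),
    // y->gradFn would carry a LeakyRelu node whose computeGradients() scales the
    // upstream gradient by the same mask and forwards it to x.
    return 0;
}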