-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathPow.h
51 lines (42 loc) · 1.48 KB
/
Pow.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
#ifndef LIBDL_POW_H
#define LIBDL_POW_H
#include "../Tensor.h"
#include "../Utils.h"
template <typename D, std::int64_t R>
class Pow : public CNode<D, R> {
public:
    /*
     * \brief gradient node recording an elementwise power operation (result = x^p)
     *
     * \param x the input tensor of the forward pass
     * \param p the exponent the input was raised to
     * \param result the tensor produced by the forward pass
     * */
    Pow(
            const std::shared_ptr<Tensor<D, R>> &x,
            D p,
            const std::shared_ptr<Tensor<D, R>> &result)
            : CNode<D, R>(Utils::removeOption<std::shared_ptr<CNodeBase>>({x->gradFn}), result),
              x(x->data),
              cx(x->gradFn),
              p(p) {}

    /*
     * \brief computes x to the power of p elementwise
     *
     * \param x the tensor for which the power should be computed
     * \param p the power to which the tensor should be raised
     *
     * \return a new tensor with the same shape as x in which all elements have been raised to the power of p
     * */
    static std::shared_ptr<Tensor<D, R>> pow(
            const std::shared_ptr<Tensor<D, R>> &x,
            D p) {
        auto out = std::make_shared<Tensor<D, R>>(x->data->pow(p), x->data->dimensions());
        // only attach a backward node when x participates in autodiff and
        // gradient recording has not been globally disabled
        const bool trackGradient = x->needsGradient() && !CNodeBase::noGrad;
        if (trackGradient)
            out->setGradFn(std::make_shared<Pow<D, R>>(x, p, out));
        return out;
    }

    void computeGradients() override {
        // chain rule: d(x^p)/dx = p * x^(p-1), multiplied by the incoming gradient
        if (cx)
            cx.value()->addGrad(x->constant(p) * x->pow(p - 1) * *CNode<D, R>::grad);
        CNode<D, R>::finishComputeGradient();
    }

private:
    std::shared_ptr<Eigen::Tensor<D, R>> x;          // forward-pass input data, kept for the backward pass
    std::optional<std::shared_ptr<CNode<D, R>>> cx;  // gradient node of x, if x has one
    D p;                                             // the exponent
};
#endif //LIBDL_POW_H