// ReduceAllOps.cpp
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/ReduceAllOps.h>
#include <ATen/native/Resize.h>
#include <ATen/core/Tensor.h>
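
// With AT_PER_OPERATOR_HEADERS defined, only the generated headers for the
// operators this file actually uses are included, which keeps incremental
// build times down compared to pulling in the monolithic ATen/Functions.h.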
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min_native.h>
#endif
namespace at {
namespace native {
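
// Device-dispatch entry points for the all-element reductions below. Each
// backend registers its kernel for these stubs via REGISTER_DISPATCH.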
DEFINE_DISPATCH(min_all_stub);
DEFINE_DISPATCH(max_all_stub);
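
// min(self): reduces over every element of `self` and returns a 0-dim tensor.
// A minimal usage sketch (hypothetical values, assuming libtorch):
//   at::Tensor t = at::randn({2, 3});
//   at::Tensor m = at::min(t);  // 0-dim tensor holding the global minimum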
Tensor min(const Tensor &self) {
  TORCH_CHECK(self.numel() > 0,
      "min(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");
  Tensor result = at::empty({}, self.options());
  min_all_stub(self.device().type(), result, self.contiguous());
  return result;
}
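
// Out variant of min(self): writes the global minimum into `out` after
// resizing it to 0-dim. A sketch, assuming the generated unary out overload:
//   at::Tensor out = at::empty({}, t.options());
//   at::min_out(out, t);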
Tensor& min_unary_out(const Tensor &self, Tensor& out) {
  // First check if the devices match (CPU vs GPU)
  TORCH_CHECK(self.device() == out.device());
  TORCH_CHECK(canCast(
      typeMetaToScalarType(self.dtype()),
      typeMetaToScalarType(out.dtype())));
  at::native::resize_output(out, {});
  min_all_stub(self.device().type(), out, self.contiguous());
  return out;
}
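
// max(self): mirror of min(self) above; returns a 0-dim tensor holding the
// global maximum.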
Tensor max(const Tensor &self) {
  TORCH_CHECK(self.numel() > 0,
      "max(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");
  Tensor result = at::empty({}, self.options());
  max_all_stub(self.device().type(), result, self.contiguous());
  return result;
}
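
// Out variant of max(self), with the same device and dtype-cast checks as
// min_unary_out.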
Tensor& max_unary_out(const Tensor &self, Tensor& out) {
  // First check if the devices match (CPU vs GPU)
  TORCH_CHECK(self.device() == out.device());
  TORCH_CHECK(canCast(
      typeMetaToScalarType(self.dtype()),
      typeMetaToScalarType(out.dtype())));
  at::native::resize_output(out, {});
  max_all_stub(self.device().type(), out, self.contiguous());
  return out;
}
// DEPRECATED: Use at::aminmax instead
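// at::aminmax computes both extrema in a single fused reduction, e.g.
// (a sketch, assuming libtorch):
//   auto [mn, mx] = at::aminmax(t);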
std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
      " This warning will only appear once per process.");
  return at::aminmax(self);
}
}} // namespace at::native