
Commit 7e09505

r-barnes authored and facebook-github-bot committed
Enable -Wunused-value in vision/PACKAGE +1
Summary: This diff enables the `-Wunused-value` compilation warning flag for the directory in question. Further details are in [this workplace post](https://fb.workplace.com/permalink.php?story_fbid=pfbid02XaWNiCVk69r1ghfvDVpujB8Hr9Y61uDvNakxiZFa2jwiPHscVdEQwCBHrmWZSyMRl&id=100051201402394).

This is a low-risk diff: there are **no run-time effects**, and it has already been observed to compile locally. **If the code compiles, it works; test errors are spurious.**

Differential Revision: D70282347

fbshipit-source-id: e2fa55c002d7124b13450c812165d244b8a53f4e
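For context, `-Wunused-value` flags statements that compute a result which is then discarded. A minimal, hypothetical illustration (not code from this diff):

```cpp
// Compile with: g++ -Wunused-value -c example.cpp
void example(int x) {
  x == 1; // warning: value computed is not used [-Wunused-value]
          // (likely a typo for the assignment `x = 1;`)
}
```

The changes below are in the same spirit: the `cudaError_t` returned by `cudaMemcpyAsync` was previously discarded, and wrapping the calls in `C10_CUDA_CHECK` both consumes that value and turns a silent failure into a reported error.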
1 parent 20bd8b3 · commit 7e09505

File tree

2 files changed (+8 -4 lines)


pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp (+3 -2)

```diff
--- a/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp
+++ b/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp
@@ -8,6 +8,7 @@
 
 #ifdef WITH_CUDA
 #include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAException.h>
 #include <cuda_runtime_api.h>
 #endif
 #include <torch/extension.h>
@@ -33,13 +34,13 @@ torch::Tensor sphere_ids_from_result_info_nograd(
           .contiguous();
   if (forw_info.device().type() == c10::DeviceType::CUDA) {
 #ifdef WITH_CUDA
-    cudaMemcpyAsync(
+    C10_CUDA_CHECK(cudaMemcpyAsync(
         result.data_ptr(),
         tmp.data_ptr(),
         sizeof(uint32_t) * tmp.size(0) * tmp.size(1) * tmp.size(2) *
             tmp.size(3),
         cudaMemcpyDeviceToDevice,
-        at::cuda::getCurrentCUDAStream());
+        at::cuda::getCurrentCUDAStream()));
 #else
     throw std::runtime_error(
         "Copy on CUDA device initiated but built "
```

pytorch3d/csrc/pulsar/pytorch/util.cpp (+5 -2)

```diff
--- a/pytorch3d/csrc/pulsar/pytorch/util.cpp
+++ b/pytorch3d/csrc/pulsar/pytorch/util.cpp
@@ -7,6 +7,7 @@
  */
 
 #ifdef WITH_CUDA
+#include <c10/cuda/CUDAException.h>
 #include <cuda_runtime_api.h>
 
 namespace pulsar {
@@ -17,15 +18,17 @@ void cudaDevToDev(
     const void* src,
     const int& size,
     const cudaStream_t& stream) {
-  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream);
+  C10_CUDA_CHECK(
+      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream));
 }
 
 void cudaDevToHost(
     void* trg,
     const void* src,
     const int& size,
     const cudaStream_t& stream) {
-  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream);
+  C10_CUDA_CHECK(
+      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream));
 }
 
 } // namespace pytorch
```
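A hypothetical caller of the helper above, to show the behavioral effect of the change; only `cudaDevToDev` and its signature come from the diff, the rest is illustrative setup:

```cpp
#include <c10/cuda/CUDAException.h>
#include <cuda_runtime_api.h>

namespace pulsar {
namespace pytorch {
// Forward declaration of the helper patched above.
void cudaDevToDev(
    void* trg, const void* src, const int& size, const cudaStream_t& stream);
} // namespace pytorch
} // namespace pulsar

void copy_example(void* dst, const void* src, int nbytes) {
  cudaStream_t stream;
  C10_CUDA_CHECK(cudaStreamCreate(&stream));
  // Before this diff, a failure inside the cudaMemcpyAsync below was
  // silently dropped; now it surfaces as an error at the call site.
  pulsar::pytorch::cudaDevToDev(dst, src, nbytes, stream);
  C10_CUDA_CHECK(cudaStreamSynchronize(stream));
  C10_CUDA_CHECK(cudaStreamDestroy(stream));
}
```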
