Skip to content

Commit

Permalink
gpu: nvidia: skip unsupported gtests
Browse files · Browse the repository at this point in the history
  • Loading branch information
sgeor255 committed Nov 12, 2024
1 parent 8935834 commit 2861953
Show file tree
Hide file tree
Showing 7 changed files with 17 additions and 9 deletions.
5 changes: 4 additions & 1 deletion examples/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,10 @@ if(DNNL_SYCL_CUDA)
${CMAKE_CURRENT_SOURCE_DIR}/primitives/lstm.cpp
${CMAKE_CURRENT_SOURCE_DIR}/primitives/layer_normalization.cpp
${CMAKE_CURRENT_SOURCE_DIR}/primitives/reorder.cpp
${CMAKE_CURRENT_SOURCE_DIR}/primitives/shuffle.cpp)
${CMAKE_CURRENT_SOURCE_DIR}/primitives/shuffle.cpp
${CMAKE_CURRENT_SOURCE_DIR}/primitives/group_normalization.cpp
${CMAKE_CURRENT_SOURCE_DIR}/primitives/vanilla_rnn.cpp
${CMAKE_CURRENT_SOURCE_DIR}/primitives/lbr_gru.cpp)
endif()

# Remove examples for Graph API if graph component is not enabled
Expand Down
2 changes: 1 addition & 1 deletion tests/gtests/sycl/api/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ if(DNNL_WITH_SYCL)
endif()

# Enable linking SYCL kernels.
if(DNNL_SYCL_CUDA OR (DNNL_SYCL_GENERIC AND NVIDIA_TARGET_SUPPORTED))
if(DNNL_SYCL_GENERIC AND NVIDIA_TARGET_SUPPORTED)
append(CMAKE_CXX_FLAGS "-fsycl-targets=nvptx64-nvidia-cuda")
append(CMAKE_CXX_FLAGS "-Wno-linker-warnings")
endif()
Expand Down
8 changes: 4 additions & 4 deletions tests/gtests/test_deconvolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -312,10 +312,10 @@ class deconvolution_test_t
auto aa = allows_attr_t {false};

#ifndef DNNL_SYCL_GENERIC
aa.po_binary = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
aa.po_eltwise = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
aa.po_prelu = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
aa.po_sum = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
aa.po_binary = !is_amd_gpu(eng);
aa.po_eltwise = !is_amd_gpu(eng);
aa.po_prelu = !is_amd_gpu(eng);
aa.po_sum = !is_amd_gpu(eng);
#else
aa.po_eltwise = true;
aa.po_sum = true;
Expand Down
2 changes: 2 additions & 0 deletions tests/gtests/test_group_normalization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@ class group_normalization_test_t

protected:
void SetUp() override {
SKIP_IF_CUDA(
true, "Group Normalization operator is not supported in CUDA");
SKIP_IF_HIP(
true, "Group Normalization operator is not supported in HIP");
SKIP_IF_GENERIC(true,
Expand Down
3 changes: 3 additions & 0 deletions tests/gtests/test_iface_attr_quantization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,9 @@ TEST_F(attr_quantization_test_t, TestLRN) {
}

TEST_F(attr_quantization_test_t, TestMatmul) {
// cuDNN doesn't support zero points
SKIP_IF_CUDA(true, "Test not supported on cuda");

for (auto a_dt : {data_type::f32, data_type::u8}) {
const data_type b_dt
= a_dt == data_type::f32 ? data_type::f32 : data_type::s8;
Expand Down
4 changes: 2 additions & 2 deletions tests/gtests/test_matmul.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -260,9 +260,9 @@ class matmul_iface_test_t
auto matmul_pd = pd_t(eng, src_md, weights_md, bia_md, dst_md, attr);

auto aa = allows_attr_t {false};
aa.po_binary = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
aa.po_binary = !is_amd_gpu(eng);
aa.po_eltwise = true;
aa.po_prelu = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
aa.po_prelu = !is_amd_gpu(eng);
aa.po_sum = true;
// scales are not supported by HIP
aa.scales = !is_amd_gpu(eng);
Expand Down
2 changes: 1 addition & 1 deletion tests/gtests/test_softmax.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ class softmax_test_t : public ::testing::TestWithParam<softmax_test_params_t> {
: p.aprop_kind;

allows_attr_t aa {false};
if (!(is_nvidia_gpu(eng) || is_amd_gpu(eng))) {
if (!is_amd_gpu(eng)) {
aa.po_eltwise = true;
aa.po_binary = true;
}
Expand Down

0 comments on commit 2861953

Please sign in to comment.