diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index 5e70eebf4..6f8367e58 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -24,6 +24,7 @@
         # This case is marked as skip but XPU failed. However, CUDA and XPU throw the same runtime error.
         "test_out_histc_xpu_float32",
         # Data type is not supported in oneDNN!
+        # https://github.com/intel/torch-xpu-ops/issues/1896
         "test_dtypes_nn_functional_conv1d_xpu",
         "test_dtypes_nn_functional_conv2d_xpu",
         "test_dtypes_nn_functional_conv3d_xpu",
@@ -99,7 +100,6 @@
         # unexpected success because of cpu fallback
         # Linalg OPs not supported
         "test_out_triangular_solve_xpu_float32",
-        # Newly added:
         # Cuda skipped it
         "test_non_standard_bool_values_sort_xpu_bool",  # The implementation aligns with CUDA, RuntimeError: "sort" not implemented for 'Bool'.
         # Cuda XFAIL (stock pytorch commit: e7cf7d0)
@@ -1414,8 +1414,7 @@
         "test_gemm_bias_tunableop_xpu_bfloat16",
         "test_scaled_gemm_tunableop_xpu_float8_e4m3fnuz",
         "test_scaled_gemm_tunableop_xpu_float8_e5m2fnuz",
-        # CUDA bias cases added in latest PyTorch
-        # AttributeError: module 'torch._C' has no attribute '_cuda_tunableop_enable'
+        # https://github.com/intel/torch-xpu-ops/issues/2066
         "test_matmul_check_entries_tunableop_xpu_float16",
         "test_minimum_tuning_iteration_tunableop_xpu_float16",
         "test_validator_tunableop_rocm_xpu_float32",
@@ -2968,8 +2967,6 @@
         "test_meta_outplace_vdot_xpu_complex",
         # Unexpected success:
         "test_dispatch_symbolic_meta_outplace_all_strides_narrow_copy_xpu_float32",
-        # New added case in 2.7
-        "test_nonzero_xpu",  # https://github.com/intel/torch-xpu-ops/issues/1569
         # RuntimeError: output 0: meta disagrees with real impl
         "test_dispatch_meta_outplace_norm_fro_xpu_bfloat16",