This repository was archived by the owner on Mar 28, 2023. It is now read-only.
forked from llvm/llvm-test-suite
[SYCL][Matrix] Add test for odd sizes #1523
Closed
Changes from all commits (4 commits)
New file: joint_matrix_all_sizes.cpp
//==-------- joint_matrix_all_sizes.cpp - DPC++ joint_matrix---------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// REQUIRES: matrix

// RUN: %clangxx -fsycl %s -o %t.out -DSYCL_EXT_ONEAPI_MATRIX_VERSION=4
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out

// XFAIL: gpu

#include <iostream>
#include <sycl/sycl.hpp>

using namespace sycl;
using namespace sycl::ext::oneapi::experimental::matrix;
using bfloat16 = sycl::ext::oneapi::bfloat16;

#define SG_SZ 16

#include "joint_matrix_all_sizes_impl.hpp"

New file: joint_matrix_all_sizes_impl.hpp (the implementation header included by the test above)
#define BF16_EPSILON 0.00781250

template <typename T, size_t NUM_ROWS, size_t NUM_COLS> struct big_matrix {
private:
  T *mat;

public:
  T *get_data() { return mat; }
  void set_data(T *data) { mat = data; }
  big_matrix(T *data) : mat(data) {}
};

// Repack a (rows x cols) row-major matrix into VNNI layout: groups of
// vnniFactor consecutive source rows are interleaved column-wise into a
// (rows / vnniFactor) x (cols * vnniFactor) destination matrix.
template <typename T>
void matrix_vnni(unsigned int rows, unsigned int cols, T *src, T *dest,
                 unsigned int vnniFactor) {
  for (unsigned int i = 0; i < rows / vnniFactor; i++) {
    for (unsigned int j = 0; j < cols; j++) {
      for (unsigned int k = 0; k < vnniFactor; k++) {
        dest[i * cols * vnniFactor + j * vnniFactor + k] =
            src[(i * vnniFactor + k) * cols + j];
      }
    }
  }
}

template <typename T1, typename T2, size_t M, size_t N, size_t K,
          int vnniFactor, size_t TM, size_t TN, size_t TK>
void matrix_multiply(big_matrix<T1, M, N> &C, big_matrix<T2, M, K> &A,
                     big_matrix<T2, K / vnniFactor, N * vnniFactor> &B) {
  size_t NDRangeM = M / TM;
  size_t NDRangeN = N / TN;
  buffer<T2, 2> bufA(A.get_data(), range<2>(M, K));
  buffer<T2, 2> bufB(B.get_data(), range<2>(K, N));
  buffer<T1, 2> bufC(C.get_data(), range<2>(M, N));

  queue q;
  q.submit([&](handler &cgh) {
     sycl::accessor accC{bufC, cgh, sycl::read_write};
     sycl::accessor accA{bufA, cgh, sycl::read_only};
     sycl::accessor accB{bufB, cgh, sycl::read_only};

     cgh.parallel_for(
         nd_range<2>({NDRangeM, NDRangeN * SG_SZ}, {1, 1 * SG_SZ}),
         [=](nd_item<2> spmd_item) [[intel::reqd_sub_group_size(SG_SZ)]] {
           // The submatrix API has to be accessed by all work-items in a
           // subgroup; these functions are called once per subgroup, so
           // there is no code divergence between the work-items.
           const auto global_idx = spmd_item.get_global_id(0);
           const auto global_idy = spmd_item.get_global_id(1);
           const auto sg_startx = global_idx - spmd_item.get_local_id(0);
           const auto sg_starty = global_idy - spmd_item.get_local_id(1);

           sub_group sg = spmd_item.get_sub_group();
           joint_matrix<sub_group, T2, use::a, TM, TK, layout::row_major>
               sub_a;
           // For B, we assume it has already been packed into VNNI layout.
           joint_matrix<sub_group, T2, use::b, TK, TN,
                        ext::intel::experimental::matrix::layout::packed>
               sub_b;
           joint_matrix<sub_group, T1, use::accumulator, TM, TN> sub_c;

           joint_matrix_load(sg, sub_c,
                             accC.get_pointer() + (sg_startx * TM) * N +
                                 sg_starty / SG_SZ * TN,
                             N, layout::row_major);
           for (int k = 0; k < K / TK; k += 1) {
             joint_matrix_load(
                 sg, sub_a,
                 accA.get_pointer() + (sg_startx * TM) * K + k * TK, K);
             joint_matrix_load(sg, sub_b,
                               accB.get_pointer() +
                                   (k * TK / vnniFactor) * (N * vnniFactor) +
                                   sg_starty / SG_SZ * TN * vnniFactor,
                               N * vnniFactor);
             sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c);
           }
           joint_matrix_store(sg, sub_c,
                              accC.get_pointer() + (sg_startx * TM) * N +
                                  sg_starty / SG_SZ * TN,
                              N, layout::row_major);
         }); // parallel for
   }).wait();
}

static constexpr size_t MATRIX_M = 128;
static constexpr size_t MATRIX_N = 128;
static constexpr size_t MATRIX_K = 128;

float make_fp32(bfloat16 x) {
  // bfloat16 holds the upper 16 bits of an IEEE-754 float; read its 16-bit
  // storage and shift it into the high half of a 32-bit pattern.
  unsigned int y = *reinterpret_cast<uint16_t *>(&x);
  y = y << 16;
  float *res = reinterpret_cast<float *>(&y);
  return *res;
}

// Host reference implementation of C += A * B, used to validate the
// joint_matrix results.
template <typename Ta, typename Tc>
void matrix_multiply_ref(Ta *A, Ta *B, Tc *C, int M, int N, int K) {
  for (int m = 0; m < M; m++)
    for (int n = 0; n < N; n++) {
      for (int k = 0; k < K; k++) {
        if (std::is_same_v<Ta, bfloat16> && std::is_same_v<Tc, float>)
          C[m * N + n] += make_fp32(A[m * K + k]) * make_fp32(B[k * N + n]);
        if (std::is_same_v<Ta, int8_t> && std::is_same_v<Tc, int32_t>)
          C[m * N + n] += A[m * K + k] * B[k * N + n];
      }
    }
}

// Initialize A, B, C and D, run the joint_matrix multiply and the host
// reference multiply, and return 0 if the results match.
template <typename Ta, typename Tc, int vnni_factor, size_t tM, size_t tN,
          size_t tK>
int init_and_multiply() {
  Ta A[MATRIX_M][MATRIX_K];
  Ta B[MATRIX_K][MATRIX_N];
  Ta Bvnni[MATRIX_K / vnni_factor][MATRIX_N * vnni_factor];
  Tc C[MATRIX_M][MATRIX_N];
  Tc D[MATRIX_M][MATRIX_N];

  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_K; j++) {
      if (std::is_same_v<Ta, bfloat16> && std::is_same_v<Tc, float>)
        A[i][j] = bfloat16(1.0f * (i + j));
      if (std::is_same_v<Ta, int8_t> && std::is_same_v<Tc, int32_t>)
        A[i][j] = i + j;
    }
  }
  for (int i = 0; i < MATRIX_K; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      if (std::is_same_v<Ta, bfloat16> && std::is_same_v<Tc, float>)
        B[i][j] = bfloat16(2.0f * i + 3.0f * j);
      if (std::is_same_v<Ta, int8_t> && std::is_same_v<Tc, int32_t>)
        B[i][j] = i + 2 * j;
    }
  }
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      C[i][j] = 1;
      D[i][j] = 1;
    }
  }

  big_matrix<Tc, MATRIX_M, MATRIX_N> MC((Tc *)&C);
  big_matrix<Tc, MATRIX_M, MATRIX_N> MD((Tc *)&D);
  big_matrix<Ta, MATRIX_M, MATRIX_K> MA((Ta *)&A);
  matrix_vnni<Ta>(MATRIX_K, MATRIX_N, (Ta *)&B, (Ta *)&Bvnni, vnni_factor);
  big_matrix<Ta, MATRIX_K / vnni_factor, MATRIX_N * vnni_factor> MBvnni(
      (Ta *)&Bvnni);

  matrix_multiply<Tc, Ta, MATRIX_M, MATRIX_N, MATRIX_K, vnni_factor, tM, tN,
                  tK>(MC, MA, MBvnni);
  matrix_multiply_ref((Ta *)A, (Ta *)B, (Tc *)D, MATRIX_M, MATRIX_N, MATRIX_K);

  bool res = true;
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      if constexpr (std::is_same_v<Ta, bfloat16> && std::is_same_v<Tc, float>) {
        if (fabs(C[i][j] - D[i][j]) > BF16_EPSILON) {
          std::cout << "failed bfloat16: C is " << C[i][j] << ", D is "
                    << D[i][j] << std::endl;
          res = false;
        }
      } else if constexpr (std::is_same_v<Ta, int8_t> &&
                           std::is_same_v<Tc, int32_t>) {
        if (C[i][j] != D[i][j]) {
          std::cout << "failed int8: C is " << C[i][j] << ", D is " << D[i][j]
                    << std::endl;
          res = false;
        }
      }
    }
  }
  std::cout << (res ? "passed" : "failed") << std::endl;
  return !res;
}

int main() {
  int res = 0;

  res |= init_and_multiply<bfloat16, float, 2, 1, 16, 16>();
  res |= init_and_multiply<bfloat16, float, 2, 2, 16, 16>();
  res |= init_and_multiply<bfloat16, float, 2, 3, 16, 16>();
  res |= init_and_multiply<bfloat16, float, 2, 4, 16, 16>();
  res |= init_and_multiply<bfloat16, float, 2, 5, 16, 16>();
  res |= init_and_multiply<bfloat16, float, 2, 6, 16, 16>();
  res |= init_and_multiply<bfloat16, float, 2, 7, 16, 16>();
  res |= init_and_multiply<bfloat16, float, 2, 8, 16, 16>();

  res |= init_and_multiply<int8_t, int32_t, 4, 1, 16, 32>();
  res |= init_and_multiply<int8_t, int32_t, 4, 2, 16, 32>();
  res |= init_and_multiply<int8_t, int32_t, 4, 3, 16, 32>();
  res |= init_and_multiply<int8_t, int32_t, 4, 4, 16, 32>();
  res |= init_and_multiply<int8_t, int32_t, 4, 5, 16, 32>();
  res |= init_and_multiply<int8_t, int32_t, 4, 6, 16, 32>();
  res |= init_and_multiply<int8_t, int32_t, 4, 7, 16, 32>();
  res |= init_and_multiply<int8_t, int32_t, 4, 8, 16, 32>();

  // Propagate any mismatch as a non-zero exit code so the test run fails.
  return res;
}
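
For reviewers unfamiliar with the VNNI ("packed") layout that matrix_vnni produces and that sub_b is loaded from, here is a minimal host-only sketch, not part of this PR, that applies the same indexing as matrix_vnni to a 4x2 matrix with vnniFactor = 2; the matrix size and element values are arbitrary and chosen only for illustration.

// Host-only illustration of the VNNI repacking performed by matrix_vnni.
// The 4x2 matrix, vnniFactor = 2, and element encoding are arbitrary.
#include <cstdio>

int main() {
  constexpr unsigned rows = 4, cols = 2, vnniFactor = 2;
  // Row-major source; element value encodes (row, col) as 10 * row + col.
  int src[rows * cols];
  for (unsigned r = 0; r < rows; r++)
    for (unsigned c = 0; c < cols; c++)
      src[r * cols + c] = 10 * r + c;

  // Same indexing as matrix_vnni: vnniFactor consecutive source rows are
  // interleaved into adjacent columns of a single destination row.
  int dest[(rows / vnniFactor) * (cols * vnniFactor)];
  for (unsigned i = 0; i < rows / vnniFactor; i++)
    for (unsigned j = 0; j < cols; j++)
      for (unsigned k = 0; k < vnniFactor; k++)
        dest[i * cols * vnniFactor + j * vnniFactor + k] =
            src[(i * vnniFactor + k) * cols + j];

  // Prints the packed rows: 0 10 1 11 and 20 30 21 31, i.e. source rows
  // 0/1 (then 2/3) interleaved column by column.
  for (unsigned i = 0; i < rows / vnniFactor; i++) {
    for (unsigned j = 0; j < cols * vnniFactor; j++)
      printf("%4d", dest[i * cols * vnniFactor + j]);
    printf("\n");
  }
  return 0;
}

This interleaving is the reason matrix_multiply takes B as big_matrix<T2, K / vnniFactor, N * vnniFactor> and loads sub_b with a stride of N * vnniFactor.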
Could you please clarify what the reason is for the XFAIL on GPU? Is there a lack of support?
Yes, there is a lack of support for these sizes on the GPU.