
Updated example to map results
Signed-off-by: Alejandro Saucedo <[email protected]>
axsaucedo committed Nov 26, 2022
1 parent 29f6d8c commit 648ec50
Showing 2 changed files with 27 additions and 22 deletions.
5 changes: 2 additions & 3 deletions examples/logistic_regression/shader/my_shader.comp
@@ -1,6 +1,6 @@
 #version 450
 
-layout (constant_id = 0) const uint M = 0;
+layout (constant_id = 0) const float m = 0;
 
 layout (local_size_x = 1) in;
 
@@ -14,8 +14,6 @@ layout(set = 0, binding = 6) buffer bbin { float bin[]; };
 layout(set = 0, binding = 7) buffer bbout { float bout[]; };
 layout(set = 0, binding = 8) buffer blout { float lout[]; };
 
-float m = float(M);
-
 float sigmoid(float z) {
     return 1.0 / (1.0 + exp(-z));
 }
@@ -52,3 +50,4 @@ void main() {
 
     lout[idx] = calculateLoss(yHat, yCurr);
 }
+
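Note on this change: the shader's specialization constant is now declared directly as a float (m) rather than a uint (M) that had to be converted with float(M) inside the shader. The value itself is supplied from the host when the algorithm is built, as in the main.cpp change below; a minimal annotated sketch of that call (names taken from this example):

    std::shared_ptr<kp::Algorithm> algorithm = mgr.algorithm(
      params,                        // tensors bound to bindings 0..8
      spirv,                         // SPIR-V compiled from my_shader.comp
      kp::Workgroup({ 5 }),          // one workgroup per data row
      std::vector<float>({ 5.0 }));  // specialization constant m = 5.0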

44 changes: 25 additions & 19 deletions examples/logistic_regression/src/main.cpp
@@ -21,35 +21,41 @@ main()
     std::shared_ptr<kp::TensorT<float>> y = mgr.tensor({ 0, 0, 0, 1, 1 });
 
     std::shared_ptr<kp::TensorT<float>> wIn = mgr.tensor({ 0.001, 0.001 });
-    std::shared_ptr<kp::TensorT<float>> wOutI = mgr.tensor({ 0, 0, 0, 0, 0 });
-    std::shared_ptr<kp::TensorT<float>> wOutJ = mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> wOutI =
+      mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> wOutJ =
+      mgr.tensor({ 0, 0, 0, 0, 0 });
 
     std::shared_ptr<kp::TensorT<float>> bIn = mgr.tensor({ 0 });
-    std::shared_ptr<kp::TensorT<float>> bOut = mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> bOut =
+      mgr.tensor({ 0, 0, 0, 0, 0 });
 
-    std::shared_ptr<kp::TensorT<float>> lOut = mgr.tensor({ 0, 0, 0, 0, 0 });
+    std::shared_ptr<kp::TensorT<float>> lOut =
+      mgr.tensor({ 0, 0, 0, 0, 0 });
 
-    const std::vector<std::shared_ptr<kp::Tensor>> params = {
-        xI, xJ, y, wIn, wOutI, wOutJ, bIn, bOut, lOut
-    };
+    std::vector<std::shared_ptr<kp::Tensor>> params = { xI,  xJ,    y,
+                                                        wIn, wOutI, wOutJ,
+                                                        bIn, bOut,  lOut };
 
-    const std::vector<uint32_t> shader = std::vector<uint32_t>(
-        shader::MY_SHADER_COMP_SPV.begin(), shader::MY_SHADER_COMP_SPV.end());
+    mgr.sequence()->eval<kp::OpTensorSyncDevice>(params);
 
-    std::shared_ptr<kp::Algorithm> algo = mgr.algorithm(
-        params, shader, kp::Workgroup({ 5 }), std::vector<float>({ 5.0 }));
+    std::vector<uint32_t> spirv2{ 0x1, 0x2 };
 
-    mgr.sequence()->eval<kp::OpTensorSyncDevice>(params);
-
+    std::vector<uint32_t> spirv(
+      shader::MY_SHADER_COMP_SPV.begin(),
+      shader::MY_SHADER_COMP_SPV.end());
+
+    std::shared_ptr<kp::Algorithm> algorithm = mgr.algorithm(
+      params, spirv, kp::Workgroup({ 5 }), std::vector<float>({ 5.0 }));
 
     std::shared_ptr<kp::Sequence> sq =
       mgr.sequence()
         ->record<kp::OpTensorSyncDevice>({ wIn, bIn })
-        ->record<kp::OpAlgoDispatch>(algo)
+        ->record<kp::OpAlgoDispatch>(algorithm)
         ->record<kp::OpTensorSyncLocal>({ wOutI, wOutJ, bOut, lOut });
 
     // Iterate across all expected iterations
     for (size_t i = 0; i < ITERATIONS; i++) {
 
         sq->eval();
 
         for (size_t j = 0; j < bOut->size(); j++) {
@@ -59,12 +65,12 @@ main()
         }
     }
 
-    std::cout << "RESULTS" << std::endl;
-    std::cout << "w1: " << wIn->data()[0] << std::endl;
-    std::cout << "w2: " << wIn->data()[1] << std::endl;
-    std::cout << "b: " << bIn->data()[0] << std::endl;
+    KP_LOG_WARN("Result wIn i: {}, wIn j: {}, bIn: {}",
+                wIn->data()[0],
+                wIn->data()[1],
+                bIn->data()[0]);
 
-    if (wIn->data()[0] > 0.0 ||
+    if (wIn->data()[0] > 0.01 ||
         wIn->data()[1] < 1.0 ||
         bIn->data()[0] > 0.0) {
         throw std::runtime_error("Result does not match");
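Once the sequence has run, OpTensorSyncLocal has mapped wOutI, wOutJ, bOut and lOut back into host-visible memory, so results can be read through data(). A minimal sketch of printing the per-sample loss values (illustrative only, not part of this commit; assumes <iostream> is included as in the original example):

    for (size_t j = 0; j < lOut->size(); j++) {
        std::cout << "loss[" << j << "]: " << lOut->data()[j] << std::endl;
    }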
