// NOTE(review): every template argument in this file appears to have been
// stripped by an extraction step (e.g. "std::shared_ptr xI" where the code
// clearly constructs a kp::Tensor, and "sq->record(params)" where Kompute's
// Sequence::record is a template taking an operation type). The template
// arguments below are restored from the Kompute API the calls target —
// confirm against the original file in the repository.

#include "gtest/gtest.h"

#include "kompute/Kompute.hpp"

// End-to-end logistic-regression fit on the GPU: evaluates the compute shader
// for ITERATIONS epochs, applying the gradient-descent parameter update on the
// host between evaluations, then checks the learned weights/bias separate the
// training data (xJ perfectly predicts y, xI does not).
TEST(TestLogisticRegressionAlgorithm, TestMainLogisticRegression)
{
    uint32_t ITERATIONS = 100;
    float learningRate = 0.1;

    // Inputs: two feature columns (xI, xJ) and the binary labels (y).
    std::shared_ptr<kp::Tensor> xI{ new kp::Tensor({ 0, 1, 1, 1, 1 }) };
    std::shared_ptr<kp::Tensor> xJ{ new kp::Tensor({ 0, 0, 0, 1, 1 }) };
    std::shared_ptr<kp::Tensor> y{ new kp::Tensor({ 0, 0, 0, 1, 1 }) };

    // Learned parameters (wIn holds both weights, bIn the bias) and the
    // per-sample outputs the shader writes back: weight gradients (wOutI,
    // wOutJ), bias gradient (bOut) and loss (lOut).
    std::shared_ptr<kp::Tensor> wIn{ new kp::Tensor({ 0.001, 0.001 }) };
    std::shared_ptr<kp::Tensor> wOutI{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };
    std::shared_ptr<kp::Tensor> wOutJ{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };
    std::shared_ptr<kp::Tensor> bIn{ new kp::Tensor({ 0 }) };
    std::shared_ptr<kp::Tensor> bOut{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };
    std::shared_ptr<kp::Tensor> lOut{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };

    std::vector<std::shared_ptr<kp::Tensor>> params = { xI,  xJ,    y,
                                                        wIn, wOutI, wOutJ,
                                                        bIn, bOut,  lOut };

    // Scope the Manager so all GPU resources are destroyed before the
    // host-side assertions below read the tensors' local data.
    {
        kp::Manager mgr;

        // Create/initialise all tensors on the device first.
        std::shared_ptr<kp::Sequence> sqTensor = mgr.createManagedSequence();
        sqTensor->begin();
        sqTensor->record<kp::OpTensorCreate>(params);
        sqTensor->end();
        sqTensor->eval();

        std::shared_ptr<kp::Sequence> sq = mgr.createManagedSequence();

        // Record op algo base
        sq->begin();

        // Push the host-updated parameters to the device, run the shader,
        // then pull the shader's outputs back to the host.
        sq->record<kp::OpTensorSyncDevice>({ wIn, bIn });

        sq->record<kp::OpAlgoBase<>>(
          params, "test/shaders/glsl/test_logistic_regression.comp");

        sq->record<kp::OpTensorSyncLocal>({ wOutI, wOutJ, bOut, lOut });

        sq->end();

        // Iterate across all expected iterations
        for (size_t i = 0; i < ITERATIONS; i++) {

            sq->eval();

            // Host-side gradient-descent step: fold the per-sample gradients
            // produced by the shader back into the parameters.
            for (size_t j = 0; j < bOut->size(); j++) {
                wIn->data()[0] -= learningRate * wOutI->data()[j];
                wIn->data()[1] -= learningRate * wOutJ->data()[j];
                bIn->data()[0] -= learningRate * bOut->data()[j];
            }
        }
    }

    // Based on the inputs the outputs should be at least:
    // * wi < 0.01
    // * wj > 1.0
    // * b < 0
    // TODO: Add EXPECT_DOUBLE_EQ instead
    // (the original asserted bIn < 0 twice; the copy-paste duplicate is
    // removed here.)
    EXPECT_LT(wIn->data()[0], 0.01);
    EXPECT_GT(wIn->data()[1], 1.0);
    EXPECT_LT(bIn->data()[0], 0.0);

    //SPDLOG_WARN("Result wIn: {}, bIn: {}, loss: {}",
    //        wIn->data(),
    //        bIn->data(),
    //        lOut->data());
}

// Same training loop as above, but with wIn/bIn created as staging tensors,
// so the host must explicitly map its updates into device-visible memory via
// mapDataIntoHostMemory() instead of recording an OpTensorSyncDevice.
TEST(TestLogisticRegressionAlgorithm, TestMainLogisticRegressionManualCopy)
{
    uint32_t ITERATIONS = 100;
    float learningRate = 0.1;

    std::vector<float> wInVec = { 0.001, 0.001 };
    std::vector<float> bInVec = { 0 };

    std::shared_ptr<kp::Tensor> xI{ new kp::Tensor({ 0, 1, 1, 1, 1 }) };
    std::shared_ptr<kp::Tensor> xJ{ new kp::Tensor({ 0, 0, 0, 1, 1 }) };
    std::shared_ptr<kp::Tensor> y{ new kp::Tensor({ 0, 0, 0, 1, 1 }) };

    // Staging tensors: host-visible, so updates written through data() can be
    // made visible to the device with mapDataIntoHostMemory().
    std::shared_ptr<kp::Tensor> wIn{ new kp::Tensor(
      wInVec, kp::Tensor::TensorTypes::eStaging) };
    std::shared_ptr<kp::Tensor> wOutI{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };
    std::shared_ptr<kp::Tensor> wOutJ{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };
    std::shared_ptr<kp::Tensor> bIn{ new kp::Tensor(
      bInVec, kp::Tensor::TensorTypes::eStaging) };
    std::shared_ptr<kp::Tensor> bOut{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };
    std::shared_ptr<kp::Tensor> lOut{ new kp::Tensor({ 0, 0, 0, 0, 0 }) };

    std::vector<std::shared_ptr<kp::Tensor>> params = { xI,  xJ,    y,
                                                        wIn, wOutI, wOutJ,
                                                        bIn, bOut,  lOut };

    {
        kp::Manager mgr;

        // Create/initialise all tensors on the device first.
        std::shared_ptr<kp::Sequence> sqTensor = mgr.createManagedSequence();
        sqTensor->begin();
        sqTensor->record<kp::OpTensorCreate>(params);
        sqTensor->end();
        sqTensor->eval();

        std::shared_ptr<kp::Sequence> sq = mgr.createManagedSequence();

        // Record op algo base
        sq->begin();

        // No OpTensorSyncDevice here — the staging tensors are synced
        // manually with mapDataIntoHostMemory() inside the loop below.
        sq->record<kp::OpAlgoBase<>>(
          params, "test/shaders/glsl/test_logistic_regression.comp");

        sq->record<kp::OpTensorSyncLocal>({ wOutI, wOutJ, bOut, lOut });

        sq->end();

        // Iterate across all expected iterations
        for (size_t i = 0; i < ITERATIONS; i++) {

            sq->eval();

            // Host-side gradient-descent step.
            for (size_t j = 0; j < bOut->size(); j++) {
                wIn->data()[0] -= learningRate * wOutI->data()[j];
                wIn->data()[1] -= learningRate * wOutJ->data()[j];
                bIn->data()[0] -= learningRate * bOut->data()[j];
            }

            // Manually propagate the updated parameters so the next eval()
            // sees them.
            wIn->mapDataIntoHostMemory();
            bIn->mapDataIntoHostMemory();
        }
    }

    // Based on the inputs the outputs should be at least:
    // * wi < 0.01
    // * wj > 1.0
    // * b < 0
    // TODO: Add EXPECT_DOUBLE_EQ instead
    EXPECT_LT(wIn->data()[0], 0.01);
    EXPECT_GT(wIn->data()[1], 1.0);
    EXPECT_LT(bIn->data()[0], 0.0);

    //SPDLOG_WARN("Result wIn: {}, bIn: {}, loss: {}",
    //        wIn->data(),
    //        bIn->data(),
    //        lOut->data());
}