diff --git a/python/src/main.cpp b/python/src/main.cpp
index 59d16ab..5800914 100644
--- a/python/src/main.cpp
+++ b/python/src/main.cpp
@@ -39,10 +39,14 @@ PYBIND11_MODULE(kp, m) {
             return std::unique_ptr<kp::Tensor>(new kp::Tensor(data, tensorTypes));
         }), "Initialiser with list of data components and tensor GPU memory type.")
         .def("data", &kp::Tensor::data, DOC(kp, Tensor, data))
+        .def("get", [](kp::Tensor &self, uint32_t index) -> float { return self.data()[index]; },
+                "When only an index is necessary")
+        .def("set", [](kp::Tensor &self, uint32_t index, float value) {
+                self.data()[index] = value; })
+        .def("set", &kp::Tensor::setData, "Overrides the data in the local Tensor memory.")
         .def("size", &kp::Tensor::size, "Retrieves the size of the Tensor data as per the local Tensor memory.")
         .def("tensor_type", &kp::Tensor::tensorType, "Retreves the memory type of the tensor.")
         .def("is_init", &kp::Tensor::isInit, "Checks whether the tensor GPU memory has been initialised.")
-        .def("set_data", &kp::Tensor::setData, "Overrides the data in the local Tensor memory.")
         .def("map_data_from_host", &kp::Tensor::mapDataFromHostMemory, "Maps data into GPU memory from tensor local data.")
         .def("map_data_into_host", &kp::Tensor::mapDataIntoHostMemory, "Maps data from GPU memory into tensor local data.");
@@ -74,12 +78,12 @@ PYBIND11_MODULE(kp, m) {
             "Records an operation using a custom shader provided from a shader path")
         .def("record_algo_data", [](kp::Sequence &self,
                                     std::vector<std::shared_ptr<kp::Tensor>> tensors,
-                                    py::bytes &bytes) {
+                                    py::bytes &bytes) -> float {
                 // Bytes have to be converted into std::vector
                 py::buffer_info info(py::buffer(bytes).request());
                 const char *data = reinterpret_cast<const char *>(info.ptr);
                 size_t length = static_cast<size_t>(info.size);
-                self.record(
+                return self.record(
                     tensors, std::vector<char>(data, data + length));
             },
diff --git a/python/test/test_kompute.py b/python/test/test_kompute.py
index 7d201f2..930b8d6 100644
--- a/python/test/test_kompute.py
+++ b/python/test/test_kompute.py
@@ -214,18 +214,11 @@ def test_logistic_regression_pyshader():
         sq.eval()
 
         # Calculate the parameters based on the respective derivatives calculated
-        w_in_i_val = tensor_w_in.data()[0]
-        w_in_j_val = tensor_w_in.data()[1]
-        b_in_val = tensor_b_in.data()[0]
-        for j_iter in range(tensor_b_out.size()):
-            w_in_i_val -= learning_rate * tensor_w_out_i.data()[j_iter]
-            w_in_j_val -= learning_rate * tensor_w_out_j.data()[j_iter]
-            b_in_val -= learning_rate * tensor_b_out.data()[j_iter]
-
-        # Update the parameters to process inference again
-        tensor_w_in.set_data([w_in_i_val, w_in_j_val])
-        tensor_b_in.set_data([b_in_val])
+        for j_iter in range(tensor_b_out.size()):
+            tensor_w_in.set(0, tensor_w_in.get(0) - learning_rate * tensor_w_out_i.data()[j_iter])
+            tensor_w_in.set(1, tensor_w_in.get(1) - learning_rate * tensor_w_out_j.data()[j_iter])
+            tensor_b_in.set(0, tensor_b_in.get(0) - learning_rate * tensor_b_out.data()[j_iter])
 
    assert tensor_w_in.data()[0] < 0.01
    assert tensor_w_in.data()[0] > 0.0
 