Implemented logistic regression in Python (naive version without Sequence) and added to tests

Alejandro Saucedo
2020-11-08 15:38:18 +00:00
parent 65b52f3023
commit 9af9cb7a50


@@ -1,4 +1,7 @@
+from pyshader import python2shader, f32, ivec3, Array
+from pyshader.stdlib import exp, log
+
 from kp import Tensor, Manager, Sequence


 def test_opmult():
@@ -98,14 +101,13 @@ def test_sequence():
     seq.eval()

     assert tensor_out.data() == [2.0, 4.0, 6.0]


-def test_pyshader_generated():
-    from pyshader import python2shader, f32, ivec3, Array
+def test_pyshader_pyshader():
     @python2shader
-    def compute_shader_multiply(index: ("input", "GlobalInvocationId", ivec3),
-                                data1: ("buffer", 0, Array(f32)),
-                                data2: ("buffer", 1, Array(f32)),
-                                data3: ("buffer", 2, Array(f32))):
+    def compute_shader_multiply(index=("input", "GlobalInvocationId", ivec3),
+                                data1=("buffer", 0, Array(f32)),
+                                data2=("buffer", 1, Array(f32)),
+                                data3=("buffer", 2, Array(f32))):
         i = index.x
         data3[i] = data1[i] * data2[i]
@@ -121,5 +123,100 @@ def test_pyshader_generated():
     assert tensor_out.data() == [2.0, 4.0, 6.0]


+def test_logistic_regression_pyshader():
+    @python2shader
+    def compute_shader(
+            index=("input", "GlobalInvocationId", ivec3),
+            x_i=("buffer", 0, Array(f32)),
+            x_j=("buffer", 1, Array(f32)),
+            y=("buffer", 2, Array(f32)),
+            w_in=("buffer", 3, Array(f32)),
+            w_out_i=("buffer", 4, Array(f32)),
+            w_out_j=("buffer", 5, Array(f32)),
+            b_in=("buffer", 6, Array(f32)),
+            b_out=("buffer", 7, Array(f32)),
+            l_out=("buffer", 8, Array(f32)),
+            M=("buffer", 9, Array(f32))):
+        i = index.x
+        m = M[0]
+        w_curr = vec2(w_in[0], w_in[1])
+        b_curr = b_in[0]
+        x_curr = vec2(x_i[i], x_j[i])
+        y_curr = y[i]
+        z_dot = w_curr @ x_curr
+        z = z_dot + b_curr
+        y_hat = 1.0 / (1.0 + exp(-z))
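+        # Gradients of the binary cross-entropy loss, pre-scaled by 1/m so
+        # the host can simply sum them across examples:
+        # dL/dz = y_hat - y, dL/dw = x * dL/dz, dL/db = dL/dz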
+        d_z = y_hat - y_curr
+        d_w = (1.0 / m) * x_curr * d_z
+        d_b = (1.0 / m) * d_z
+        loss = -((y_curr * log(y_hat)) + ((1.0 - y_curr) * log(1.0 - y_hat)))
+        w_out_i[i] = d_w.x
+        w_out_j[i] = d_w.y
+        b_out[i] = d_b
+        l_out[i] = loss
+
+    # First we create input and output tensors for the shader
+    tensor_x_i = Tensor([0.0, 1.0, 1.0, 1.0, 1.0])
+    tensor_x_j = Tensor([0.0, 0.0, 0.0, 1.0, 1.0])
+    tensor_y = Tensor([0.0, 0.0, 0.0, 1.0, 1.0])
+    tensor_w_in = Tensor([0.001, 0.001])
+    tensor_w_out_i = Tensor([0.0, 0.0, 0.0, 0.0, 0.0])
+    tensor_w_out_j = Tensor([0.0, 0.0, 0.0, 0.0, 0.0])
+    tensor_b_in = Tensor([0.0])
+    tensor_b_out = Tensor([0.0, 0.0, 0.0, 0.0, 0.0])
+    tensor_l_out = Tensor([0.0, 0.0, 0.0, 0.0, 0.0])
+    tensor_m = Tensor([5.0])
+    # We store them in an array for easier interaction
+    params = [tensor_x_i, tensor_x_j, tensor_y, tensor_w_in, tensor_w_out_i,
+              tensor_w_out_j, tensor_b_in, tensor_b_out, tensor_l_out, tensor_m]
+
+    mgr = Manager()
+    mgr.eval_tensor_create_def(params)
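+
+    # Parameters flow host -> device before each run (sync_device) and the
+    # computed gradients flow device -> host after each run (sync_local)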
+    ITERATIONS = 100
+    learning_rate = 0.1
+
+    # Perform the training loop, evaluating the shader across all inputs X and Y
+    for i_iter in range(ITERATIONS):
+        mgr.eval_tensor_sync_device_def([tensor_w_in, tensor_b_in])
+        mgr.eval_algo_data_def(params, compute_shader.to_spirv())
+        mgr.eval_tensor_sync_local_def([tensor_w_out_i, tensor_w_out_j, tensor_b_out, tensor_l_out])
+        # Update the parameters on the host using the derivatives computed on the GPU
+        w_in_i_val = tensor_w_in.data()[0]
+        w_in_j_val = tensor_w_in.data()[1]
+        b_in_val = tensor_b_in.data()[0]
+
+        for j_iter in range(tensor_b_out.size()):
+            w_in_i_val -= learning_rate * tensor_w_out_i.data()[j_iter]
+            w_in_j_val -= learning_rate * tensor_w_out_j.data()[j_iter]
+            b_in_val -= learning_rate * tensor_b_out.data()[j_iter]
+
+        # Set the updated parameters for the next iteration
+        tensor_w_in.set_data([w_in_i_val, w_in_j_val])
+        tensor_b_in.set_data([b_in_val])
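+
+    # x_j perfectly predicts y, so its weight should dominate after training,
+    # while the weight on x_i and the bias remain small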
+    assert tensor_w_in.data()[0] < 0.01
+    assert tensor_w_in.data()[0] > 0.0
+    assert tensor_w_in.data()[1] > 1.5
+    assert tensor_b_in.data()[0] < 0.7


 if __name__ == "__main__":
     test_sequence()
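
For reference, the same averaged-gradient update can be reproduced on the CPU with a minimal NumPy sketch (not part of this commit; the dataset and hyperparameters mirror the test above):

import numpy as np

# Same toy dataset as the test: x_j perfectly predicts y
X = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 1.0], [1.0, 1.0]])
y = np.array([0.0, 0.0, 0.0, 1.0, 1.0])
w, b = np.array([0.001, 0.001]), 0.0
learning_rate, m = 0.1, len(y)

for _ in range(100):
    y_hat = 1.0 / (1.0 + np.exp(-(X @ w + b)))  # sigmoid(w . x + b)
    d_z = y_hat - y                             # dL/dz for every example
    w = w - learning_rate * (X.T @ d_z) / m     # sum of the (1/m)-scaled grads
    b = b - learning_rate * d_z.sum() / m

print(w, b)  # w[1] should dominate, in line with the asserts above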