Updated python

This commit is contained in:
Alejandro Saucedo
2021-02-08 07:29:34 +00:00
parent d24dfb7590
commit bf401019c9
2 changed files with 13 additions and 13 deletions

View File

@@ -513,10 +513,10 @@ function not in the record function.)doc";
static const char *__doc_kp_OpTensorSyncDevice =
R"doc(Operation that syncs tensor's device by mapping local data into the
device memory. For TensorTypes::eDevice it will use a staging tensor
to perform the copy. For TensorTypes::eStaging it will only copy the
to perform the copy. For TensorTypes::eHost it will only copy the
data and perform a map, which will be executed during the record (as
opposed to during the sequence eval/submit). This function cannot be
carried out for TensorTypes::eStaging.)doc";
carried out for TensorTypes::eHost.)doc";
static const char *__doc_kp_OpTensorSyncDevice_OpTensorSyncDevice = R"doc()doc";
@@ -533,7 +533,7 @@ queues @param device Vulkan logical device for passing to Algorithm
static const char *__doc_kp_OpTensorSyncDevice_init =
R"doc(Performs basic checks such as ensuring that there is at least one
tensor provided, that they are initialized and that they are not of
type TensorTpes::eStaging. For staging tensors in host memory, the map
type TensorTypes::eHost. For staging tensors in host memory, the map
is performed during the init function.)doc";
static const char *__doc_kp_OpTensorSyncDevice_mStagingTensors = R"doc()doc";
@@ -549,11 +549,11 @@ from the temporary staging tensor.)doc";
static const char *__doc_kp_OpTensorSyncLocal =
R"doc(Operation that syncs tensor's local data by mapping the data from
device memory into the local vector. For TensorTypes::eDevice it will
use a staging tensor to perform the copy. For TensorTypes::eStaging it
use a staging tensor to perform the copy. For TensorTypes::eHost it
will only copy the data and perform a map, which will be executed
during the postSubmit (there will be no copy during the sequence
eval/submit). This function cannot be carried out for
TensorTypes::eStaging.)doc";
TensorTypes::eHost.)doc";
static const char *__doc_kp_OpTensorSyncLocal_OpTensorSyncLocal = R"doc()doc";
@@ -570,7 +570,7 @@ queues @param device Vulkan logical device for passing to Algorithm
static const char *__doc_kp_OpTensorSyncLocal_init =
R"doc(Performs basic checks such as ensuring that there is at least one
tensor provided, that they are initialized and that they are not of
type TensorTpes::eStaging.)doc";
type TensorTypes::eHost.)doc";
static const char *__doc_kp_OpTensorSyncLocal_mStagingTensors = R"doc()doc";
@@ -719,7 +719,7 @@ shader storage).)doc";
static const char *__doc_kp_Tensor_TensorTypes_eDevice = R"doc(< Type is device memory, source and destination)doc";
static const char *__doc_kp_Tensor_TensorTypes_eStaging = R"doc(< Type is host memory, source and destination)doc";
static const char *__doc_kp_Tensor_TensorTypes_eHost = R"doc(< Type is host memory, source and destination)doc";
static const char *__doc_kp_Tensor_TensorTypes_eStorage = R"doc(< Type is Device memory (only))doc";

View File

@@ -26,7 +26,7 @@ PYBIND11_MODULE(kp, m) {
py::enum_<kp::Tensor::TensorTypes>(m, "TensorTypes", DOC(kp, Tensor, TensorTypes))
.value("device", kp::Tensor::TensorTypes::eDevice, "Tensor holding data in GPU memory.")
.value("staging", kp::Tensor::TensorTypes::eStaging, "Tensor used for transfer of data to device.")
.value("host", kp::Tensor::TensorTypes::eHost, "Tensor used for CPU visible GPU data.")
.value("storage", kp::Tensor::TensorTypes::eStorage, "Tensor with device-only (GPU) memory, used as shader storage.")
.export_values();
@@ -112,7 +112,7 @@ PYBIND11_MODULE(kp, m) {
.def("record_tensor_sync_device", &kp::Sequence::record<kp::OpTensorSyncDevice>,
"Records operation to sync tensor from local memory to GPU memory")
.def("record_tensor_sync_local", &kp::Sequence::record<kp::OpTensorSyncLocal>,
"Records operation to sync tensor(s) from GPU memory to local memory using staging tensors")
"Records operation to sync tensor(s) from GPU memory to local memory")
.def("record_algo_mult", &kp::Sequence::record<kp::OpMult>,
"Records operation to run multiplication compute shader to two input tensors and an output tensor")
.def("record_algo_file", [](kp::Sequence &self,
@@ -179,7 +179,7 @@ PYBIND11_MODULE(kp, m) {
.def("eval_tensor_sync_device_def", &kp::Manager::evalOpDefault<kp::OpTensorSyncDevice>,
"Evaluates operation to sync tensor from local memory to GPU memory with new anonymous Sequence")
.def("eval_tensor_sync_local_def", &kp::Manager::evalOpDefault<kp::OpTensorSyncLocal>,
"Evaluates operation to sync tensor(s) from GPU memory to local memory using staging tensors with new anonymous Sequence")
"Evaluates operation to sync tensor(s) from GPU memory to local memory with new anonymous Sequence")
.def("eval_algo_mult_def", &kp::Manager::evalOpDefault<kp::OpMult>,
"Evaluates operation to run multiplication compute shader to two input tensors and an output tensor with new anonymous Sequence")
.def("eval_algo_file_def", &kp::Manager::evalOpDefault<kp::OpAlgoBase, std::string>,
@@ -216,7 +216,7 @@ PYBIND11_MODULE(kp, m) {
.def("eval_tensor_sync_device", &kp::Manager::evalOp<kp::OpTensorSyncDevice>,
"Evaluates operation to sync tensor from local memory to GPU memory with explicitly named Sequence")
.def("eval_tensor_sync_local", &kp::Manager::evalOp<kp::OpTensorSyncLocal>,
"Evaluates operation to sync tensor(s) from GPU memory to local memory using staging tensors with explicitly named Sequence")
"Evaluates operation to sync tensor(s) from GPU memory to local memory with explicitly named Sequence")
.def("eval_algo_mult", &kp::Manager::evalOp<kp::OpMult>,
"Evaluates operation to run multiplication compute shader to two input tensors and an output tensor with explicitly named Sequence")
.def("eval_algo_file", &kp::Manager::evalOp<kp::OpAlgoBase, std::string>,
@@ -256,7 +256,7 @@ PYBIND11_MODULE(kp, m) {
.def("eval_async_tensor_sync_device_def", &kp::Manager::evalOpAsyncDefault<kp::OpTensorSyncDevice>,
"Evaluates asynchronously operation to sync tensor from local memory to GPU memory with anonymous Sequence")
.def("eval_async_tensor_sync_local_def", &kp::Manager::evalOpAsyncDefault<kp::OpTensorSyncLocal>,
"Evaluates asynchronously operation to sync tensor(s) from GPU memory to local memory using staging tensors with anonymous Sequence")
"Evaluates asynchronously operation to sync tensor(s) from GPU memory to local memory with anonymous Sequence")
.def("eval_async_algo_mult_def", &kp::Manager::evalOpAsyncDefault<kp::OpMult>,
"Evaluates asynchronously operation to run multiplication compute shader to two input tensors and an output tensor with anonymous Sequence")
.def("eval_async_algo_file_def", &kp::Manager::evalOpAsyncDefault<kp::OpAlgoBase, std::string>,
@@ -293,7 +293,7 @@ PYBIND11_MODULE(kp, m) {
.def("eval_async_tensor_sync_device", &kp::Manager::evalOpAsync<kp::OpTensorSyncDevice>,
"Evaluates asynchronously operation to sync tensor from local memory to GPU memory with explicitly named Sequence")
.def("eval_async_tensor_sync_local", &kp::Manager::evalOpAsync<kp::OpTensorSyncLocal>,
"Evaluates asynchronously operation to sync tensor(s) from GPU memory to local memory using staging tensors with explicitly named Sequence")
"Evaluates asynchronously operation to sync tensor(s) from GPU memory to local memory with explicitly named Sequence")
.def("eval_async_algo_mult", &kp::Manager::evalOpAsync<kp::OpMult>,
"Evaluates asynchronously operation to run multiplication compute shader to two input tensors and an output tensor with explicitly named Sequence")
.def("eval_async_algo_file", &kp::Manager::evalOpAsync<kp::OpAlgoBase, std::string>,