mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
synced 2026-02-20 15:03:58 +00:00
535.43.08
@@ -126,6 +126,19 @@ static void nvUvmFreeSafeStack(nvidia_stack_t *sp)
     nv_kmem_cache_free_stack(sp);
 }
 
+static NV_STATUS nvUvmDestroyFaultInfoAndStacks(nvidia_stack_t *sp,
+                                                uvmGpuDeviceHandle device,
+                                                UvmGpuFaultInfo *pFaultInfo)
+{
+    nv_kmem_cache_free_stack(pFaultInfo->replayable.cslCtx.nvidia_stack);
+    nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_bh_sp);
+    nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp);
+
+    return rm_gpu_ops_destroy_fault_info(sp,
+                                         (gpuDeviceHandle)device,
+                                         pFaultInfo);
+}
+
 NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo)
 {
     nvidia_stack_t *sp = NULL;
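The helper introduced above frees all three per-fault-info stacks unconditionally, including replayable.cslCtx.nvidia_stack, which stays NULL unless Confidential Computing is in use; that is safe only because nv_kmem_cache_free_stack() tolerates NULL. A minimal standalone sketch of the NULL-tolerant free pattern (the type and malloc-backed storage are stand-ins, not the kernel implementation):

#include <stdlib.h>

/* Stand-in for nvidia_stack_t; the real type lives in the kernel
 * interface layer. */
typedef struct sketch_stack { void *base; } sketch_stack_t;

/* NULL-tolerant free: a caller like nvUvmDestroyFaultInfoAndStacks can
 * free every stack pointer unconditionally, allocated or not. */
static void sketch_free_stack(sketch_stack_t *sp)
{
    if (sp == NULL)
        return;
    free(sp->base);
    free(sp);
}

This is also what lets the reworked init path below set all three pointers to NULL up front and funnel every failure through one error label.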
@@ -196,7 +209,7 @@ NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
     memset(platformInfo, 0, sizeof(*platformInfo));
     platformInfo->atsSupported = nv_ats_supported;
 
-    platformInfo->sevEnabled = os_sev_enabled;
+    platformInfo->sevEnabled = os_cc_enabled;
 
     status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session);
 
@@ -855,6 +868,7 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
 {
     nvidia_stack_t *sp = NULL;
     NV_STATUS status;
+    int err;
 
     if (nv_kmem_cache_alloc_stack(&sp) != 0)
     {
@@ -864,36 +878,48 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
     status = rm_gpu_ops_init_fault_info(sp,
                                         (gpuDeviceHandle)device,
                                         pFaultInfo);
+    if (status != NV_OK)
+    {
+        goto done;
+    }
 
     // Preallocate a stack for functions called from ISR top half
     pFaultInfo->nonReplayable.isr_sp = NULL;
     pFaultInfo->nonReplayable.isr_bh_sp = NULL;
-    if (status == NV_OK)
+    pFaultInfo->replayable.cslCtx.nvidia_stack = NULL;
+
+    // NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC.
+    // Therefore, the pointer can be NULL on success. Always use the
+    // returned error code to determine if the operation was successful.
+    err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp);
+    if (err)
     {
-        // NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC.
-        // Therefore, the pointer can be NULL on success. Always use the
-        // returned error code to determine if the operation was successful.
-        int err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp);
-        if (!err)
-        {
-            err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp);
-            if (err)
-            {
-                nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp);
-                pFaultInfo->nonReplayable.isr_sp = NULL;
-            }
-        }
-
-        if (err)
-        {
-            rm_gpu_ops_destroy_fault_info(sp,
-                                          (gpuDeviceHandle)device,
-                                          pFaultInfo);
-
-            status = NV_ERR_NO_MEMORY;
-        }
+        goto error;
     }
 
+    err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp);
+    if (err)
+    {
+        goto error;
+    }
+
+    // The cslCtx.ctx pointer is not NULL only when ConfidentialComputing is enabled.
+    if (pFaultInfo->replayable.cslCtx.ctx != NULL)
+    {
+        err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->replayable.cslCtx.nvidia_stack);
+        if (err)
+        {
+            goto error;
+        }
+    }
+    goto done;
+
+error:
+    nvUvmDestroyFaultInfoAndStacks(sp,
+                                   device,
+                                   pFaultInfo);
+    status = NV_ERR_NO_MEMORY;
done:
     nv_kmem_cache_free_stack(sp);
     return status;
 }
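The rework above swaps nested if/else cleanup for the kernel's usual goto-unwind idiom: the stack pointers are NULLed up front, each failed allocation jumps to a shared error label that tears everything down via nvUvmDestroyFaultInfoAndStacks(), and the success path bypasses it with goto done. A compilable toy version of the same control flow, with malloc/free as hypothetical stand-ins for the stack-cache calls:

#include <stdlib.h>

/* Toy goto-unwind: acquire three resources, share one error path. */
static int init_three(void **a, void **b, void **c)
{
    int status = 0;

    *a = *b = *c = NULL;   /* so the error path can free unconditionally */

    *a = malloc(16);
    if (*a == NULL)
        goto error;

    *b = malloc(16);
    if (*b == NULL)
        goto error;

    *c = malloc(16);
    if (*c == NULL)
        goto error;

    goto done;

error:
    free(*a);              /* free() tolerates NULL, like */
    free(*b);              /* nv_kmem_cache_free_stack()  */
    free(*c);
    *a = *b = *c = NULL;
    status = -1;           /* analogue of NV_ERR_NO_MEMORY */
done:
    return status;
}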
@@ -949,23 +975,9 @@ NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device,
     nvidia_stack_t *sp = nvUvmGetSafeStack();
     NV_STATUS status;
 
-    // Free the preallocated stack for functions called from ISR
-    if (pFaultInfo->nonReplayable.isr_sp != NULL)
-    {
-        nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_sp);
-        pFaultInfo->nonReplayable.isr_sp = NULL;
-    }
-
-    if (pFaultInfo->nonReplayable.isr_bh_sp != NULL)
-    {
-        nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_bh_sp);
-        pFaultInfo->nonReplayable.isr_bh_sp = NULL;
-    }
-
-    status = rm_gpu_ops_destroy_fault_info(sp,
-                                           (gpuDeviceHandle)device,
-                                           pFaultInfo);
-
+    status = nvUvmDestroyFaultInfoAndStacks(sp,
+                                            device,
+                                            pFaultInfo);
     nvUvmFreeSafeStack(sp);
     return status;
 }
@@ -1504,44 +1516,18 @@ void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext)
 }
 EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext);
 
-NV_STATUS nvUvmInterfaceCslLogDeviceEncryption(UvmCslContext *uvmCslContext,
-                                               UvmCslIv *decryptIv)
-{
-    NV_STATUS status;
-    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
-
-    status = rm_gpu_ops_ccsl_log_device_encryption(sp, uvmCslContext->ctx, (NvU8 *)decryptIv);
-
-    return status;
-}
-EXPORT_SYMBOL(nvUvmInterfaceCslLogDeviceEncryption);
-
 NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
-                                    UvmCslDirection direction)
+                                    UvmCslOperation operation)
 {
     NV_STATUS status;
     nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
 
-    status = rm_gpu_ops_ccsl_rotate_iv(sp, uvmCslContext->ctx, direction);
+    status = rm_gpu_ops_ccsl_rotate_iv(sp, uvmCslContext->ctx, operation);
 
     return status;
 }
 EXPORT_SYMBOL(nvUvmInterfaceCslRotateIv);
 
-NV_STATUS nvUvmInterfaceCslAcquireEncryptionIv(UvmCslContext *uvmCslContext,
-                                               UvmCslIv *encryptIv)
-{
-    NV_STATUS status;
-    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
-
-    BUILD_BUG_ON(NV_OFFSETOF(UvmCslIv, fresh) != sizeof(encryptIv->iv));
-
-    status = rm_gpu_ops_ccsl_acquire_encryption_iv(sp, uvmCslContext->ctx, (NvU8*)encryptIv);
-
-    return status;
-}
-EXPORT_SYMBOL(nvUvmInterfaceCslAcquireEncryptionIv);
-
 NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
                                    NvU32 bufferSize,
                                    NvU8 const *inputBuffer,
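This hunk removes nvUvmInterfaceCslLogDeviceEncryption and nvUvmInterfaceCslAcquireEncryptionIv outright and renames the remaining UvmCslDirection parameter to UvmCslOperation. A hypothetical caller-side sketch of the rename; the enum values below are illustrative stand-ins, the real definitions live in the UVM headers:

#include <stdio.h>

/* Assumed shape of the renamed type (formerly UvmCslDirection). */
typedef enum {
    SKETCH_CSL_OPERATION_ENCRYPT,
    SKETCH_CSL_OPERATION_DECRYPT,
} SketchCslOperation;

/* Stub mirroring the reworked nvUvmInterfaceCslRotateIv() signature. */
static int rotate_iv_sketch(SketchCslOperation operation)
{
    printf("rotating IV for the %s channel\n",
           operation == SKETCH_CSL_OPERATION_ENCRYPT ? "encrypt" : "decrypt");
    return 0;
}

int main(void)
{
    /* Callers that previously passed a direction now pass an operation. */
    return rotate_iv_sketch(SKETCH_CSL_OPERATION_ENCRYPT);
}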
@@ -1566,6 +1552,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                    NvU8 const *inputBuffer,
                                    UvmCslIv const *decryptIv,
                                    NvU8 *outputBuffer,
+                                   NvU8 const *addAuthData,
+                                   NvU32 addAuthDataSize,
                                    NvU8 const *authTagBuffer)
 {
     NV_STATUS status;
@@ -1577,6 +1565,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                         inputBuffer,
                                         (NvU8 *)decryptIv,
                                         outputBuffer,
+                                        addAuthData,
+                                        addAuthDataSize,
                                         authTagBuffer);
 
     return status;
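Together with the signature change in the previous hunk, nvUvmInterfaceCslDecrypt() now threads additional authenticated data (addAuthData/addAuthDataSize) through to rm_gpu_ops_ccsl_decrypt(). In an AEAD scheme such as AES-GCM, AAD is bound into the authentication tag but never encrypted, so the decrypting side must present the same bytes to verify the tag. A toy illustration of that property, with a XOR checksum standing in for a real tag (nothing here is NVIDIA's implementation):

#include <stddef.h>
#include <stdint.h>

/* Toy "tag": folds both ciphertext and AAD into one byte. */
static uint8_t toy_tag(const uint8_t *ct, size_t ctLen,
                       const uint8_t *aad, size_t aadLen)
{
    uint8_t tag = 0;
    for (size_t i = 0; i < ctLen; i++)
        tag ^= ct[i];
    for (size_t i = 0; i < aadLen; i++)
        tag ^= aad[i];     /* AAD influences the tag...              */
    return tag;            /* ...but is never part of the ciphertext */
}

/* Verification fails if either the ciphertext or the AAD was altered. */
static int toy_verify(const uint8_t *ct, size_t ctLen,
                      const uint8_t *aad, size_t aadLen, uint8_t tag)
{
    return toy_tag(ct, ctLen, aad, aadLen) == tag ? 0 : -1;
}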
@@ -1598,18 +1588,32 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
 EXPORT_SYMBOL(nvUvmInterfaceCslSign);
 
 NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
-                                            UvmCslDirection direction,
+                                            UvmCslOperation operation,
                                             NvU64 *messageNum)
 {
     NV_STATUS status;
     nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
 
-    status = rm_gpu_ops_ccsl_query_message_pool(sp, uvmCslContext->ctx, direction, messageNum);
+    status = rm_gpu_ops_ccsl_query_message_pool(sp, uvmCslContext->ctx, operation, messageNum);
 
     return status;
 }
 EXPORT_SYMBOL(nvUvmInterfaceCslQueryMessagePool);
 
+NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
+                                       UvmCslOperation operation,
+                                       NvU64 increment,
+                                       UvmCslIv *iv)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    status = rm_gpu_ops_ccsl_increment_iv(sp, uvmCslContext->ctx, operation, increment, (NvU8 *)iv);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv);
+
 #else // NV_UVM_ENABLE
 
 NV_STATUS nv_uvm_suspend(void)
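The new nvUvmInterfaceCslIncrementIv() entry point advances an IV by an arbitrary increment on the given operation's channel, delegating to rm_gpu_ops_ccsl_increment_iv(). A conceptual sketch of what such an increment can look like for a counter-mode IV; the 12-byte layout and little-endian counter handling are assumptions for illustration, not the RM implementation:

#include <stdint.h>
#include <string.h>

/* Hypothetical IV layout; the real UvmCslIv is defined in the UVM headers. */
typedef struct { uint8_t bytes[12]; } SketchIv;

/* Treat the low 8 bytes as a little-endian counter and add `increment`. */
static void sketch_increment_iv(SketchIv *iv, uint64_t increment)
{
    uint64_t counter;
    memcpy(&counter, iv->bytes, sizeof(counter)); /* little-endian host assumed */
    counter += increment;
    memcpy(iv->bytes, &counter, sizeof(counter));
}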