535.43.02
mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -435,7 +435,7 @@ EXPORT_SYMBOL(nvUvmInterfacePmaUnregisterEvictionCallbacks);

 NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma,
                                       NvLength pageCount,
-                                      NvU32 pageSize,
+                                      NvU64 pageSize,
                                       UvmPmaAllocationOptions *pPmaAllocOptions,
                                       NvU64 *pPages)
 {
@@ -462,7 +462,7 @@ EXPORT_SYMBOL(nvUvmInterfacePmaAllocPages);
 NV_STATUS nvUvmInterfacePmaPinPages(void *pPma,
                                     NvU64 *pPages,
                                     NvLength pageCount,
-                                    NvU32 pageSize,
+                                    NvU64 pageSize,
                                     NvU32 flags)
 {
     nvidia_stack_t *sp = NULL;
@@ -483,7 +483,7 @@ EXPORT_SYMBOL(nvUvmInterfacePmaPinPages);
 NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma,
                                       NvU64 *pPages,
                                       NvLength pageCount,
-                                      NvU32 pageSize)
+                                      NvU64 pageSize)
 {
     nvidia_stack_t *sp = NULL;
     NV_STATUS status;
@@ -516,7 +516,7 @@ EXPORT_SYMBOL(nvUvmInterfaceMemoryFree);
 void nvUvmInterfacePmaFreePages(void *pPma,
                                 NvU64 *pPages,
                                 NvLength pageCount,
-                                NvU32 pageSize,
+                                NvU64 pageSize,
                                 NvU32 flags)
 {
     nvidia_stack_t *sp = nvUvmGetSafeStack();
@@ -529,7 +529,7 @@ EXPORT_SYMBOL(nvUvmInterfacePmaFreePages);

 NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace,
                                      UvmGpuPointer gpuPointer, NvLength length, void **cpuPtr,
-                                     NvU32 pageSize)
+                                     NvU64 pageSize)
 {
     nvidia_stack_t *sp = NULL;
     NV_STATUS status;
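The five hunks above make one coordinated change: every pageSize parameter on the PMA and CPU-mapping paths widens from NvU32 to NvU64, so page sizes of 4 GiB and larger can be expressed without truncation. A minimal caller sketch under that assumption; only the function signatures come from the diff, while pPma, the page count, the zeroed options, and the zero flags are illustrative:

```c
// Hypothetical caller of the widened NvU64 pageSize interface.
#include "nv_uvm_interface.h"

static NV_STATUS pma_alloc_example(void *pPma)
{
    UvmPmaAllocationOptions options = {0};  // assumed-valid default options
    NvU64 pages[4];
    NvU64 pageSize = 2ULL * 1024 * 1024;    // 2 MiB; NvU64 now also admits >= 4 GiB sizes
    NV_STATUS status;

    status = nvUvmInterfacePmaAllocPages(pPma, 4, pageSize, &options, pages);
    if (status != NV_OK)
        return status;

    // Pin and unpin take the same NvU64 pageSize after this change.
    status = nvUvmInterfacePmaPinPages(pPma, pages, 4, pageSize, 0);
    if (status == NV_OK)
        status = nvUvmInterfacePmaUnpinPages(pPma, pages, 4, pageSize);

    nvUvmInterfacePmaFreePages(pPma, pages, 4, pageSize, 0);
    return status;
}
```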
@@ -557,7 +557,39 @@ void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace,
 }
 EXPORT_SYMBOL(nvUvmInterfaceMemoryCpuUnMap);

-NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace,
+NV_STATUS nvUvmInterfaceTsgAllocate(uvmGpuAddressSpaceHandle vaSpace,
+                                    const UvmGpuTsgAllocParams *allocParams,
+                                    uvmGpuTsgHandle *tsg)
+{
+    nvidia_stack_t *sp = NULL;
+    NV_STATUS status;
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return NV_ERR_NO_MEMORY;
+    }
+
+    status = rm_gpu_ops_tsg_allocate(sp,
+                                     (gpuAddressSpaceHandle)vaSpace,
+                                     allocParams,
+                                     (gpuTsgHandle *)tsg);
+
+    nv_kmem_cache_free_stack(sp);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceTsgAllocate);
+
+void nvUvmInterfaceTsgDestroy(uvmGpuTsgHandle tsg)
+{
+    nvidia_stack_t *sp = nvUvmGetSafeStack();
+    rm_gpu_ops_tsg_destroy(sp, (gpuTsgHandle)tsg);
+    nvUvmFreeSafeStack(sp);
+}
+EXPORT_SYMBOL(nvUvmInterfaceTsgDestroy);
+
+
+NV_STATUS nvUvmInterfaceChannelAllocate(const uvmGpuTsgHandle tsg,
                                         const UvmGpuChannelAllocParams *allocParams,
                                         uvmGpuChannelHandle *channel,
                                         UvmGpuChannelInfo *channelInfo)
@@ -571,7 +603,7 @@ NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace,
     }

     status = rm_gpu_ops_channel_allocate(sp,
-                                         (gpuAddressSpaceHandle)vaSpace,
+                                         (gpuTsgHandle)tsg,
                                          allocParams,
                                          (gpuChannelHandle *)channel,
                                          channelInfo);
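With these two hunks a channel no longer hangs directly off a VA space: callers first create a TSG (channel group) in the VA space with nvUvmInterfaceTsgAllocate, then pass its handle to nvUvmInterfaceChannelAllocate. A hedged sketch of the new call order; the zero-initialized alloc params are placeholders, not values the diff prescribes:

```c
// Sketch of the new allocation order: VA space -> TSG -> channel.
#include "nv_uvm_interface.h"

static NV_STATUS channel_in_tsg_example(uvmGpuAddressSpaceHandle vaSpace)
{
    UvmGpuTsgAllocParams tsgParams = {0};       // placeholder params
    UvmGpuChannelAllocParams channelParams = {0};
    uvmGpuTsgHandle tsg;
    uvmGpuChannelHandle channel;
    UvmGpuChannelInfo channelInfo;
    NV_STATUS status;

    status = nvUvmInterfaceTsgAllocate(vaSpace, &tsgParams, &tsg);
    if (status != NV_OK)
        return status;

    // The channel is now created inside the TSG, not the VA space.
    status = nvUvmInterfaceChannelAllocate(tsg, &channelParams, &channel, &channelInfo);
    if (status != NV_OK)
        nvUvmInterfaceTsgDestroy(tsg);  // unwind the TSG if the channel failed

    return status;
}
```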
@@ -868,7 +900,8 @@ NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
 EXPORT_SYMBOL(nvUvmInterfaceInitFaultInfo);

 NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device,
-                                           UvmGpuAccessCntrInfo *pAccessCntrInfo)
+                                           UvmGpuAccessCntrInfo *pAccessCntrInfo,
+                                           NvU32 accessCntrIndex)
 {
     nvidia_stack_t *sp = NULL;
     NV_STATUS status;
@@ -880,7 +913,8 @@ NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device,

     status = rm_gpu_ops_init_access_cntr_info(sp,
                                               (gpuDeviceHandle)device,
-                                              pAccessCntrInfo);
+                                              pAccessCntrInfo,
+                                              accessCntrIndex);

     nv_kmem_cache_free_stack(sp);
     return status;
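nvUvmInterfaceInitAccessCntrInfo now takes an accessCntrIndex, forwarded verbatim to rm_gpu_ops_init_access_cntr_info, so the caller selects which access-counter notification buffer to initialize rather than getting a single implicit one. A minimal sketch; using index 0 for the first buffer is an assumption the diff does not specify:

```c
// Sketch: initialize access-counter info for a caller-chosen buffer index.
#include "nv_uvm_interface.h"

static NV_STATUS init_access_cntr_example(uvmGpuDeviceHandle device,
                                          UvmGpuAccessCntrInfo *pAccessCntrInfo)
{
    const NvU32 accessCntrIndex = 0;  // illustrative assumption
    return nvUvmInterfaceInitAccessCntrInfo(device, pAccessCntrInfo, accessCntrIndex);
}
```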
@@ -1432,6 +1466,150 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
 }
 EXPORT_SYMBOL(nvUvmInterfacePagingChannelPushStream);

+NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
+                                       uvmGpuChannelHandle channel)
+{
+    nvidia_stack_t *sp = NULL;
+    NV_STATUS status;
+
+    if (nv_kmem_cache_alloc_stack(&sp) != 0)
+    {
+        return NV_ERR_NO_MEMORY;
+    }
+
+    status = rm_gpu_ops_ccsl_context_init(sp, &uvmCslContext->ctx, (gpuChannelHandle)channel);
+
+    // Saving the stack in the context allows UVM to safely use the CSL layer
+    // in interrupt context without making new allocations. UVM serializes CSL
+    // API usage for a given context so the stack pointer does not need
+    // additional protection.
+    if (status != NV_OK)
+    {
+        nv_kmem_cache_free_stack(sp);
+    }
+    else
+    {
+        uvmCslContext->nvidia_stack = sp;
+    }
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslInitContext);
+
+void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext)
+{
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+    rm_gpu_ops_ccsl_context_clear(sp, uvmCslContext->ctx);
+    nvUvmFreeSafeStack(sp);
+}
+EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext);
+
+NV_STATUS nvUvmInterfaceCslLogDeviceEncryption(UvmCslContext *uvmCslContext,
+                                               UvmCslIv *decryptIv)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    status = rm_gpu_ops_ccsl_log_device_encryption(sp, uvmCslContext->ctx, (NvU8 *)decryptIv);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslLogDeviceEncryption);
+
+NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
+                                    UvmCslDirection direction)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    status = rm_gpu_ops_ccsl_rotate_iv(sp, uvmCslContext->ctx, direction);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslRotateIv);
+
+NV_STATUS nvUvmInterfaceCslAcquireEncryptionIv(UvmCslContext *uvmCslContext,
+                                               UvmCslIv *encryptIv)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    BUILD_BUG_ON(NV_OFFSETOF(UvmCslIv, fresh) != sizeof(encryptIv->iv));
+
+    status = rm_gpu_ops_ccsl_acquire_encryption_iv(sp, uvmCslContext->ctx, (NvU8*)encryptIv);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslAcquireEncryptionIv);
+
+NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
+                                   NvU32 bufferSize,
+                                   NvU8 const *inputBuffer,
+                                   UvmCslIv *encryptIv,
+                                   NvU8 *outputBuffer,
+                                   NvU8 *authTagBuffer)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    if (encryptIv != NULL)
+        status = rm_gpu_ops_ccsl_encrypt_with_iv(sp, uvmCslContext->ctx, bufferSize, inputBuffer, (NvU8*)encryptIv, outputBuffer, authTagBuffer);
+    else
+        status = rm_gpu_ops_ccsl_encrypt(sp, uvmCslContext->ctx, bufferSize, inputBuffer, outputBuffer, authTagBuffer);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslEncrypt);
+
+NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
+                                   NvU32 bufferSize,
+                                   NvU8 const *inputBuffer,
+                                   UvmCslIv const *decryptIv,
+                                   NvU8 *outputBuffer,
+                                   NvU8 const *authTagBuffer)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    status = rm_gpu_ops_ccsl_decrypt(sp,
+                                     uvmCslContext->ctx,
+                                     bufferSize,
+                                     inputBuffer,
+                                     (NvU8 *)decryptIv,
+                                     outputBuffer,
+                                     authTagBuffer);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslDecrypt);
+
+NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
+                                NvU32 bufferSize,
+                                NvU8 const *inputBuffer,
+                                NvU8 *authTagBuffer)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    status = rm_gpu_ops_ccsl_sign(sp, uvmCslContext->ctx, bufferSize, inputBuffer, authTagBuffer);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslSign);
+
+NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
+                                            UvmCslDirection direction,
+                                            NvU64 *messageNum)
+{
+    NV_STATUS status;
+    nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
+
+    status = rm_gpu_ops_ccsl_query_message_pool(sp, uvmCslContext->ctx, direction, messageNum);
+
+    return status;
+}
+EXPORT_SYMBOL(nvUvmInterfaceCslQueryMessagePool);
+
 #else // NV_UVM_ENABLE

 NV_STATUS nv_uvm_suspend(void)
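As the comment in nvUvmInterfaceCslInitContext notes, the context keeps its own nvidia_stack_t, so every later CSL call reuses it instead of allocating; that is what makes the layer usable from interrupt context, with UVM serializing use of each context. A hedged round-trip sketch of the added interface; the buffer and auth-tag sizes are illustrative assumptions, and the NULL encryptIv deliberately exercises the encryptIv != NULL branch shown above:

```c
// Sketch: init a CSL context on a channel, encrypt one buffer, tear down.
#include "nv_uvm_interface.h"

static NV_STATUS csl_encrypt_example(uvmGpuChannelHandle channel)
{
    UvmCslContext cslContext;
    NvU8 plaintext[64] = {0};
    NvU8 ciphertext[64];
    NvU8 authTag[16];                 // assumed tag size, for illustration only
    NV_STATUS status;

    status = nvUvmInterfaceCslInitContext(&cslContext, channel);
    if (status != NV_OK)
        return status;

    // NULL encryptIv takes the rm_gpu_ops_ccsl_encrypt path, letting the CSL
    // layer manage the IV; a caller that pre-acquired one with
    // nvUvmInterfaceCslAcquireEncryptionIv would pass it here instead.
    status = nvUvmInterfaceCslEncrypt(&cslContext, sizeof(plaintext),
                                      plaintext, NULL, ciphertext, authTag);

    nvUvmInterfaceDeinitCslContext(&cslContext);
    return status;
}
```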