535.43.02

Andy Ritger
2023-05-30 10:11:36 -07:00
parent 6dd092ddb7
commit eb5c7665a1
1403 changed files with 295367 additions and 86235 deletions

View File

@@ -99,20 +99,20 @@ typedef struct
#define RMCTRL_CLEAR_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) &= ~(0?cap##field))
// macros to AND/OR caps between two tables
#define RMCTRL_AND_CAP(finaltbl,tmptbl,tmp,cap,field) \
tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] & tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
#define RMCTRL_OR_CAP(finaltbl,tmptbl,tmp,cap,field) \
tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] | tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \
finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp;
// Whether the command ID is a NULL command?
// We allow NVXXXX_CTRL_CMD_NULL (0x00000000) as well as the
// per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 )
#define RMCTRL_IS_NULL_CMD(cmd) ((cmd == NVXXXX_CTRL_CMD_NULL) || \
(FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && \
FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd)))
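For readers new to these macros: each cap##field token is expected to expand to an index:mask pair, so (1?cap##field) picks the byte index, (0?cap##field) picks the bit mask, and the 0/0 arm is meant to break the build when the index exceeds the table size. A small standalone sketch of that idiom, with a made-up DEMO cap rather than a real RMCTRL table:

#include <stdio.h>

/* Hypothetical cap layout for illustration only: byte 2, bit 0x04. */
#define DEMO_CAPS_TBL_SIZE       4
#define DEMO_CAPS_FANCY_FEATURE  2:0x04

/* Same idiom as the RMCTRL_*_CAP macros above: "1 ? idx : mask" yields idx,
 * "0 ? idx : mask" yields mask, and an out-of-range index selects 0/0. */
#define DEMO_SET_CAP(tbl, cap, field) \
    ((tbl[((1?cap##field) >= cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field))

int main(void)
{
    unsigned char caps[DEMO_CAPS_TBL_SIZE] = {0};

    DEMO_SET_CAP(caps, DEMO_CAPS, _FANCY_FEATURE);
    printf("caps[2] = 0x%02x\n", caps[2]);   /* prints caps[2] = 0x04 */
    return 0;
}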
@@ -141,11 +141,8 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
#define _RMCTRL_PREP_ACCESS_ARG(x) | NVBIT(NV_CONCATENATE(RS_ACCESS_, x))
#define ACCESS_RIGHTS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_ACCESS_ARG, __VA_ARGS__))
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \
( \
(ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && \
!RMCFG_FEATURE_PHYSICAL_RM \
)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \
(ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL)
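For context on the ACCESS_RIGHTS() helper a few lines above: each argument is pasted onto RS_ACCESS_ and OR-ed in as a single bit. A self-contained sketch of the expansion, using placeholder RS_ACCESS_* indices (the real values live elsewhere in the tree):

#include <stdio.h>

#define NVBIT(n)              (1U << (n))
#define NV_CONCATENATE(a, b)  a##b

/* Placeholder access-right indices, assumed for this demo only. */
#define RS_ACCESS_DUP_OBJECT  0
#define RS_ACCESS_DEBUG       2

/* Each argument contributes "| NVBIT(RS_ACCESS_<arg>)" to the expression;
 * NV_FOREACH_ARG_NOCOMMA (not reproduced here) applies this per argument. */
#define _DEMO_PREP_ACCESS_ARG(x) | NVBIT(NV_CONCATENATE(RS_ACCESS_, x))

int main(void)
{
    /* Hand-expanded form of ACCESS_RIGHTS(DUP_OBJECT, DEBUG). */
    unsigned mask = (0 _DEMO_PREP_ACCESS_ARG(DUP_OBJECT)
                       _DEMO_PREP_ACCESS_ARG(DEBUG));

    printf("mask = 0x%x\n", mask);   /* 0x5 with the indices above */
    return 0;
}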
//
// 'FLAGS' Attribute

View File

@@ -163,7 +163,7 @@ NV_STATUS intermapDelDmaMapping (RsClient *, VirtualMemory *
void intermapFreeDmaMapping (PCLI_DMA_MAPPING_INFO);
CLI_DMA_MAPPING_INFO *intermapGetDmaMapping (VirtualMemory *pVirtualMemory, NvU64 dmaOffset, NvU32 gpuMask);
NvBool CliGetDmaMappingInfo (NvHandle, NvHandle, NvHandle, NvU64, NvU32, PCLI_DMA_MAPPING_INFO*);
NvBool CliGetDmaMappingInfo (RsClient *, NvHandle, NvHandle, NvU64, NvU32, PCLI_DMA_MAPPING_INFO*);
void CliGetDmaMappingIterator (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR, PNODE pDmaMappingList);
void CliGetDmaMappingNext (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR);
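The CliGetDmaMappingInfo() change above replaces the leading client handle with an RsClient pointer, so callers resolve the client object first. A rough call-site sketch; the roles of the remaining handles (hDevice, hMemory) are guesses for illustration, not documented in this hunk:

/* Sketch only: assumes the declarations above plus the usual RM headers. */
static NvBool demoLookupDmaMapping(RsClient *pClient, NvHandle hDevice,
                                   NvHandle hMemory, NvU64 dmaOffset,
                                   NvU32 gpuMask)
{
    PCLI_DMA_MAPPING_INFO pDmaInfo = NULL;

    /* Old form (removed above) took the client handle itself:
     *   CliGetDmaMappingInfo(hClient, hDevice, hMemory, dmaOffset,
     *                        gpuMask, &pDmaInfo);
     */
    return CliGetDmaMappingInfo(pClient, hDevice, hMemory, dmaOffset,
                                gpuMask, &pDmaInfo);
}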

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,6 +39,7 @@
typedef struct gpuSession *gpuSessionHandle;
typedef struct gpuDevice *gpuDeviceHandle;
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
typedef struct gpuTsg *gpuTsgHandle;
typedef struct gpuChannel *gpuChannelHandle;
typedef struct gpuObject *gpuObjectHandle;
@@ -97,7 +98,11 @@ NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
NvLength pageCount,
NvU64 pageSize);
NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
NV_STATUS nvGpuOpsTsgAllocate(gpuAddressSpaceHandle vaSpace,
const gpuTsgAllocParams *params,
gpuTsgHandle *tsgHandle);
NV_STATUS nvGpuOpsChannelAllocate(const gpuTsgHandle tsgHandle,
const gpuChannelAllocParams *params,
gpuChannelHandle *channelHandle,
gpuChannelInfo *channelInfo);
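With the change above, channels hang off a TSG rather than a VA space directly: nvGpuOpsTsgAllocate() takes the VA space and nvGpuOpsChannelAllocate() takes the resulting TSG handle. A minimal usage sketch under that reading; the teardown calls (nvGpuOpsTsgDestroy / nvGpuOpsChannelDestroy) appear in the next hunk, and the params contents are left to the caller since their layout is not shown here:

/* Sketch only: assumes the nv_gpu_ops.h declarations in this diff. */
static NV_STATUS demoAllocChannelInTsg(gpuAddressSpaceHandle vaSpace,
                                       const gpuTsgAllocParams *tsgParams,
                                       const gpuChannelAllocParams *chParams)
{
    gpuTsgHandle     tsg     = NULL;
    gpuChannelHandle channel = NULL;
    gpuChannelInfo   channelInfo;
    NV_STATUS        status;

    /* The VA space is supplied at TSG allocation time per the prototype above. */
    status = nvGpuOpsTsgAllocate(vaSpace, tsgParams, &tsg);
    if (status != NV_OK)
        return status;

    /* The channel is then allocated into the TSG. */
    status = nvGpuOpsChannelAllocate(tsg, chParams, &channel, &channelInfo);
    if (status != NV_OK)
    {
        nvGpuOpsTsgDestroy(tsg);
        return status;
    }

    /* ... use the channel ... */

    nvGpuOpsChannelDestroy(channel);
    nvGpuOpsTsgDestroy(tsg);
    return NV_OK;
}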
@@ -105,6 +110,8 @@ NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);
void nvGpuOpsTsgDestroy(struct gpuTsg *tsg);
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
@@ -196,7 +203,7 @@ NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
void **pPma,
const UvmPmaStatistics **pPmaPubStats);
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo, NvU32 accessCntrIndex);
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo);
@@ -272,4 +279,40 @@ NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);
// Interface used for CCSL
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslLogDeviceEncryption(struct ccslContext_t *ctx,
NvU8 *decryptIv);
NV_STATUS nvGpuOpsCcslAcquireEncryptionIv(struct ccslContext_t *ctx,
NvU8 *encryptIv);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslEncryptWithIv(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *encryptIv,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 const *decryptIv,
NvU8 *outputBuffer,
NvU8 const *authTagBuffer);
NV_STATUS nvGpuOpsCcslSign(struct ccslContext_t *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsQueryMessagePool(struct ccslContext_t *ctx,
NvU8 direction,
NvU64 *messageNum);
#endif /* _NV_GPU_OPS_H_*/
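A rough sketch of how the CCSL entry points above could chain together for an encrypt/decrypt round trip. The 16-byte IV and auth-tag sizes are assumptions (typical for AES-GCM) and are not stated in this header, and whether a single context may both encrypt and decrypt is likewise assumed:

/* Sketch only: sizes and error handling are illustrative. */
#define DEMO_IV_BYTES        16
#define DEMO_AUTH_TAG_BYTES  16

static NV_STATUS demoCcslRoundTrip(gpuChannelHandle channel,
                                   NvU8 const *plaintext, NvU8 *ciphertext,
                                   NvU8 *decrypted, NvU32 size)
{
    struct ccslContext_t *ctx = NULL;
    NvU8 encryptIv[DEMO_IV_BYTES];
    NvU8 authTag[DEMO_AUTH_TAG_BYTES];
    NV_STATUS status;

    status = nvGpuOpsCcslContextInit(&ctx, channel);
    if (status != NV_OK)
        return status;

    /* Reserve an IV, encrypt with it, then decrypt the result with the
     * same IV and authentication tag. */
    status = nvGpuOpsCcslAcquireEncryptionIv(ctx, encryptIv);
    if (status == NV_OK)
        status = nvGpuOpsCcslEncryptWithIv(ctx, size, plaintext, encryptIv,
                                           ciphertext, authTag);
    if (status == NV_OK)
        status = nvGpuOpsCcslDecrypt(ctx, size, ciphertext, encryptIv,
                                     decrypted, authTag);

    nvGpuOpsCcslContextClear(ctx);
    return status;
}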

View File

@@ -77,12 +77,12 @@ typedef struct API_STATE RMAPI_PARAM_COPY;
// this initialization, there is no need to make it return a status and
// duplicate error checking.
//
#define RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, theUserParams, numElems, sizeOfElem) \
do { \
RMAPI_PARAM_COPY_SET_MSG_TAG((paramCopy), __FUNCTION__); \
(paramCopy).ppKernelParams = (void **) &(pKernelParams); \
(paramCopy).pUserParams = (theUserParams); \
(paramCopy).flags = RMAPI_PARAM_COPY_FLAGS_NONE; \
(paramCopy).bSizeValid = portSafeMulU32((numElems), (sizeOfElem), &(paramCopy).paramsSize); \
} while(0)
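Because the macro above only fills in the descriptor and records the multiplication result in bSizeValid (via portSafeMulU32), callers check that flag themselves before using paramsSize. A hedged sketch of that pattern; the element type and the later copy-in step are placeholders, not from this file:

/* Sketch only: DEMO_ELEM and the follow-up copy step are stand-ins. */
typedef struct { NvU32 value; } DEMO_ELEM;

static NV_STATUS demoPrepareParamCopy(NvP64 userParams, NvU32 numElems)
{
    RMAPI_PARAM_COPY paramCopy;
    DEMO_ELEM       *pKernelParams = NULL;

    RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, userParams,
                          numElems, sizeof(DEMO_ELEM));

    /* The overflow check deferred by the macro is applied here. */
    if (!paramCopy.bSizeValid)
        return NV_ERR_INVALID_ARGUMENT;

    /* ... hand paramCopy to the copy-in / copy-out helpers ... */
    return NV_OK;
}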

View File

@@ -114,10 +114,16 @@ RM_API *rmapiGetInterface(RMAPI_TYPE rmapiType);
// Flags for RM_API::Alloc
#define RMAPI_ALLOC_FLAGS_NONE 0
#define RMAPI_ALLOC_FLAGS_SKIP_RPC NVBIT(0)
#define RMAPI_ALLOC_FLAGS_SERIALIZED NVBIT(1)
// Flags for RM_API::Free
#define RMAPI_FREE_FLAGS_NONE 0
// Flags for RM_API RPC's
#define RMAPI_RPC_FLAGS_NONE 0
#define RMAPI_RPC_FLAGS_COPYOUT_ON_ERROR NVBIT(0)
#define RMAPI_RPC_FLAGS_SERIALIZED NVBIT(1)
/**
* Interface for performing operations through the RM API exposed to client
* drivers. Interface provides consistent view to the RM API while abstracting
@@ -129,16 +135,16 @@ struct _RM_API
{
// Allocate a resource with default security attributes and local pointers (no NvP64)
NV_STATUS (*Alloc)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle *phObject, NvU32 hClass, void *pAllocParams);
NvHandle *phObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize);
// Allocate a resource with default security attributes and local pointers (no NvP64)
// and client assigned handle
NV_STATUS (*AllocWithHandle)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle hObject, NvU32 hClass, void *pAllocParams);
NvHandle hObject, NvU32 hClass, void *pAllocParams, NvU32 paramsSize);
// Allocate a resource
NV_STATUS (*AllocWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent,
NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams,
NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams, NvU32 paramsSize,
NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo);
// Free a resource with default security attributes
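The Alloc / AllocWithHandle / AllocWithSecInfo changes above thread an explicit paramsSize through the allocation path. A brief call-site sketch under the new signature; NV01_DEVICE_0 and NV0080_ALLOC_PARAMETERS are used only as a plausible example class, not something this hunk shows:

/* Sketch only: assumes pRmApi came from rmapiGetInterface() and that the
 * caller owns valid hClient / hParent handles. */
static NV_STATUS demoAllocDevice(RM_API *pRmApi, NvHandle hClient,
                                 NvHandle hParent, NvHandle *phDevice)
{
    NV0080_ALLOC_PARAMETERS allocParams = {0};

    /* The size of the parameter struct now accompanies the request. */
    return pRmApi->Alloc(pRmApi, hClient, hParent, phDevice,
                         NV01_DEVICE_0, &allocParams, sizeof(allocParams));
}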
@@ -303,7 +309,7 @@ rmapiEpilogue
RM_API_CONTEXT *pContext
);
void
rmapiInitLockInfo
(
RM_API *pRmApi,
@@ -315,7 +321,7 @@ rmapiInitLockInfo
// RM locking modules: 24-bit group bitmask, 8-bit subgroup id
//
// Lock acquires are tagged with a RM_LOCK_MODULE_* in order to partition
// the acquires into groups, which allows read-only locks to be
// enabled / disabled on a per-group basis (via apiLockMask and gpuLockMask
// in OBJSYS.)
//
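One plausible reading of the comment above, offered purely as an illustration: a module value carries a one-hot group bit (the part apiLockMask / gpuLockMask can filter) in its upper 24 bits and a subgroup id in its low 8 bits. The encoding below is a guess, not the real RM_LOCK_MODULE_* layout:

/* Illustrative only: the actual encoding may differ. */
#define DEMO_LOCK_GROUP_SHIFT  8

#define DEMO_LOCK_MODULE(groupBit, subId) \
    ((NvU32)(((groupBit) << DEMO_LOCK_GROUP_SHIFT) | ((subId) & 0xFF)))

/* Is read-only locking enabled for this module under the given group mask? */
static NvBool demoIsReadOnlyLockAllowed(NvU32 moduleVal, NvU32 roGroupMask)
{
    NvU32 groupBits = moduleVal >> DEMO_LOCK_GROUP_SHIFT;   /* 24-bit group bitmask */

    return ((groupBits & roGroupMask) != 0) ? NV_TRUE : NV_FALSE;
}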

View File

@@ -46,11 +46,11 @@
MAKE_LIST(ClientHandlesList, NvHandle);
#define serverutilGetDerived(pRmClient, hResource, ppBaseRes, type) \
(clientGetResource(staticCast((pRmClient), RsClient), \
(hResource), \
classId(type), \
(ppBaseRes)) != NV_OK) \
? NULL \
: dynamicCast(*(ppBaseRes), type)
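A short usage sketch for the serverutilGetDerived() macro above; Device is just an example class, and the base-resource out pointer is assumed to be what clientGetResource() fills in:

/* Sketch only: returns NULL if the handle does not resolve to a Device. */
static Device *demoLookupDevice(RmClient *pRmClient, NvHandle hDevice)
{
    RsResource *pBaseRes = NULL;

    return serverutilGetDerived(pRmClient, hDevice, &pBaseRes, Device);
}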
/**
@@ -119,9 +119,8 @@ NV_STATUS serverutilGenResourceHandle(NvHandle, NvHandle*);
* Get a client pointer from a client handle without taking any locks.
*
* @param[in] hClient The client to acquire
* @param[out] ppClient Pointer to the RmClient
*/
NV_STATUS serverutilGetClientUnderLock(NvHandle hClient, RmClient **ppClient);
RmClient *serverutilGetClientUnderLock(NvHandle hClient);
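The change above makes serverutilGetClientUnderLock() return the RmClient pointer directly instead of filling an out parameter. A sketch of the new call pattern; treating NULL as "not found" is an assumption read off the signature:

/* Sketch only: assumes NULL means the client handle was not found. */
static NvBool demoClientExists(NvHandle hClient)
{
    RmClient *pClient = serverutilGetClientUnderLock(hClient);

    /* Previously (removed above):
     *   RmClient *pClient;
     *   if (serverutilGetClientUnderLock(hClient, &pClient) != NV_OK)
     *       return NV_FALSE;
     */
    return (pClient != NULL) ? NV_TRUE : NV_FALSE;
}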
/**
* Get a client pointer from a client handle and lock it.