515.43.04

This commit is contained in:
Andy Ritger
2022-05-09 13:18:59 -07:00
commit 1739a20efc
2519 changed files with 1060036 additions and 0 deletions

View File

@@ -0,0 +1,3 @@
#include "g_dbgbuffer_nvoc.h"

View File

@@ -0,0 +1,120 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
// #ifndef NVOC
// #include "g_objrpc_nvoc.h"
// #endif
#ifndef _OBJRPC_H_
#define _OBJRPC_H_
#include "vgpu/rpc_headers.h"
#include "diagnostics/nv_debug_dump.h"
#include "ctrl/ctrl2080/ctrl2080event.h" // rmcontrol params (from hal)
#include "ctrl/ctrl2080/ctrl2080gpu.h" // rmcontrol params (from hal)
#include "ctrl/ctrl2080/ctrl2080rc.h" // rmcontrol params (from hal)
#include "ctrl/ctrl2080/ctrl2080perf.h" // rmcontrol params (from hal)
#include "ctrl/ctrl0080/ctrl0080fb.h" // rmcontrol params (from hal)
#include "ctrl/ctrl0080/ctrl0080dma.h" // rmcontrol params (from hal)
#include "gpu/gsp/message_queue.h"
typedef struct GSP_FIRMWARE GSP_FIRMWARE;
typedef struct _object_vgpu OBJVGPU, *POBJVGPU;
#include "g_rpc_hal.h" // For RPC_HAL_IFACES
#include "g_rpc_odb.h" // For RPC_HAL_IFACES
// RPC object: owns the message buffers and queue state used to carry RPCs
// between this RM instance and its peer (GSP firmware or vGPU host plugin).
struct OBJRPC{
OBJECT_BASE_DEFINITION(RPC);
struct {
NvU32 ipVersion;
}__nvoc_pbase_Object[1]; // This nested structure mechanism is to bypass NVOC
// Message buffer fields
NvU32 *message_buffer;
NvU32 *message_buffer_priv;
MEMORY_DESCRIPTOR *pMemDesc_mesg;
NvU32 maxRpcSize;
// UVM Message buffer fields
NvU32 *message_buffer_uvm;
NvU32 *message_buffer_priv_uvm;
MEMORY_DESCRIPTOR *pMemDesc_mesg_uvm;
// Buffer for initial GSP message.
void *init_msg_buf;
// Physical address of init_msg_buf (NOTE(review): presumably so the
// peer can DMA it before the full message queue is up — confirm).
RmPhysAddr init_msg_buf_pa;
/* Message Queue */
struct _message_queue_info *pMessageQueueInfo;
RmPhysAddr messageQueuePhysMem;
};
//
// Utility macros for composing RPC messages.
// See <vgpu/dev_vgpu.h> for message formats.
// A message has a fixed-format header and optionally a variable length
// parameter after the header.
//
// Typed view of the RPC message buffer header; expands against a local
// `pRpc` that must be in scope at the point of use.
#define vgpu_rpc_message_header_v ((rpc_message_header_v*)(pRpc->message_buffer))
// Start of the variable-length parameter area that follows the fixed header.
#define rpc_message (vgpu_rpc_message_header_v->rpc_message_data)
// Record the RPC interface IP version on the embedded NVOC base object
// (the one-element __nvoc_pbase_Object array that bypasses NVOC generation).
static inline void _objrpcAssignIpVersion(struct OBJRPC* pRpc, NvU32 ipVersion)
{
    pRpc->__nvoc_pbase_Object[0].ipVersion = ipVersion;
}
// Initialize and free RPC infrastructure
NV_STATUS initRpcInfrastructure_VGPU(OBJGPU *pGpu);
NV_STATUS freeRpcInfrastructure_VGPU(OBJGPU *pGpu);
OBJRPC *initRpcObject(OBJGPU *pGpu);
void rpcSetIpVersion(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 ipVersion);
void rpcObjIfacesSetup(OBJRPC *pRpc);
void rpcRmApiSetup(OBJGPU *pGpu);
NV_STATUS rpcWriteCommonHeader(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 func, NvU32 paramLength);
NV_STATUS rpcWriteCommonHeaderSim(OBJGPU *pGpu);
NV_STATUS _allocRpcMemDesc(OBJGPU *pGpu, NvU64 size, NvBool bContig, NV_ADDRESS_SPACE addrSpace, MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv);
void _freeRpcMemDesc(OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv);
NV_STATUS vgpuGspSetupBuffers(OBJGPU *pGpu);
void vgpuGspTeardownBuffers(OBJGPU *pGpu);
//
// OBJGPU RPC member accessors.
// Historically, they have been defined inline by the following macros.
// These definitions were migrated to gpu.c in order to avoid having to include object headers in
// this file.
//
OBJRPC *gpuGetGspClientRpc(OBJGPU*);
OBJRPC *gpuGetVgpuRpc(OBJGPU*);
OBJRPC *gpuGetRpc(OBJGPU*);
#define GPU_GET_GSPCLIENT_RPC(u) gpuGetGspClientRpc(u)
#define GPU_GET_VGPU_RPC(u) gpuGetVgpuRpc(u)
#define GPU_GET_RPC(u) gpuGetRpc(u)
#endif // _OBJRPC_H_

View File

@@ -0,0 +1,3 @@
#include "g_objsweng_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_objtmr_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_dispsw_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_syncgpuboost_nvoc.h"

View File

@@ -0,0 +1,3 @@
#include "g_tmr_nvoc.h"

View File

@@ -0,0 +1,740 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//******************************************************************************
//
// Declarations for the RPC module.
//
// Description:
// This module declares the RPC interface functions/macros.
//
//******************************************************************************
#ifndef __vgpu_dev_nv_rpc_h__
#define __vgpu_dev_nv_rpc_h__
#include "class/cl84a0.h"
#include "rpc_headers.h"
#include "gpu/dce_client/dce_client.h"
#include "objrpc.h"
#include "rpc_vgpu.h"
#include "vgpu_events.h"
#include "kernel/gpu/fifo/kernel_fifo.h"
typedef struct ContextDma ContextDma;
#define NV_RM_STUB_RPC 0
#if NV_RM_STUB_RPC
static inline void NV_RM_RPC_ALLOC_SHARE_DEVICE(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_ALLOC_MEMORY(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_ALLOC_CHANNEL(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_ALLOC_OBJECT(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_MAP_MEMORY_DMA(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_UNMAP_MEMORY_DMA(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_ALLOC_SUBDEVICE(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_DUP_OBJECT(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_IDLE_CHANNELS(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_ALLOC_EVENT(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_CONTROL(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_BIND(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_SET_GUEST_SYSTEM_INFO(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_PERF_GET_PSTATE_INFO(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_PERF_GET_VIRTUAL_PSTATE_INFO(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_PERF_GET_LEVEL_INFO(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_UNLOADING_GUEST_DRIVER(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_GPU_EXEC_REG_OPS(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_GET_STATIC_INFO(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_UPDATE_BAR_PDE(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_SET_PAGE_DIRECTORY(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_UNSET_PAGE_DIRECTORY(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_GET_GSP_STATIC_INFO(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_GSP_SET_SYSTEM_INFO(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_SET_REGISTRY(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_SUBDEV_EVENT_SET_NOTIFICATION(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_GSP_MSG_TIMING(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_VGPU_PF_REG_READ32(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(OBJGPU *pGpu, ...) { return; }
// RPC free stubs
static inline void NV_RM_RPC_SIM_FREE_INFRA(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_FREE(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_FREE_ON_ERROR(OBJGPU *pGpu, ...) { return; }
// Simulation stubs
static inline void NV_RM_RPC_SIM_LOAD_ESCAPE_FUNCTIONS(OBJOS *pOS, ...) { return; }
static inline void NV_RM_RPC_SIM_ADD_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; }
static inline void NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(OBJGPU *pGpu, ...) { return; }
#else // NV_RM_STUB_RPC
// Allocate a root client (NV01_ROOT) and then a device under it via the
// physical RM API, for firmware-client (GSP) configurations.
// On non-T234D GPUs the caller's process ID is looked up in the client
// database and stamped into the root alloc params (KERNEL_PID for
// kernel-privileged callers).
// NOTE(review): NV_ASSERT_OR_RETURN inside this macro executes a `return`
// from the *enclosing function* when the TLS call context is NULL — the
// macro is only safe in functions returning NV_STATUS; confirm all callers.
// `vamode` is accepted but unused here.
#define NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \
allocflags, vasize, vamode, status) \
do \
{ \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
NV0000_ALLOC_PARAMETERS root_alloc_params = {0}; \
\
root_alloc_params.hClient = hclient; \
\
if (!IsT234D(pGpu)) \
{ \
RmClient *pClient = NULL; \
\
/* Get process ID from the client database */ \
if (NV_OK == serverutilGetClientUnderLock(hclient, &pClient)) \
{ \
CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); \
NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); \
\
if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL) \
{ \
root_alloc_params.processID = KERNEL_PID; \
} \
else \
{ \
root_alloc_params.processID = pClient->ProcID; \
NV_ASSERT(root_alloc_params.processID == osGetCurrentProcess()); \
} \
} \
else \
NV_ASSERT(0); \
} \
\
status = pRmApi->AllocWithHandle(pRmApi, hclient, NV01_NULL_OBJECT, \
NV01_NULL_OBJECT, NV01_ROOT, \
&root_alloc_params); \
\
if (status == NV_OK) \
{ \
NV0080_ALLOC_PARAMETERS device_alloc_params = {0}; \
\
device_alloc_params.hClientShare = hclientshare; \
device_alloc_params.hTargetClient = htargetclient; \
device_alloc_params.hTargetDevice = htargetdevice; \
device_alloc_params.flags = allocflags; \
device_alloc_params.vaSpaceSize = vasize; \
\
status = pRmApi->AllocWithHandle(pRmApi, hclient, hclient, hdevice, \
hclass, &device_alloc_params); \
} \
else \
NV_ASSERT(0); \
} \
while (0)
// Allocate memory on the peer. Three paths:
//  * GSP client on T234D: build NV_MEMORY_LIST_ALLOCATION_PARAMS from the
//    memory descriptor and allocate NV01_MEMORY_LIST_SYSTEM via the
//    physical RM API (no-map, noncontiguous; coherency bits taken from
//    the caller's `flags`).
//  * Otherwise: forward to rpcAllocMemory_HAL.
//  * The whole RPC is skipped for SR-IOV virtual GPUs (unless the
//    bug-200577889 heavy-SRIOV WAR or MODS is active) — `status` is then
//    left unchanged.
// Sets status = NV_ERR_INSUFFICIENT_RESOURCES when no RPC object exists.
#define NV_RM_RPC_ALLOC_MEMORY(pGpu, hclient, hdevice, hmemory, hclass, \
flags, pmemdesc, status) \
do \
{ \
OBJRPC *pRpc; \
pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL) \
&& (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && \
!gpuIsWarBug200577889SriovHeavyEnabled(pGpu) && \
!NV_IS_MODS))) { \
if (IS_GSP_CLIENT(pGpu) && IsT234D(pGpu)) \
{ \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
NV_MEMORY_LIST_ALLOCATION_PARAMS listAllocParams = {0}; \
listAllocParams.pteAdjust = pmemdesc->PteAdjust; \
listAllocParams.format = memdescGetPteKind(pmemdesc); \
listAllocParams.size = pmemdesc->Size; \
listAllocParams.pageCount = pmemdesc->PageCount; \
listAllocParams.pageNumberList = memdescGetPteArray(pmemdesc, AT_GPU); \
listAllocParams.hClient = NV01_NULL_OBJECT; \
listAllocParams.hParent = NV01_NULL_OBJECT; \
listAllocParams.hObject = NV01_NULL_OBJECT; \
listAllocParams.limit = pmemdesc->Size - 1; \
listAllocParams.flagsOs02 = (DRF_DEF(OS02,_FLAGS,_MAPPING,_NO_MAP) | \
DRF_DEF(OS02,_FLAGS,_PHYSICALITY,_NONCONTIGUOUS) | \
(flags & DRF_SHIFTMASK(NVOS02_FLAGS_COHERENCY))); \
status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, \
hmemory, NV01_MEMORY_LIST_SYSTEM, &listAllocParams); \
} \
else \
{ \
status = rpcAllocMemory_HAL(pGpu, pRpc, hclient, hdevice, hmemory, \
hclass, flags, pmemdesc); \
} \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} \
while (0)
#define NV_RM_RPC_MAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, offset, length, flags, \
dmaoffset, status) \
do \
{ \
OBJRPC *pRpc; \
pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL) && \
!gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \
status = rpcMapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, offset, \
length, flags, dmaoffset); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_UNMAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, flags, dmaoffset, \
status) \
do \
{ \
OBJRPC *pRpc; \
pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL) && \
!gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \
status = rpcUnmapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, \
flags, dmaoffset); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_IDLE_CHANNELS(pGpu, phclients, phdevices, phchannels, \
nentries, flags, timeout, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcIdleChannels_HAL(pGpu, pRpc, phclients, phdevices, \
phchannels, nentries, flags, timeout); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while(0)
#define NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \
allocflags, vasize, vamode, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \
allocflags, vasize, vamode, status); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} \
while (0)
#define NV_RM_RPC_CONTROL(pGpu, hClient, hObject, cmd, pParams, paramSize, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
status = pRmApi->Control(pRmApi, hClient, hObject, cmd, \
pParams, paramSize); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_ALLOC_CHANNEL(pGpu, hclient, hparent, hchannel, hclass, \
pGpfifoAllocParams, pchid, status) \
do \
{ \
OBJRPC *pRpc; \
pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
status = pRmApi->AllocWithHandle(pRmApi, hclient, hparent, hchannel, \
hclass, pGpfifoAllocParams); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} \
while (0)
// Allocate an object under the given channel via the physical RM API
// (GSP client path only).
// Fix: every sibling macro reports NV_ERR_INSUFFICIENT_RESOURCES when the
// RPC object is missing; this one previously left `status` untouched, so a
// release build (where NV_ASSERT compiles out) would silently report
// success. Add the same `else if (pRpc == NULL)` branch for consistency.
#define NV_RM_RPC_ALLOC_OBJECT(pGpu, hclient, hchannel, hobject, hclass, params, status)\
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
status = pRmApi->AllocWithHandle(pRmApi, hclient, hchannel, hobject, \
hclass, params); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
// Free an object on the peer via the physical RM API (GSP client path).
// `hparent` is accepted for signature compatibility but unused — the
// resource server resolves the parent from hobject.
// Sets status = NV_ERR_INSUFFICIENT_RESOURCES when no RPC object exists;
// skips the free entirely if the incoming status is already a failure.
#define NV_RM_RPC_FREE(pGpu, hclient, hparent, hobject, status) \
do \
{ \
(void) hparent; \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
status = pRmApi->Free(pRmApi, hclient, hobject); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_FREE_ON_ERROR(pGpu, hclient, hparent, hobject) \
do \
{ \
(void) hparent; \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
/* used in failure cases, macro doesn't overwrite rmStatus */ \
if (pRpc != NULL) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
pRmApi->Free(pRmApi, hclient, hobject); \
} \
} \
while (0)
#define NV_RM_RPC_ALLOC_EVENT(pGpu, hclient, hparentclient, hchannel, hobject, \
hevent, hclass, idx, status) \
do \
{ \
(void) hchannel; \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
NV0005_ALLOC_PARAMETERS allocParams = {0}; \
allocParams.hParentClient = hparentclient; \
allocParams.hClass = hclass; \
allocParams.notifyIndex = idx | NV01_EVENT_CLIENT_RM; \
allocParams.data = 0; \
status = pRmApi->AllocWithHandle(pRmApi, hclient, \
hobject, hevent, \
hclass, &allocParams); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while(0)
#define NV_RM_RPC_ALLOC_SUBDEVICE(pGpu, hclient, hdevice, hsubdevice, \
hclass, subDeviceInst, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
NV2080_ALLOC_PARAMETERS alloc_params = {0}; \
\
alloc_params.subDeviceId = subDeviceInst; \
\
status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, hsubdevice, \
hclass, &alloc_params); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
// Duplicate an object handle on the peer via the physical RM API (GSP
// client path). On success, optionally marks the destination resource so
// that it issues an RPC free when released (bAutoFreeRpc).
// Fix: the result of dynamicCast() was dereferenced unchecked; a failed
// cast (pResource not an RmResource) would NULL-deref. Assert and guard.
#define NV_RM_RPC_DUP_OBJECT(pGpu, hclient, hparent, hobject, hclient_src, \
hobject_src, flags, bAutoFreeRpc, pDstRef, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
status = pRmApi->DupObject(pRmApi, hclient, hparent, \
&hobject, hclient_src, \
hobject_src, flags); \
if ((bAutoFreeRpc) && (pDstRef != NULL) && (status == NV_OK)) \
{ \
RmResource *pRmResource; \
pRmResource = dynamicCast(((RsResourceRef*)pDstRef)->pResource, RmResource); \
NV_ASSERT(pRmResource != NULL); \
if (pRmResource != NULL) \
pRmResource->bRpcFree = NV_TRUE; \
} \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_VGPU_PF_REG_READ32(pGpu, address, value, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \
status = rpcVgpuPfRegRead32_HAL(pGpu, pRpc, address, value, 0); \
} else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
/*
* manage HW resources RPC macro
*/
#define NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(pGpu, hclient, hdevice, hresource, \
pfballocinfo, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = RmRpcHwResourceAlloc(pGpu, pRpc, hclient, hdevice, \
hresource, pfballocinfo); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while(0)
// Free a HW resource on the peer via RmRpcHwResourceFree.
// Fix: the NULL-pRpc fallback used a bare `if` instead of the `else if`
// every sibling macro uses — behaviorally equivalent (the condition is
// mutually exclusive with the call above) but it re-tested pRpc and broke
// the file-wide pattern. Restored to `else if` for consistency.
#define NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(pGpu, hclient, hdevice, hresource, \
flags, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = RmRpcHwResourceFree(pGpu, pRpc, hclient, hdevice, \
hresource, flags); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while(0)
// Bind a HW resource (virtual/physical address pair) on the peer via
// RmRpcHwResourceBind.
// Fix: same consistency issue as NV_RM_RPC_MANAGE_HW_RESOURCE_FREE — the
// NULL-pRpc fallback used a bare `if` instead of the `else if` used by
// every sibling macro. Restored to `else if` (behavior unchanged).
#define NV_RM_RPC_MANAGE_HW_RESOURCE_BIND(pGpu, hclient, hdevice, hresource, \
virtaddr, physaddr, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = RmRpcHwResourceBind(pGpu, pRpc, hclient, hdevice, \
hresource, virtaddr, physaddr); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while(0)
#define NV_RM_RPC_SIM_LOAD_ESCAPE_FUNCTIONS(pos) \
do \
{ \
NV_ASSERT(pos); \
/* load simulation escape read/write routines */ \
pos->osSimEscapeRead = RmRpcSimEscapeRead; \
pos->osSimEscapeWrite = RmRpcSimEscapeWrite; \
} \
while(0)
/* outgoing updates to the plugin */
#define NV_RM_RPC_SIM_ADD_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma, channelnum) \
do \
{ \
NV_STATUS status; \
SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \
status = RmRpcSimAddDisplayContextDma(pGpu, hclient, pcontextdma, channelnum); \
NV_ASSERT(status == NV_OK); \
SLI_LOOP_END \
} \
while(0)
#define NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma, physaddrnew, \
physlimitnew, pagesize, ptekind) \
do \
{ \
NV_STATUS status; \
SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \
status = RmRpcSimUpdateDisplayContextDma(pGpu, hclient, pcontextdma, physaddrnew,\
physlimitnew, pagesize, ptekind); \
NV_ASSERT(status == NV_OK); \
SLI_LOOP_END \
} \
while(0)
#define NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma) \
do \
{ \
NV_STATUS status; \
SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \
status = RmRpcSimDeleteDisplayContextDma(pGpu, hclient, pcontextdma); \
NV_ASSERT(status == NV_OK); \
SLI_LOOP_END \
} \
while(0)
#define NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(pGpu, hclient, pcontextdma, channelnum) \
do \
{ \
NV_STATUS status; \
SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \
status = RmRpcSimUpdateDispChannelInfo(pGpu, hclient, pcontextdma, channelnum); \
NV_ASSERT(status == NV_OK); \
SLI_LOOP_END \
} \
while(0)
/*
 * Free the RPC infrastructure used for simulation (not the VGPU object).
 * NOTE(review): unlike the sibling macros there is no (status == NV_OK)
 * guard — the incoming status is asserted to be NV_OK and then
 * unconditionally overwritten with the RmRpcSimFreeInfra() result.
 * Confirm this unconditional overwrite is intentional.
 */
#define NV_RM_RPC_SIM_FREE_INFRA(pGpu, status) \
do \
{ \
NV_ASSERT(status == NV_OK); \
status = RmRpcSimFreeInfra(pGpu); \
} \
while (0)
#define NV_RM_RPC_SET_GUEST_SYSTEM_INFO(pGpu, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = RmRpcSetGuestSystemInfo(pGpu, pRpc); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while(0)
#define NV_RM_RPC_PERF_GET_VIRTUAL_PSTATE_INFO(pGpu, hClient, hObject, pParams, \
pClkInfos, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = RmRpcPerfGetVirtualPstateInfo(pGpu, pRpc, hClient, hObject,\
pParams, pClkInfos); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while(0)
#define NV_RM_RPC_UNLOADING_GUEST_DRIVER(pGpu, status, bSuspend, bGc6Entering, newPMLevel) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcUnloadingGuestDriver_HAL(pGpu, pRpc, bSuspend, bGc6Entering, newPMLevel); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} \
while (0)
#define NV_RM_RPC_GPU_EXEC_REG_OPS(pGpu, hClient, hObject, pParams, pRegOps, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcGpuExecRegOps_HAL(pGpu, pRpc, hClient, hObject, pParams, pRegOps); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} \
while (0)
#define NV_RM_RPC_GET_STATIC_INFO(pGpu, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcGetStaticInfo_HAL(pGpu, pRpc); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(pGpu, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcRegisterVirtualEventBuffer_HAL(pGpu, pRpc, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_UPDATE_BAR_PDE(pGpu, barType, entryValue, entryLevelShift, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcUpdateBarPde_HAL(pGpu, pRpc, barType, entryValue, entryLevelShift); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_SET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcSetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_UNSET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcUnsetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(pGpu, operation, status) \
do \
{ \
/* Call into RPC layer */ \
OBJRPC *pRpc; \
pRpc = GPU_GET_RPC(pGpu); \
if ((status == NV_OK) && (pRpc != NULL)) \
{ \
status = rpcPmaScrubberSharedBufferGuestPagesOperation_HAL(pGpu, pRpc, operation); \
} \
} \
while (0)
//
// DCE_CLIENT_RM specific RPCs
//
#define NV_RM_RPC_DCE_RM_INIT(pGpu, bInit, status) do {} while (0)
//
// GSP_CLIENT_RM specific RPCs
//
#define NV_RM_RPC_GET_GSP_STATIC_INFO(pGpu, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcGetGspStaticInfo_HAL(pGpu, pRpc); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_GSP_SET_SYSTEM_INFO(pGpu, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcGspSetSystemInfo_HAL(pGpu, pRpc); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_SET_REGISTRY(pGpu, status) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcSetRegistry_HAL(pGpu, pRpc); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(pGpu, status, pPrbEnc, pNvDumpState, \
component) \
do \
{ \
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
NV_ASSERT(pRpc != NULL); \
if ((status == NV_OK) && (pRpc != NULL)) \
status = rpcDumpProtobufComponent_HAL(pGpu, pRpc, pPrbEnc, \
pNvDumpState, component); \
else if (pRpc == NULL) \
status = NV_ERR_INSUFFICIENT_RESOURCES; \
} while (0)
#define NV_RM_RPC_RMFS_INIT(pGpu, statusQueueMemDesc, status) do {} while(0)
#define NV_RM_RPC_RMFS_CLOSE_QUEUE(pGpu, status) do {} while(0)
#define NV_RM_RPC_RMFS_CLEANUP(pGpu, status) do {} while(0)
#define NV_RM_RPC_RMFS_TEST(pGpu, numReps, testData1, testData2, \
testData3, status) do {} while(0)
// Default (no-simulation) implementations of the RmRpc* helpers referenced
// by the macros above. Freeing infra that was never set up is a no-op
// success; everything else reports NOT_SUPPORTED.
static inline NV_STATUS RmRpcSimFreeInfra(OBJGPU *pGpu, ...) { return NV_OK; }
static inline NV_STATUS RmRpcSimAddDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcSimUpdateDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcSimDeleteDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcSimUpdateDispChannelInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcHwResourceAlloc(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcHwResourceFree(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcHwResourceBind(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcPerfGetPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcPerfGetCurrentPstate(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcPerfGetVirtualPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
// Simulation escape read/write stubs; fully-typed signatures because
// NV_RM_RPC_SIM_LOAD_ESCAPE_FUNCTIONS stores them into function pointers.
static inline NV_STATUS RmRpcSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 index,
NvU32 count, NvU32 *data) { return NV_ERR_NOT_SUPPORTED; }
static inline NV_STATUS RmRpcSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 index,
NvU32 count, NvU32 data) { return NV_ERR_NOT_SUPPORTED; }
NV_STATUS RmRpcSetGuestSystemInfo(OBJGPU *pGpu, OBJRPC *pRpc);
/*!
* Defines the size of the GSP sim access buffer.
*/
#define GSP_SIM_ACCESS_BUFFER_SIZE 0x4000
/*!
* Defines the structure used to pass SimRead data from Kernel to Physical RM.
* NOTE(review): `data` is 0x4000 NvU32 *elements* (64 KiB), not 0x4000
* bytes — confirm the intended unit of GSP_SIM_ACCESS_BUFFER_SIZE.
* `volatile` because the buffer is written by the other side; `seq` looks
* like a sequence/ready counter — verify against the producer.
*/
typedef struct SimAccessBuffer
{
volatile NvU32 data[GSP_SIM_ACCESS_BUFFER_SIZE];
volatile NvU32 seq;
} SimAccessBuffer;
#endif // NV_RM_STUB_RPC
#endif // __vgpu_dev_nv_rpc_h__

View File

@@ -0,0 +1,236 @@
#ifndef _RPC_GLOBAL_ENUMS_H_
#define _RPC_GLOBAL_ENUMS_H_
/*
 * Master table of RPC function IDs, expressed as an X-macro list: each entry
 * is X(<owning unit>, <RPC name>).  When no caller has defined X(), the
 * default expansion below turns every entry into an
 * NV_VGPU_MSG_FUNCTION_<RPC> enumerator, so the list doubles as the enum
 * definition.  A translation unit may instead define its own X() before
 * including this header to generate a parallel table (e.g. names) from the
 * same list.  Values are assigned sequentially from 0 (the trailing //N
 * comments track them); deprecated slots are retained rather than removed,
 * which suggests the numbering is a stable protocol — append new entries
 * just before NUM_FUNCTIONS.
 */
#ifndef X
# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC,
# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
enum {
#endif
X(RM, NOP) // 0
X(RM, SET_GUEST_SYSTEM_INFO) // 1
X(RM, ALLOC_ROOT) // 2
X(RM, ALLOC_DEVICE) // 3 deprecated
X(RM, ALLOC_MEMORY) // 4
X(RM, ALLOC_CTX_DMA) // 5
X(RM, ALLOC_CHANNEL_DMA) // 6
X(RM, MAP_MEMORY) // 7
X(RM, BIND_CTX_DMA) // 8 deprecated
X(RM, ALLOC_OBJECT) // 9
X(RM, FREE) //10
X(RM, LOG) //11
X(RM, ALLOC_VIDMEM) //12
X(RM, UNMAP_MEMORY) //13
X(RM, MAP_MEMORY_DMA) //14
X(RM, UNMAP_MEMORY_DMA) //15
X(RM, GET_EDID) //16
X(RM, ALLOC_DISP_CHANNEL) //17
X(RM, ALLOC_DISP_OBJECT) //18
X(RM, ALLOC_SUBDEVICE) //19
X(RM, ALLOC_DYNAMIC_MEMORY) //20
X(RM, DUP_OBJECT) //21
X(RM, IDLE_CHANNELS) //22
X(RM, ALLOC_EVENT) //23
X(RM, SEND_EVENT) //24
X(RM, REMAPPER_CONTROL) //25 deprecated
X(RM, DMA_CONTROL) //26
X(RM, DMA_FILL_PTE_MEM) //27
X(RM, MANAGE_HW_RESOURCE) //28
X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated
X(RM, CREATE_FB_SEGMENT) //30
X(RM, DESTROY_FB_SEGMENT) //31
X(RM, ALLOC_SHARE_DEVICE) //32
X(RM, DEFERRED_API_CONTROL) //33
X(RM, REMOVE_DEFERRED_API) //34
X(RM, SIM_ESCAPE_READ) //35
X(RM, SIM_ESCAPE_WRITE) //36
X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37
X(RM, FREE_VIDMEM_VIRT) //38
X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP
X(RM, PERF_GET_PERFMON_SAMPLE) //40
X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated
X(RM, PERF_GET_LEVEL_INFO) //42
X(RM, MAP_SEMA_MEMORY) //43
X(RM, UNMAP_SEMA_MEMORY) //44
X(RM, SET_SURFACE_PROPERTIES) //45
X(RM, CLEANUP_SURFACE) //46
X(RM, UNLOADING_GUEST_DRIVER) //47
X(RM, TDR_SET_TIMEOUT_STATE) //48
X(RM, SWITCH_TO_VGA) //49
X(RM, GPU_EXEC_REG_OPS) //50
X(RM, GET_STATIC_INFO) //51
X(RM, ALLOC_VIRTMEM) //52
X(RM, UPDATE_PDE_2) //53
X(RM, SET_PAGE_DIRECTORY) //54
X(RM, GET_STATIC_PSTATE_INFO) //55
X(RM, TRANSLATE_GUEST_GPU_PTES) //56
X(RM, RESERVED_57) //57
X(RM, RESET_CURRENT_GR_CONTEXT) //58
X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59
X(RM, GET_ENGINE_UTILIZATION) //60
X(RM, UPDATE_GPU_PDES) //61
X(RM, GET_ENCODER_CAPACITY) //62
X(RM, VGPU_PF_REG_READ32) //63
X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64
X(GSP, GET_GSP_STATIC_INFO) //65
X(RM, RMFS_INIT) //66
X(RM, RMFS_CLOSE_QUEUE) //67
X(RM, RMFS_CLEANUP) //68
X(RM, RMFS_TEST) //69
X(RM, UPDATE_BAR_PDE) //70
X(RM, CONTINUATION_RECORD) //71
X(RM, GSP_SET_SYSTEM_INFO) //72
X(RM, SET_REGISTRY) //73
X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated
X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated
X(GSP, GSP_RM_CONTROL) //76
X(RM, GET_STATIC_INFO2) //77
X(RM, DUMP_PROTOBUF_COMPONENT) //78
X(RM, UNSET_PAGE_DIRECTORY) //79
X(RM, GET_CONSOLIDATED_STATIC_INFO) //80
X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated
X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated
X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated
X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated
X(RM, CTRL_SET_VGPU_FB_USAGE) //85
X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86
X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87
X(RM, CTRL_RESET_CHANNEL) //88
X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89
X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90
X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91
X(RM, CTRL_PERF_BOOST) //92
X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93
X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94
X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95
X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96
X(RM, CTRL_GPFIFO_SCHEDULE) //97
X(RM, CTRL_SET_TIMESLICE) //98
X(RM, CTRL_PREEMPT) //99
X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100
X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101
X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102
X(GSP, GSP_RM_ALLOC) //103
X(RM, CTRL_GET_P2P_CAPS_V2) //104
X(RM, CTRL_CIPHER_AES_ENCRYPT) //105
X(RM, CTRL_CIPHER_SESSION_KEY) //106
X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107
X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108
X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109
X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110
X(RM, CTRL_GPU_PROMOTE_CTX) //111
X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112
X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113
X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114
X(RM, CTRL_GPU_INITIALIZE_CTX) //115
X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116
X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117
X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118
X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119
X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120
X(RM, CTRL_GET_CE_PCE_MASK) //121
X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122
X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123
X(RM, CTRL_GET_NVLINK_STATUS) //124
X(RM, CTRL_GET_P2P_CAPS) //125
X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126
X(RM, RESERVED_0) //127
X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128
X(RM, CTRL_RESERVE_HWPM_LEGACY) //129
X(RM, CTRL_B0CC_EXEC_REG_OPS) //130
X(RM, CTRL_BIND_PM_RESOURCES) //131
X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132
X(RM, CTRL_DBG_RESUME_CONTEXT) //133
X(RM, CTRL_DBG_EXEC_REG_OPS) //134
X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135
X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136
X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137
X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138
X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139
X(RM, CTRL_ALLOC_PMA_STREAM) //140
X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141
X(RM, CTRL_FB_GET_INFO_V2) //142
X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143
X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144
X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145
X(RM, CTRL_GPU_EVICT_CTX) //146
X(RM, CTRL_FB_GET_FS_INFO) //147
X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148
X(RM, CTRL_STOP_CHANNEL) //149
X(RM, CTRL_GR_PC_SAMPLING_MODE) //150
X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151
X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152
X(RM, CTRL_FREE_PMA_STREAM) //153
X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154
X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155
X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156
X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157
X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158
X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159
X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160
X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161
X(UVM, UVM_PAGING_CHANNEL_MAP) //162
X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163
X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164
X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165
X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166
X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167
X(RM, DCE_RM_INIT) //168
X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169
X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170
X(RM, GET_PLCABLE_ADDRESS_KIND) //171
X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172
X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173
X(RM, CTRL_GET_MMU_DEBUG_MODE) //174
X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175
X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176
X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177
X(RM, DISABLE_CHANNELS) //178
X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179
X(RM, CTRL_FABRIC_MEM_STATS) //180
X(RM, SAVE_HIBERNATION_DATA) //181
X(RM, RESTORE_HIBERNATION_DATA) //182
X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183
X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184
X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185
X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186
X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187
X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188
// Keep NUM_FUNCTIONS last: it doubles as the count of defined function IDs.
X(RM, NUM_FUNCTIONS) //END
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
};
# undef X
# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
#endif
// RPC Events. Used by GSP-RM.
/*
 * RPC event IDs (host/GSP-initiated messages), using the same X-macro
 * pattern as the function table above: by default each E(RPC) expands to an
 * NV_VGPU_MSG_EVENT_<RPC> enumerator.  FIRST_EVENT pins the range at 0x1000
 * so event numbers can never collide with function numbers; append new
 * events just before NUM_EVENTS.
 */
#ifndef E
# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
enum {
#endif
E(FIRST_EVENT = 0x1000) // 0x1000
E(GSP_INIT_DONE) // 0x1001
E(GSP_RUN_CPU_SEQUENCER) // 0x1002
E(POST_EVENT) // 0x1003
E(RC_TRIGGERED) // 0x1004
E(MMU_FAULT_QUEUED) // 0x1005
E(OS_ERROR_LOG) // 0x1006
E(RG_LINE_INTR) // 0x1007
E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
E(SIM_READ) // 0x1009
E(SIM_WRITE) // 0x100a
E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
E(UCODE_LIBOS_PRINT) // 0x100c
E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
E(VGPU_CONFIG) // 0x1010
E(DISPLAY_MODESET) // 0x1011
E(NUM_EVENTS) // END
#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
};
# undef E
# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
#endif
#endif /*_RPC_GLOBAL_ENUMS_H_*/

View File

@@ -0,0 +1,230 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __vgpu_rpc_nv_headers_h__
#define __vgpu_rpc_nv_headers_h__
#include "ctrl/ctrl0080/ctrl0080perf.h"
#include "ctrl/ctrl2080/ctrl2080perf.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"
#include "nvstatus.h"
#define MAX_GPC_COUNT 32
/*
* Maximum number of RegOps that can be accommodated within one RPC call
* due to RPC message buffer size being limited to 4k
*/
#define VGPU_MAX_REGOPS_PER_RPC 100
#define VGPU_RESERVED_HANDLE_BASE 0xCAF3F000
#define VGPU_RESERVED_HANDLE_RANGE 0x1000
#define VGPU_CALC_PARAM_OFFSET(prev_offset, prev_params) (prev_offset + NV_ALIGN_UP(sizeof(prev_params), sizeof(NvU32)))
/*
* Message header (in buffer addressed by ring entry)
*
* If message is invalid (bad length or signature), signature and length
* are forced to be valid (if in range of descriptor) and result is set to
* NV_VGPU_RESULT_INVALID_MESSAGE_FORMAT. Otherwise, signature, length, and
* function are always unchanged and result is always set.
*
* The function message header, if defined, immediately follows the main message
* header.
*/
#define NV_VGPU_MSG_HEADER_VERSION_MAJOR 31:24 /* R---D */
#define NV_VGPU_MSG_HEADER_VERSION_MINOR 23:16 /* R---D */
#define NV_VGPU_MSG_HEADER_VERSION_MAJOR_TOT 0x00000003 /* R---D */
#define NV_VGPU_MSG_HEADER_VERSION_MINOR_TOT 0x00000000 /* R---D */
/* signature must equal valid value */
#define NV_VGPU_MSG_SIGNATURE_VALID 0x43505256 /* RW--V */
#include "rpc_global_enums.h"
/* result code */
/* codes below 0xFF000000 must match exactly the NV_STATUS codes in nvos.h */
#define NV_VGPU_MSG_RESULT__RM NV_ERR_GENERIC:0x00000000 /* RW--D */
#define NV_VGPU_MSG_RESULT_SUCCESS NV_OK
#define NV_VGPU_MSG_RESULT_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT
#define NV_VGPU_MSG_RESULT_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE
#define NV_VGPU_MSG_RESULT_GENERIC NV_ERR_GENERIC
#define NV_VGPU_MSG_RESULT_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER
#define NV_VGPU_MSG_RESULT_IN_USE NV_ERR_IN_USE
#define NV_VGPU_MSG_RESULT_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES
#define NV_VGPU_MSG_RESULT_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE
#define NV_VGPU_MSG_RESULT_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT
#define NV_VGPU_MSG_RESULT_INVALID_BASE NV_ERR_INVALID_BASE
#define NV_VGPU_MSG_RESULT_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL
#define NV_VGPU_MSG_RESULT_INVALID_CLASS NV_ERR_INVALID_CLASS
#define NV_VGPU_MSG_RESULT_INVALID_CLIENT NV_ERR_INVALID_CLIENT
#define NV_VGPU_MSG_RESULT_INVALID_COMMAND NV_ERR_INVALID_COMMAND
#define NV_VGPU_MSG_RESULT_INVALID_DATA NV_ERR_INVALID_DATA
#define NV_VGPU_MSG_RESULT_INVALID_DEVICE NV_ERR_INVALID_DEVICE
#define NV_VGPU_MSG_RESULT_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER
#define NV_VGPU_MSG_RESULT_INVALID_EVENT NV_ERR_INVALID_EVENT
#define NV_VGPU_MSG_RESULT_INVALID_FLAGS NV_ERR_INVALID_FLAGS
#define NV_VGPU_MSG_RESULT_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION
#define NV_VGPU_MSG_RESULT_INVALID_HEAP NV_ERR_INVALID_HEAP
#define NV_VGPU_MSG_RESULT_INVALID_INDEX NV_ERR_INVALID_INDEX
#define NV_VGPU_MSG_RESULT_INVALID_LIMIT NV_ERR_INVALID_LIMIT
#define NV_VGPU_MSG_RESULT_INVALID_METHOD NV_ERR_INVALID_METHOD
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_BUFFER NV_ERR_INVALID_OBJECT_BUFFER
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_PARENT NV_ERR_INVALID_OBJECT_PARENT
#define NV_VGPU_MSG_RESULT_INVALID_OFFSET NV_ERR_INVALID_OFFSET
#define NV_VGPU_MSG_RESULT_INVALID_OWNER NV_ERR_INVALID_OWNER
#define NV_VGPU_MSG_RESULT_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT
#define NV_VGPU_MSG_RESULT_INVALID_PARAMETER NV_ERR_INVALID_PARAMETER
#define NV_VGPU_MSG_RESULT_INVALID_POINTER NV_ERR_INVALID_POINTER
#define NV_VGPU_MSG_RESULT_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY
#define NV_VGPU_MSG_RESULT_INVALID_STATE NV_ERR_INVALID_STATE
#define NV_VGPU_MSG_RESULT_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH
#define NV_VGPU_MSG_RESULT_INVALID_XLATE NV_ERR_INVALID_XLATE
#define NV_VGPU_MSG_RESULT_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING
#define NV_VGPU_MSG_RESULT_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES
#define NV_VGPU_MSG_RESULT_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED
#define NV_VGPU_MSG_RESULT_OPERATING_SYSTEM NV_ERR_OPERATING_SYSTEM
#define NV_VGPU_MSG_RESULT_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT
#define NV_VGPU_MSG_RESULT_TIMEOUT NV_ERR_TIMEOUT
#define NV_VGPU_MSG_RESULT_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES
#define NV_VGPU_MSG_RESULT_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED
#define NV_VGPU_MSG_RESULT_GUEST_HOST_DRIVER_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH
/*
* codes above 0xFF000000 and below 0xFF100000 must match one-for-one
* the vmiop_error_t codes in vmioplugin.h, with 0xFF000000 added.
*/
#define NV_VGPU_MSG_RESULT__VMIOP 0xFF000007:0xFF000000 /* RW--D */
#define NV_VGPU_MSG_RESULT_VMIOP_INVAL 0xFF000001 /* RW--V */
#define NV_VGPU_MSG_RESULT_VMIOP_RESOURCE 0xFF000002 /* RW--V */
#define NV_VGPU_MSG_RESULT_VMIOP_RANGE 0xFF000003 /* RW--V */
#define NV_VGPU_MSG_RESULT_VMIOP_READ_ONLY 0xFF000004 /* RW--V */
#define NV_VGPU_MSG_RESULT_VMIOP_NOT_FOUND 0xFF000005 /* RW--V */
#define NV_VGPU_MSG_RESULT_VMIOP_NO_ADDRESS_SPACE 0xFF000006 /* RW--V */
#define NV_VGPU_MSG_RESULT_VMIOP_TIMEOUT 0xFF000007 /* RW--V */
/* RPC-specific error codes */
#define NV_VGPU_MSG_RESULT__RPC 0xFF100007:0xFF100000 /* RW--D */
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION 0xFF100001 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_INVALID_MESSAGE_FORMAT 0xFF100002 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_HANDLE_NOT_FOUND 0xFF100003 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_HANDLE_EXISTS 0xFF100004 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR 0xFF100005 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR 0xFF100006 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE 0xFF100007 /* RW--V */
/* RPC-specific code in result for incomplete request */
#define NV_VGPU_MSG_RESULT_RPC_PENDING 0xFFFFFFFF /* RW--V */
/* shared union field */
#define NV_VGPU_MSG_UNION_INIT 0x00000000 /* RW--V */
/*
* common PTEDESC message defines (used w/ ALLOC_MEMORY, ALLOC_VIDMEM, FILL_PTE_MEM)
*/
#define NV_VGPU_PTEDESC_INIT 0x00000000 /* RWI-V */
#define NV_VGPU_PTEDESC__PROD 0x00000000 /* RW--V */
#define NV_VGPU_PTEDESC_IDR_NONE 0x00000000 /* RW--V */
#define NV_VGPU_PTEDESC_IDR_SINGLE 0x00000001 /* RW--V */
#define NV_VGPU_PTEDESC_IDR_DOUBLE 0x00000002 /* RW--V */
#define NV_VGPU_PTEDESC_IDR_TRIPLE 0x00000003 /* RW--V */
#define NV_VGPU_PTE_PAGE_SIZE 0x1000 /* R---V */
#define NV_VGPU_PTE_SIZE 4 /* R---V */
#define NV_VGPU_PTE_INDEX_SHIFT 10 /* R---V */
#define NV_VGPU_PTE_INDEX_MASK 0x3FF /* R---V */
#define NV_VGPU_PTE_64_PAGE_SIZE 0x1000 /* R---V */
#define NV_VGPU_PTE_64_SIZE 8 /* R---V */
#define NV_VGPU_PTE_64_INDEX_SHIFT 9 /* R---V */
#define NV_VGPU_PTE_64_INDEX_MASK 0x1FF /* R---V */
/*
* LOG message
*/
#define NV_VGPU_LOG_LEVEL_FATAL 0x00000000 /* RW--V */
#define NV_VGPU_LOG_LEVEL_ERROR 0x00000001 /* RW--V */
#define NV_VGPU_LOG_LEVEL_NOTICE 0x00000002 /* RW--V */
#define NV_VGPU_LOG_LEVEL_STATUS 0x00000003 /* RW--V */
#define NV_VGPU_LOG_LEVEL_DEBUG 0x00000004 /* RW--V */
/*
* Enums specifying the BAR number that we are going to update its PDE
*/
typedef enum
{
    NV_RPC_UPDATE_PDE_BAR_1,       // update BAR1's PDE
    NV_RPC_UPDATE_PDE_BAR_2,       // update BAR2's PDE
    NV_RPC_UPDATE_PDE_BAR_INVALID, // sentinel: not a valid BAR selector
} NV_RPC_UPDATE_PDE_BAR_TYPE;
/*
* UVM method stream guest pages operation
*/
typedef enum
{
    NV_RPC_GUEST_PAGE_MAP,   // map guest pages for the UVM method stream
    NV_RPC_GUEST_PAGE_UNMAP, // unmap previously mapped guest pages
} NV_RPC_GUEST_PAGE_OPERATION;
/*
* UVM method stream guest page size
*/
typedef enum
{
    NV_RPC_GUEST_PAGE_SIZE_4K,          // only 4 KiB guest pages are supported
    NV_RPC_GUEST_PAGE_SIZE_UNSUPPORTED, // sentinel for any other page size
} NV_RPC_GUEST_PAGE_SIZE;
/*
* UVM paging channel VASPACE operation
*/
typedef enum
{
    UVM_PAGING_CHANNEL_VASPACE_ALLOC, // allocate the paging channel's VA space
    UVM_PAGING_CHANNEL_VASPACE_FREE,  // free it
} UVM_PAGING_CHANNEL_VASPACE_OPERATION;
// Maximum mode dimensions for one head of a virtual display.
// NOTE(review): headIndex is presumably the input selector and the two
// resolution fields the outputs — confirm against the RPC handler.
typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
{
    NvU32 headIndex;
    NvU32 maxHResolution; // widest supported mode, in pixels
    NvU32 maxVResolution; // tallest supported mode, in pixels
} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
// Head counts for a virtual display device.
typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
{
    NvU32 numHeads;    // heads currently present
    NvU32 maxNumHeads; // upper bound supported by the device
} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
/*
* Maximum guest pages that can be mapped for UVM method stream
*/
#define UVM_METHOD_STREAM_MAX_GUEST_PAGES_v1C_05 500
#define PMA_SCRUBBER_SHARED_BUFFER_MAX_GUEST_PAGES_v1F_0C 500
#endif // __vgpu_rpc_nv_headers_h__

View File

@@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __vgpu_dev_nv_rpc_vgpu_h__
#define __vgpu_dev_nv_rpc_vgpu_h__
// No-op stand-ins for the NV_RM_RPC_* call sites in builds without vGPU RPC
// support.  The uppercase names deliberately mirror the macro spellings used
// by callers, and the trailing varargs swallow whatever arguments a call
// site passes; each call compiles to nothing.
static NV_INLINE void NV_RM_RPC_ALLOC_LOCAL_USER(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_ALLOC_VIDMEM(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_ALLOC_VIRTMEM(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_MAP_MEMORY(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_UNMAP_MEMORY(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_READ_EDID(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_DMA_FILL_PTE_MEM(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_CREATE_FB_SEGMENT(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_DESTROY_FB_SEGMENT(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_DEFERRED_API_CONTROL(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_REMOVE_DEFERRED_API(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_FREE_VIDMEM_VIRT(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_LOG(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_SET_GUEST_SYSTEM_INFO_EXT(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_GET_ENGINE_UTILIZATION(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_MAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_UNMAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_SET_SURFACE_PROPERTIES(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_CLEANUP_SURFACE(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_SWITCH_TO_VGA(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_TDR_SET_TIMEOUT_STATE(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_STATIC_INFO(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_GR_STATIC_INFO(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_GET_STATIC_PSTATE_INFO(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_UPDATE_PDE_2(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_TRANSLATE_GUEST_GPU_PTES(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_SET_SEMA_MEM_VALIDATION_STATE(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_RESET_CURRENT_GR_CONTEXT(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_GET_ENCODER_CAPACITY(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_GET_STATIC_INFO2(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_ALLOC_CONTEXT_DMA(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_GET_PLCABLE_ADDRESS_KIND(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_UPDATE_GPU_PDES(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_DISABLE_CHANNELS(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_SAVE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { }
static NV_INLINE void NV_RM_RPC_RESTORE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { }
#endif // __vgpu_dev_nv_rpc_vgpu_h__

View File

@@ -0,0 +1,272 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RPC_SDK_STRUCTURES_H_
#define _RPC_SDK_STRUCTURES_H_
#include <ctrl/ctrl83de.h>
#include <ctrl/ctrlc36f.h>
#include <ctrl/ctrlc637.h>
#include <ctrl/ctrl0000/ctrl0000system.h>
#include <ctrl/ctrl0080/ctrl0080nvjpg.h>
#include <ctrl/ctrl0080/ctrl0080bsp.h>
#include <ctrl/ctrl0080/ctrl0080dma.h>
#include <ctrl/ctrl0080/ctrl0080fb.h>
#include <ctrl/ctrl0080/ctrl0080gr.h>
#include <ctrl/ctrl2080/ctrl2080ce.h>
#include <ctrl/ctrl2080/ctrl2080bus.h>
#include <ctrl/ctrl2080/ctrl2080fifo.h>
#include <ctrl/ctrl2080/ctrl2080gr.h>
#include <ctrl/ctrl2080/ctrl2080fb.h>
#include <ctrl/ctrl83de/ctrl83dedebug.h>
#include <ctrl/ctrl0080/ctrl0080fifo.h>
#include <ctrl/ctrl2080/ctrl2080nvlink.h>
#include <ctrl/ctrl2080/ctrl2080fla.h>
#include <ctrl/ctrl2080/ctrl2080internal.h>
#include <ctrl/ctrl2080/ctrl2080mc.h>
#include <ctrl/ctrl2080/ctrl2080grmgr.h>
#include <ctrl/ctrl2080/ctrl2080ecc.h>
#include <ctrl/ctrl0090.h>
#include <ctrl/ctrl9096.h>
#include <ctrl/ctrlb0cc.h>
#include <ctrl/ctrla06f.h>
#include <ctrl/ctrl00f8.h>
#include <class/cl2080.h>
#include <class/cl0073.h>
#include <class/clc670.h>
#include <class/clc673.h>
#include <class/clc67b.h>
#include <class/clc67d.h>
#include <class/clc67e.h>
#include "rpc_headers.h"
#include "nvctassert.h"
// Per-SM (streaming multiprocessor) attribute block exchanged with the vGPU
// plugin (vmiopd).  Field meanings follow their names; exact semantics of
// the geometry-buffer counts are defined by the producer — not visible here.
typedef struct vmiopd_SM_info {
    NvU32 version;           // NOTE(review): presumably a layout/version tag — confirm
    NvU32 regBankCount;
    NvU32 regBankRegCount;   // registers per bank
    NvU32 maxWarpsPerSM;
    NvU32 maxThreadsPerWarp;
    NvU32 geomGsObufEntries;
    NvU32 geomXbufEntries;
    NvU32 maxSPPerSM;
    NvU32 rtCoreCount;
} VMIOPD_GRSMINFO;
// NV_SCAL_FAMILY_MAX_FBPS 16
#define MAX_FBPS 16 //Maximum number of FBPs
#define OBJ_MAX_HEADS 4
#define MAX_NVDEC_ENGINES 5 // Maximum number of NVDEC engines
// NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES(256) / NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES(32)
#define MAX_ITERATIONS_DEVICE_INFO_TABLE 8
// NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES(512) / NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES(64)
#define MAX_ITERATIONS_DYNAMIC_BLACKLIST 8
#define NV0000_GPUACCT_RPC_PID_MAX_QUERY_COUNT 1000
#define NV2080_CTRL_CLK_ARCH_MAX_DOMAINS_v1E_0D 32
#define NV_RM_RPC_NO_MORE_DATA_TO_READ 0
#define NV_RM_RPC_MORE_RPC_DATA_TO_READ 1
//Maximum EXEC_PARTITIONS
#define NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05 8
//Maximum ECC Addresses
#define NV2080_CTRL_ECC_GET_LATEST_ECC_ADDRESSES_MAX_COUNT_v18_04 32
#define NV2080_CTRL_NVLINK_MAX_LINKS_v15_02 6
#define NV2080_CTRL_NVLINK_MAX_LINKS_v1A_18 12
#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v15_02 8
#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v1F_0D 9
#define NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v1A_1D 96
#define NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D 24
#define NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D 96
#define NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D 8
#define NV0080_CTRL_GR_INFO_MAX_SIZE_1B_04 (0x0000002C)
#define NV0080_CTRL_GR_INFO_MAX_SIZE_1C_01 (0x00000030)
#define NV0080_CTRL_GR_INFO_MAX_SIZE_1E_02 (0x00000032)
#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES_1B_04 8
#define NV2080_CTRL_INTERNAL_GR_MAX_SM_v1B_05 256
#define NV2080_CTRL_INTERNAL_GR_MAX_SM_v1E_03 240
#define NV2080_CTRL_INTERNAL_GR_MAX_GPC_v1B_05 8
#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT_v1B_05 0x19
#define NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT_v1C_03 10
#define NV2080_CTRL_INTERNAL_GR_MAX_GPC_v1C_03 12
#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX_v1E_09 32
// Defined this intermediate RM-RPC structure for making RPC call from Guest as
// we have the restriction of passing max 4kb of data to plugin and the
// NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS is way more than that.
// This structure is similar to NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS
// RM control structure.
// Added passIndex member to identify from which index (in the full RM pid list
// on host) onwards the data needs to be read. Caller should initialize passIndex
// to NV_RM_RPC_MORE_RPC_DATA_TO_READ, and keep making RPC calls until the
// passIndex value is returned as NV_RM_RPC_NO_MORE_DATA_TO_READ by the RPC.
typedef struct
{
    NvU32 gpuId;      // GPU whose accounting PIDs are being queried
    NvU32 passIndex;  // in/out iteration cursor — see the usage notes above
    NvU32 pidTbl[NV0000_GPUACCT_RPC_PID_MAX_QUERY_COUNT]; // PIDs returned by this pass
    NvU32 pidCount;   // presumably the number of valid pidTbl entries — mirrors the RmCtrl struct
} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_RPC_EX;
typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG_v03_00[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
typedef NvV32 NvRmctrlCmd;
// PTE/PDE descriptor used by the ALLOC_MEMORY / ALLOC_VIDMEM / FILL_PTE_MEM
// style RPC messages: a 32-bit header followed by a flexible array of
// 8-byte-aligned entries.
struct pte_desc
{
    NvU32 idr:2;        // indirection level, NV_VGPU_PTEDESC_IDR_NONE..TRIPLE (0..3)
    NvU32 reserved1:14;
    NvU32 length:16;    // NOTE(review): presumably the element count of pte_pde[] — confirm
    union {
        NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
        NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
    } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
};
/*
* VGPU_CACHED_RMCTRL_LIST
*
* This macro contains the list of RmCtrls which return static values and can be cached in
* guest RM.
*
* To cache a RmCtrl, add it to VGPU_CACHED_RMCTRL_LIST in the format:
* VGPU_CACHED_RMCTRL_ENTRY(<RmCtrl Command>, <RmCtrl Parameter Type>)
*/
#define VGPU_CACHED_RMCTRL_LIST \
VGPU_CACHED_RMCTRL_ENTRY(NV2080_CTRL_CMD_PERF_VPSTATES_GET_INFO, NV2080_CTRL_PERF_VPSTATES_INFO) \
VGPU_CACHED_RMCTRL_ENTRY(NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS)
// Expands VGPU_CACHED_RMCTRL_LIST into one VGPU_CACHED_RMCTRL_IDX_<cmd>
// enumerator per cached control; VGPU_CACHED_RMCTRL_IDX_COUNT is kept last
// to size the cache array below.
enum VGPU_CACHED_RMCTRL_INDICES
{
#define VGPU_CACHED_RMCTRL_ENTRY(ctrlCmd,type) \
    VGPU_CACHED_RMCTRL_IDX_##ctrlCmd,
    VGPU_CACHED_RMCTRL_LIST
#undef VGPU_CACHED_RMCTRL_ENTRY
    VGPU_CACHED_RMCTRL_IDX_COUNT,
};
// One cache slot per cached RmCtrl.  'ptr' presumably points at a saved copy
// of the control's parameter payload (TODO confirm against the cache-fill
// path); it is meaningful only once bCached is set, and 'status' records the
// result of the original control call.
typedef struct vgpu_cached_rmctrl
{
    void *ptr;
    NvBool bCached;
    NV_STATUS status;
}vgpu_cached_rmctrl;
// Fixed-size table of cache slots, indexed by enum VGPU_CACHED_RMCTRL_INDICES.
typedef struct vgpu_cached_rmctrl_list
{
    vgpu_cached_rmctrl vgpu_cached_rmctrls[VGPU_CACHED_RMCTRL_IDX_COUNT];
} vgpu_cached_rmctrl_list;
// BSP (bitstream processor) capability table; sized by the NV0080 BSP caps
// control's table-size constant.
typedef struct VGPU_BSP_CAPS
{
    NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE];
} VGPU_BSP_CAPS;
#define VGPU_PAGE_SIZE 4096
#define NUM_MFN_PAGES 16
// Shared-memory layout for the Hyper-V path: a control region (lock word +
// VMBus packet id) overlaid on exactly one VGPU_PAGE_SIZE page via the
// anonymous union, followed by NUM_MFN_PAGES pages' worth of mfn_data
// entries.  shm_lock is presumably polled against the HYPERV_SHM_MFN_WRITE_*
// states defined below — confirm against the writer.
typedef struct HYPERV_SHARED_MEMORY_DESCRIPTOR
{
    union
    {
        struct
        {
            NvU32 shm_lock;
            NvU64 vmbus_packet_id NV_ALIGN_BYTES(8);
        };
        char control_page[VGPU_PAGE_SIZE]; // pads the control fields to a full page
    };
    NvU32 mfn_data[NUM_MFN_PAGES * VGPU_PAGE_SIZE / sizeof(NvU32)];
} HYPERV_SHARED_MEMORY_DESCRIPTOR;
#define HYPERV_SHM_MFN_WRITE_WAIT 0
#define HYPERV_SHM_MFN_WRITE_COMPLETE 1
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v15_01 (0x00000014)
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v1A_04 (0x00000014)
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v1C_09 (0x00000016)
#define NV2080_ENGINE_TYPE_LAST_v18_01 (0x0000002a)
#define NV2080_ENGINE_TYPE_LAST_v1C_09 (0x00000034)
#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE_v1A_0F (0x00000033)
#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE_v1C_09 (0x00000034)
//Maximum GMMU_FMT_LEVELS
#define GMMU_FMT_MAX_LEVELS_v05_00 5
#define GMMU_FMT_MAX_LEVELS_v1A_12 6
//Maximum MMU FMT sub levels
#define MMU_FMT_MAX_SUB_LEVELS_v09_02 2
//Maximum number of supported TDP clients
#define NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS_v1A_1F 5
//Maximum number of SMs whose error state can be read in single call
#define NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL_v16_03 100
// Workaround for bug 200702083 (#15)
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1A_15 0x2F
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1A_24 0x33
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1E_01 0x35
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1F_0F 0x36
#define NV2080_CTRL_PERF_MAX_LIMITS_v1C_0B 0x100
// Maximum number of guest addresses that can be queried in one RPC.
// The number below is calculated from the maximum count of guest addresses,
// plus their state, that fits in a single 4K (RPC page size) iteration
#define GET_PLCABLE_MAX_GUEST_ADDRESS_v1D_05 60
//
// Versioned define for
// NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES
//
#define NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES_v1E_07 2
// Versioned define for
// NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT
#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT_v1F_08 13
#endif /*_RPC_SDK_STRUCTURES_H_*/

View File

@@ -0,0 +1,124 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//******************************************************************************
//
// Declarations for the VGPU event module.
//
// Description:
// This module declares the VGPU event interface functions/macros.
//
//******************************************************************************
#ifndef VGPU_EVENTS_H
#define VGPU_EVENTS_H
#include "rmconfig.h"
#include "ctrl/ctrlc637.h"
#include "ctrl/ctrl2080/ctrl2080bios.h"
#include "ctrl/ctrl2080/ctrl2080fb.h"
#include "ctrl/ctrl2080/ctrl2080gpu.h"
#include "ctrl/ctrl2080/ctrl2080gr.h"
#include "ctrl/ctrl0080/ctrl0080nvjpg.h"
#include "vgpu/rpc_headers.h"
#include "gpu/device/device.h"
#include "vgpu/sdk-structures.h"
#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
typedef MC_ENGINE_BITVECTOR *PMC_ENGINE_BITVECTOR;
typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE, KERNEL_HOST_VGPU_DEVICE;
typedef struct _object_vgpu OBJVGPU, *POBJVGPU;
// Create and destroy OBJVGPU *object
NV_STATUS vgpuCreateObject(OBJGPU *pGpu);
void vgpuDestructObject(OBJGPU *pGpu);
// Check if a VGPU event is pending
NvBool vgpuGetPendingEvent(OBJGPU *pGpu, THREAD_STATE_NODE *pThreadState);
// Service VGPU events
void vgpuService(OBJGPU *pGpu);
// Overwrite registry keys
void vgpuInitRegistryOverWrite(OBJGPU *pGpu);
// Get the device pointer from the calling context
Device *vgpuGetCallingContextDevice(OBJGPU *pGpu);
// Stubs for virtualization-disabled builds: each returns NV_OK with a benign
// default so callers can run unconditionally when no vGPU plugin context exists.

// Report "no host vGPU device" for the calling context.
static inline NV_STATUS vgpuGetCallingContextHostVgpuDevice(OBJGPU *pGpu, HOST_VGPU_DEVICE **ppHostVgpuDevice)
{
    *ppHostVgpuDevice = NULL;
    return NV_OK;
}

// Report "no kernel host vGPU device" for the calling context.
static inline NV_STATUS vgpuGetCallingContextKernelHostVgpuDevice(OBJGPU *pGpu, KERNEL_HOST_VGPU_DEVICE **ppKernelHostVgpuDevice)
{
    *ppKernelHostVgpuDevice = NULL;
    return NV_OK;
}

// Without virtualization the calling context is always the physical function.
static inline NV_STATUS vgpuGetCallingContextGfid(OBJGPU *pGpu, NvU32 *pGfid)
{
    *pGfid = GPU_GFID_PF;
    return NV_OK;
}

// The caller is never the vGPU plugin in these builds.
static inline NV_STATUS vgpuIsCallingContextPlugin(OBJGPU *pGpu, NvBool *pIsCallingContextPlugin)
{
    *pIsCallingContextPlugin = NV_FALSE;
    return NV_OK;
}

// Any device maps to the physical-function GFID in these builds.
static inline NV_STATUS vgpuGetGfidFromDeviceInfo(OBJGPU *pGpu, Device *pDevice, NvU32 *pGfid)
{
    *pGfid = GPU_GFID_PF;
    return NV_OK;
}
// Update Interrupt using shared memory through vGPU
void vgpuUpdateShmIntr(OBJGPU *pGpu, NvU32 offset, NvU32 value, THREAD_STATE_NODE *pThreadState);
// Check if SW stalling interrupt is pending, using shared memory
NV_STATUS vgpuShmIsSwPending(OBJGPU *pGpu, NvU32 *isSwPending);
// Check if non-stalling interrupts are enabled, using shared memory
NV_STATUS vgpuShmIsNonStallEnabled(OBJGPU *pGpu, NvU32 *isNonStallEnabled);
// Check if non-stall interrupts are pending, using shared memory
NV_STATUS vgpuIsNonStallPending(OBJGPU *pGpu, PMC_ENGINE_BITVECTOR pEngines);
// Service non-stalling interrupts using shared memory
NV_STATUS vgpuServiceNonStall(OBJGPU *pGpu, PMC_ENGINE_BITVECTOR pEngines);
// Initialize and free event infrastructure
NV_STATUS _setupEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
NV_STATUS _teardownEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
NV_STATUS _setupGspEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
void _teardownGspEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
#endif // VGPU_EVENTS_H

View File

@@ -0,0 +1,62 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//******************************************************************************
//
// Declarations for VGPU PMA Guest Scrubber Shared memory structures.
//
// Description:
// This module declares the shared memory structures for VGPU PMA Guest
// Scrubber.
//
//******************************************************************************
#ifndef __vgpu_pma_guest_scrubber_h__
#define __vgpu_pma_guest_scrubber_h__
#include "gpu/mem_mgr/virt_mem_allocator_common.h"
#define VGPU_GUEST_PMA_MAX_SCRUB_ITEMS 4096
#define VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_SIZE ((sizeof(VGPU_SCRUB_NODE) * VGPU_GUEST_PMA_MAX_SCRUB_ITEMS) + RM_PAGE_SIZE)
#define VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_PFNS (NV_DIV_AND_CEIL(VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_SIZE, RM_PAGE_SIZE))
// One unit of scrub work. Fields are volatile because the structure lives in
// memory shared between guest and host (see the shared-buffer sizing macros
// above), so neither side may cache reads.
typedef struct VGPU_SCRUB_NODE {
    volatile NvU32 workId; // The 32 bit ID assigned to each work
    volatile NvU64 base;   // The base address from which the scrub to start
    volatile NvU64 size;   // The size of a scrub work
} VGPU_SCRUB_NODE;

// Ring-buffer bookkeeping for the guest PMA scrubber shared buffer.
// NOTE(review): get/put index roles inferred from names (get = consumer,
// put = producer) -- confirm against the scrubber implementation.
typedef struct VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER {
    volatile NvU32 lastSubmittedWorkId;  // ID of the most recently submitted work
    volatile NvU32 lastSWSemaphoreDone;  // ID of the most recently completed work
    volatile NvU64 scrubberGetIdx;       // read index into the scrub list
    volatile NvU64 scrubberPutIdx;       // write index into the scrub list
} VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER;

// Pointers into the shared buffer: the header, followed by the scrub list.
typedef struct VGPU_GUEST_PMA_SCRUB_BUFFER_RING {
    VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER *pScrubBuffRingHeader;
    VGPU_SCRUB_NODE *pScrubList;
} VGPU_GUEST_PMA_SCRUB_BUFFER_RING;
#endif // __vgpu_pma_guest_scrubber_h__

View File

@@ -0,0 +1,41 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __vgpu_vgpu_version_h__
#define __vgpu_vgpu_version_h__
/* VGX interface version */
#define NV_RPC_VERSION_NUMBER_MAJOR 31:24 /* R---D */
#define NV_RPC_VERSION_NUMBER_MINOR 23:16 /* R---D */
#define RPC_VERSION_FROM_VGX_VERSION(major, minor) ( DRF_NUM(_RPC, _VERSION_NUMBER, _MAJOR, major) | \
DRF_NUM(_RPC, _VERSION_NUMBER, _MINOR, minor))
#define VGX_MAJOR_VERSION_NUMBER 0x1F
#define VGX_MINOR_VERSION_NUMBER 0x0F
// The NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL macros are auto-generated using the value from rpc-structures.def file.
#define AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION
#include "g_rpc-structures.h"
#undef AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION
#endif // __vgpu_vgpu_version_h__

View File

@@ -0,0 +1,167 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "os/os.h"
#include "dbgbuffer.h"
#include "gpu/subdevice/subdevice.h"
#include "rmapi/client.h"
/*!
 * @brief Construct a DebugBufferApi object: allocate the historical debug
 *        buffer (memory descriptor plus backing memory) and attach it.
 *
 * @param[in] pDebugBufferApi
 * @param[in] pCallContext
 * @param[in] pParams          carries NV00DB_ALLOCATION_PARAMETERS (tag, size)
 *
 * @returns NV_OK on success, NV_ERR_INSUFFICIENT_RESOURCES if the buffer
 *          could not be allocated.
 */
NV_STATUS
dbgbufConstruct_IMPL
(
    DebugBufferApi *pDebugBufferApi,
    CALL_CONTEXT *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDebugBufferApi);
    NvDebugDump *pNvd = GPU_GET_NVD(pGpu);
    NV00DB_ALLOCATION_PARAMETERS *pAllocParams = pParams->pAllocParams;
    MEMORY_DESCRIPTOR *pNewMemDesc = NULL;

    // Allocate a memory descriptor and backing memory for this historical buffer
    if (nvdAllocDebugBuffer(pGpu, pNvd, pAllocParams->tag,
                            &pAllocParams->size, &pNewMemDesc) != NV_OK)
    {
        NV_PRINTF(LEVEL_ERROR, "DebugBuffer object could not be allocated.\n");
        return NV_ERR_INSUFFICIENT_RESOURCES;
    }

    pDebugBufferApi->pMemDesc = pNewMemDesc;
    return NV_OK;
}
/*!
 * @brief Destroy a DebugBufferApi object: unlink and free its historical
 *        buffer, propagating the free status to the resource-free params.
 */
void
dbgbufDestruct_IMPL
(
    DebugBufferApi *pDebugBufferApi
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pDebugBufferApi);
    NvDebugDump *pNvd = GPU_GET_NVD(pGpu);
    CALL_CONTEXT *pCallContext;
    RS_RES_FREE_PARAMS_INTERNAL *pFreeParams;
    NV_STATUS status;

    resGetFreeParams(staticCast(pDebugBufferApi, RsResource), &pCallContext, &pFreeParams);

    // Unlink and free historical buffer
    status = nvdFreeDebugBuffer(pGpu, pNvd, pDebugBufferApi->pMemDesc);
    NV_ASSERT(status == NV_OK);

    pFreeParams->status = status;
}
/*!
 * @brief Map the debug buffer into the caller's address space.
 *        The whole buffer is mapped; offsets are not supported.
 *
 * @returns NV_OK on success, or the validation/mapping error.
 */
NV_STATUS
dbgbufMap_IMPL
(
    DebugBufferApi *pDebugBufferApi,
    CALL_CONTEXT *pCallContext,
    RS_CPU_MAP_PARAMS *pParams,
    RsCpuMapping *pCpuMapping
)
{
    NvBool bKernelMapping;
    RmClient *pRmClient = dynamicCast(pCallContext->pClient, RmClient);
    NV_STATUS status;

    // Decide kernel vs. user mapping based on the client's cached privilege.
    status = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pRmClient),
                                        pCpuMapping->flags,
                                        &bKernelMapping);
    if (status != NV_OK)
        return status;

    pCpuMapping->processId = osGetCurrentProcess();

    // Map entire buffer (no offsets supported)
    return memdescMap(pDebugBufferApi->pMemDesc,
                      0,
                      pDebugBufferApi->pMemDesc->Size,
                      bKernelMapping,
                      pCpuMapping->pPrivate->protect,
                      &pCpuMapping->pLinearAddress,
                      &pCpuMapping->pPrivate->pPriv);
}
/*!
 * @brief Unmap a previously established CPU mapping of the debug buffer.
 *
 * @returns NV_OK on success, or the kernel-mapping validation error.
 */
NV_STATUS
dbgbufUnmap_IMPL
(
    DebugBufferApi *pDebugBufferApi,
    CALL_CONTEXT *pCallContext,
    RsCpuMapping *pCpuMapping
)
{
    NvBool bKernelMapping;
    RmClient *pRmClient = dynamicCast(pCallContext->pClient, RmClient);
    NV_STATUS status;

    // Re-derive whether this was a kernel or user mapping before tearing down.
    status = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pRmClient),
                                        pCpuMapping->flags,
                                        &bKernelMapping);
    if (status != NV_OK)
        return status;

    memdescUnmap(pDebugBufferApi->pMemDesc,
                 bKernelMapping,
                 pCpuMapping->processId,
                 pCpuMapping->pLinearAddress,
                 pCpuMapping->pPrivate->pPriv);

    return NV_OK;
}
/*!
 * @brief Report the effective address space of the debug buffer's memory
 *        for the given mapping flags.
 *
 * @param[out] pAddrSpace  optional; skipped when NULL.
 *
 * @returns NV_OK, NV_ERR_INVALID_OBJECT when no memory descriptor exists,
 *          or the error from rmapiGetEffectiveAddrSpace.
 */
NV_STATUS
dbgbufGetMapAddrSpace_IMPL
(
    DebugBufferApi *pDebugBufferApi,
    CALL_CONTEXT *pCallContext,
    NvU32 mapFlags,
    NV_ADDRESS_SPACE *pAddrSpace
)
{
    PMEMORY_DESCRIPTOR pMemDesc = pDebugBufferApi->pMemDesc;
    NV_ADDRESS_SPACE effectiveSpace;

    if (pMemDesc == NULL)
        return NV_ERR_INVALID_OBJECT;

    NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(GPU_RES_GET_GPU(pDebugBufferApi),
                                                      pMemDesc, mapFlags, &effectiveSpace));

    if (pAddrSpace != NULL)
        *pAddrSpace = effectiveSpace;

    return NV_OK;
}
/*!
 * @brief Return the memory descriptor backing this debug buffer.
 *        May return NULL through *ppMemDesc if none was ever attached.
 *
 * @param[out] ppMemDesc  receives the descriptor pointer (not a copy;
 *                        ownership stays with the DebugBufferApi object)
 *
 * @returns NV_OK always
 */
NV_STATUS
dbgbufGetMemoryMappingDescriptor_IMPL
(
    DebugBufferApi *pDebugBufferApi,
    MEMORY_DESCRIPTOR **ppMemDesc
)
{
    *ppMemDesc = pDebugBufferApi->pMemDesc;
    return NV_OK;
}

View File

@@ -0,0 +1,306 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvdump.h"
#include "os/os.h"
#include "diagnostics/nv_debug_dump.h"
#include "kernel/gpu/gpu_resource.h"
#include "kernel/gpu/subdevice/subdevice.h"
#include "lib/protobuf/prb.h"
#include "g_nvdebug_pb.h"
#include "lib/protobuf/prb_util.h"
#include "diagnostics/journal.h"
//
// NVD RM SubDevice Controls
//
/*!
 * @brief Get Dump Size. Returns an estimate of the number of bytes in the
 * dump for the requested component, so the caller can size a buffer.
 *
 * @param[in]     pSubdevice
 * @param[in,out] pDumpSizeParams  component in; estimated size out
 *
 * @returns NV_OK on success
 */
NV_STATUS
subdeviceCtrlCmdNvdGetDumpSize_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS *pDumpSizeParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
    NVDUMP_BUFFER sizingBuffer = {0};
    NV_STATUS status;

    // Allow for the largest possible dump size, if needed
    sizingBuffer.size = NVDUMP_MAX_DUMP_SIZE;

    // Sizing pass: NVDUMP_BUFFER_COUNT policy, no destination buffer set.
    status = nvdDumpComponent(pGpu,
                              GPU_GET_NVD(pGpu),
                              pDumpSizeParams->component,
                              &sizingBuffer,
                              NVDUMP_BUFFER_COUNT,
                              NULL);

    pDumpSizeParams->size = sizingBuffer.curNumBytes;

    return status;
}
/*!
 * @brief Get Dump. Writes the requested component's dump into the
 * caller-provided buffer.
 *
 * @param[in]     pSubdevice
 * @param[in,out] pDumpParams  component/buffer/size in; bytes written out
 *
 * @returns NV_OK on success
 */
NV_STATUS
subdeviceCtrlCmdNvdGetDump_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_NVD_GET_DUMP_PARAMS *pDumpParams
)
{
    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
    NVDUMP_BUFFER targetBuffer = {0};
    NV_STATUS status;

    // Describe the caller-provided destination buffer.
    targetBuffer.size = pDumpParams->size;
    targetBuffer.address = pDumpParams->pBuffer;

    // Dump the component
    status = nvdDumpComponent(pGpu,
                              GPU_GET_NVD(pGpu),
                              pDumpParams->component,
                              &targetBuffer,
                              NVDUMP_BUFFER_PROVIDED,
                              NULL);

    // Report how many bytes were actually produced.
    pDumpParams->size = targetBuffer.curNumBytes;

    return status;
}
/*!
 * @brief Helper function to convert timestamps from the hi-res timer to time
 * in ms since 1970. OCA records time in ticks since boot, so to build a
 * timestamp we convert the ticks to ms and add them to the boot time.
 *
 * @param[in] timerVal  hi-res timer value, in ticks since boot
 *
 * @returns time since 1970 in ms
 */
static NvU64 createTimestampFromTimer(NvU64 timerVal)
{
    NvU32 currTimeSec = 0;
    NvU32 currTimeUsec = 0;
    NvU64 currTimeMsec;
    NvU64 timeSinceBootNsec = 0;
    NvU64 timeSinceBootMsec = 0;
    NvU64 timerFreq;
    NvU64 timeValMsec;
    NvU64 timestampMs;

    // get all the current time info.
    osGetCurrentTick(&timeSinceBootNsec);          // get the time since boot in ns
    osGetCurrentTime(&currTimeSec, &currTimeUsec); // get the current time
    timerFreq = osGetTimestampFreq();              // get the ticks/second.

    // convert everything to the same base (ms)

    // convert the time value from ticks to ms since boot.
    // NOTE(review): timerVal * 1000 can overflow NvU64 for very large tick
    // counts; assumed unreachable in practice -- confirm.
    timeValMsec = (timerVal * 1000) / timerFreq;

    // scale time since boot from ns to ms
    timeSinceBootMsec = timeSinceBootNsec / 1000000;

    // put it together in ms
    currTimeMsec = currTimeSec; // need to move this to the 64 bit value
    currTimeMsec *= 1000;       // before multiply to avoid overflow.
    currTimeMsec += currTimeUsec / 1000;

    // put it all together.
    timestampMs = currTimeMsec - timeSinceBootMsec; // determine boot time.
    timestampMs += timeValMsec;                     // add in the timeVal since boot

    return timestampMs;
}
/*!
 * @brief Get the NOCAT journal Rpt. Returns the entries in the NOCAT Journal.
 *
 * Drains up to NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS records into the
 * caller's params, fixing up each record's timestamp from timer ticks to
 * wall-clock ms, then fills in the record/outstanding counters.
 *
 * @param[in]     pSubdevice
 * @param[in,out] pReportParams  flags in (preserved); records and counters out
 *
 * @returns NV_OK on success (including "no records available")
 */
NV_STATUS
subdeviceCtrlCmdNvdGetNocatJournalRpt_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS *pReportParams
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    Journal *pRcdb = SYS_GET_RCDB(pSys);
    NvU32 idx;
    // NOTE(review): status is only assigned inside the loop; assumes
    // NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS >= 1 -- confirm.
    NV_STATUS status;
    NvU32 flags;

    if (pRcdb == NULL)
    {
        return NV_ERR_INVALID_STATE;
    }

    // start with a clean slate: zero the params, but keep the caller's flags.
    flags = pReportParams->flags;
    portMemSet(pReportParams, 0, sizeof(*pReportParams));
    pReportParams->flags = flags;

    // get reports until we run out of reports or run out of space.
    for (idx = 0; idx < NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS; idx++)
    {
        status = rcdbReportNextNocatJournalEntry(&pReportParams->journalRecords[idx]);
        if (status != NV_OK)
        {
            if ((status == NV_ERR_OBJECT_NOT_FOUND) || (idx != 0))
            {
                // call to get the next record failed,
                // either we have run out of records,
                // or we have put at least one record into report.
                // we will call that a success so we report the records we have, or a 0 count.
                // NOTE -- NvAPI translates OBJECT_NOT_FOUND to a general NVAPI_ERROR,
                // so the caller can not tell the reason for the failure is we ran out of records.
                // that is why we are translating that to a success here.
                status = NV_OK;
            }
            break;
        }

        // fix up the time stamp: convert ticks-since-boot to ms since 1970.
        pReportParams->journalRecords[idx].timeStamp =
            createTimestampFromTimer(pReportParams->journalRecords[idx].timeStamp);
    }

    if (status == NV_OK)
    {
        //update the counters.
        pReportParams->nocatRecordCount = idx;
        pReportParams->nocatOutstandingRecordCount = rcdbGetNocatOutstandingCount(pRcdb);

        // add in the activity counters.
        portMemCopy(pReportParams->activityCounters, NV_SIZEOF32(pReportParams->activityCounters),
            pRcdb->nocatJournalDescriptor.nocatEventCounters,
            NV_SIZEOF32(pRcdb->nocatJournalDescriptor.nocatEventCounters));
    }
    return status;
}
/*!
 * @brief Set the NOCAT TDR data collected by KMD in the NOCAT journal record.
 *
 * Dispatches on pReportParams->dataType: records a TDR reason, inserts a new
 * NOCAT record, or sets/clears the journal tag. Unknown types are ignored.
 *
 * @param[in] pSubdevice
 * @param[in] pReportParams  type-tagged payload from the caller
 *
 * @returns NV_OK always
 */
NV_STATUS
subdeviceCtrlCmdNvdSetNocatJournalData_IMPL
(
    Subdevice *pSubdevice,
    NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS* pReportParams
)
{
    OBJSYS *pSys = SYS_GET_INSTANCE();
    Journal *pRcdb = SYS_GET_RCDB(pSys);

    switch (pReportParams->dataType)
    {
    case NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_TDR_REASON:
        rcdbSetNocatTdrReason(&pReportParams->nocatJournalData.tdrReason);
        break;

    case NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_INSERT_RECORD:
    {
        NOCAT_JOURNAL_PARAMS newEntry;
        portMemSet(&newEntry, 0, sizeof(newEntry));

        // fill in the newEntry structure with the data from the insertData.
        newEntry.recType = pReportParams->nocatJournalData.insertData.recType;
        newEntry.pSource = (char *)pReportParams->nocatJournalData.insertData.source;
        newEntry.bugcheck = pReportParams->nocatJournalData.insertData.bugcheck;
        newEntry.subsystem = pReportParams->nocatJournalData.insertData.subsystem;
        newEntry.errorCode = pReportParams->nocatJournalData.insertData.errorCode;

        // for now we are not supporting external events with diag buffers.
        newEntry.pDiagBuffer = NULL;
        newEntry.diagBufferLen = 0;

        newEntry.pFaultingEngine = (char *)pReportParams->nocatJournalData.insertData.faultingEngine;

        // do we want to allow NULL strings?
        if (FLD_TEST_DRF(2080_CTRL, _NOCAT_INSERT, _ALLOW_NULL_STR, _NO,
            pReportParams->nocatJournalData.insertData.flags))
        {
            // BUGFIX: both emptiness checks were inverted (!= '\0'), which
            // nulled out non-empty strings and kept pointers to empty ones --
            // the opposite of what the comments below intend.
            if (pReportParams->nocatJournalData.insertData.source[0] == '\0')
            {
                // don't pass in a pointer to null source string.
                newEntry.pSource = NULL;
            }
            if (pReportParams->nocatJournalData.insertData.faultingEngine[0] == '\0')
            {
                // don't pass in a pointer to null faulting engine string.
                newEntry.pFaultingEngine = NULL;
            }
        }
        pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_INSERT_RECORDS_IDX]++;
        rcdbNocatInsertNocatError(NULL, &newEntry);
    }
        break;

    case NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_SET_TAG:
        // NOTE(review): the clear check reads insertData.flags, not
        // tagData.flags -- looks suspicious; confirm against the ctrl header
        // before changing (left as-is here).
        if ((pReportParams->nocatJournalData.tagData.tag[0] == '\0') ||
            FLD_TEST_DRF(2080_CTRL, _NOCAT_TAG, _CLEAR, _YES,
                pReportParams->nocatJournalData.insertData.flags))
        {
            // clear the tag
            portMemSet(pRcdb->nocatJournalDescriptor.tag, 0,
                sizeof(pRcdb->nocatJournalDescriptor.tag));
        }
        else
        {
            // save the tag
            portStringCopy((char *)pRcdb->nocatJournalDescriptor.tag,
                NV2080_NOCAT_JOURNAL_MAX_STR_LEN,
                (char *)pReportParams->nocatJournalData.tagData.tag,
                portStringLength((char *)pReportParams->nocatJournalData.tagData.tag) + 1);
        }
        break;

    default:
        break;
    }
    return NV_OK;
}

File diff suppressed because it is too large Load Diff