Mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
Commit 3084c04453 (parent caa2dd11a0), committed by Gaurav Juvekar
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -106,14 +106,12 @@ typedef struct
#define GPUS_LOCK_FLAGS_NONE                 (0x00000000)
// conditional acquire; if lock is already held then return error
#define GPU_LOCK_FLAGS_COND_ACQUIRE          NVBIT(0)
// acquire the lock in read (shared) mode, if applicable
#define GPU_LOCK_FLAGS_READ                  NVBIT(1)
// Attempt acquire even if it potentially violates the locking order
// But do not block in a way that could cause a deadlock
#define GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE     NVBIT(2)
#define GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE     NVBIT(1)
// Additionally acquire the GPU alloc lock (implied if locking all GPUs)
// to prevent the set of lockable GPUs from changing
#define GPU_LOCK_FLAGS_LOCK_ALLOC            NVBIT(3)
#define GPU_LOCK_FLAGS_LOCK_ALLOC            NVBIT(2)

//
// RM Lock Related Functions
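The hunk above renumbers GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE from NVBIT(2) to NVBIT(1) and GPU_LOCK_FLAGS_LOCK_ALLOC from NVBIT(3) to NVBIT(2), apparently reclaiming the bit previously used by GPU_LOCK_FLAGS_READ. A minimal sketch (not part of the diff) of how these NVBIT-style flags compose, assuming NVBIT(b) expands to (1U << (b)) as in nvmisc.h:

#include <stdio.h>

#define NVBIT(b) (1U << (b))

#define GPU_LOCK_FLAGS_COND_ACQUIRE      NVBIT(0)
#define GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE NVBIT(1)   // was NVBIT(2) before this change
#define GPU_LOCK_FLAGS_LOCK_ALLOC        NVBIT(2)   // was NVBIT(3) before this change

int main(void)
{
    // Callers OR the bits together; the renumbering changes the packed value,
    // so only the symbolic names are stable across this diff.
    unsigned flags = GPU_LOCK_FLAGS_COND_ACQUIRE | GPU_LOCK_FLAGS_LOCK_ALLOC;
    printf("flags = 0x%x\n", flags);   // 0x5 with the new numbering (0x9 before)
    return 0;
}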
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -81,7 +81,6 @@ typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; // FW declare thread state
#define NV_ROUNDUP(a,b)               ((NV_CEIL(a,b))*(b))
#define NV_ROUND_TO_QUANTA(a, quanta) (((quanta) == 0) ? (a): ((((a) + ((quanta) >> 1)) / (quanta)) * (quanta)))
#define NV_FLOOR_TO_QUANTA(a, quanta) (((a) / (quanta)) * (quanta))
#define NV_ARRAY_ELEMENTS(x)          ((sizeof(x)/sizeof((x)[0])))
#define NV_BYTESWAP16(a) ((((a) & 0xff00)>>8) | \
                          (((a) & 0x00ff)<<8))
#define NV_BYTESWAP32(a) ((((a) & 0xff000000)>>24) | \
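A quick worked example (not part of the diff) exercising the rounding and array-size macros from this hunk; NV_BYTESWAP32 is truncated above, so only the fully visible macros are used:

#include <stdio.h>

#define NV_ROUND_TO_QUANTA(a, quanta) (((quanta) == 0) ? (a): ((((a) + ((quanta) >> 1)) / (quanta)) * (quanta)))
#define NV_FLOOR_TO_QUANTA(a, quanta) (((a) / (quanta)) * (quanta))
#define NV_ARRAY_ELEMENTS(x)          ((sizeof(x)/sizeof((x)[0])))

int main(void)
{
    int table[7];
    printf("%d\n",  NV_ROUND_TO_QUANTA(1000, 64)); // (1000 + 32) / 64 * 64 = 1024 (round to nearest)
    printf("%d\n",  NV_FLOOR_TO_QUANTA(1000, 64)); //  1000 / 64 * 64       = 960  (round down)
    printf("%zu\n", NV_ARRAY_ELEMENTS(table));     // 7
    return 0;
}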
@@ -27,18 +27,9 @@
|
||||
#include "nvtypes.h"
|
||||
#include "cc_drv.h"
|
||||
|
||||
struct decryptBundle_t
|
||||
{
|
||||
NvU8 keyIn[CC_AES_256_GCM_KEY_SIZE_BYTES];
|
||||
NvU8 ivMaskIn[CC_AES_256_GCM_IV_SIZE_BYTES];
|
||||
};
|
||||
|
||||
typedef struct decryptBundle_t *pDecryptBundle;
|
||||
|
||||
struct ccslContext_t
|
||||
{
|
||||
NvHandle hClient;
|
||||
NvHandle hSubdevice;
|
||||
NvHandle hChannel;
|
||||
|
||||
enum {CSL_MSG_CTR_32, CSL_MSG_CTR_64} msgCounterSize;
|
||||
@@ -60,19 +51,11 @@ struct ccslContext_t
|
||||
|
||||
NvU64 keyHandleIn;
|
||||
NvU64 keyHandleOut;
|
||||
NvU64 keyHandleOutFallback;
|
||||
|
||||
NvU32 globalKeyIdIn;
|
||||
NvU32 globalKeyIdOut;
|
||||
|
||||
void *openrmCtx;
|
||||
|
||||
MEMORY_DESCRIPTOR *pMemDesc;
|
||||
volatile CC_CRYPTOBUNDLE_STATS *pEncStatsBuffer;
|
||||
void * pConfCompute;
|
||||
|
||||
pDecryptBundle pDecryptBundles;
|
||||
NvU32 currDecryptBundle;
|
||||
};
|
||||
|
||||
typedef struct ccslContext_t *pCcslContext;
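The new pDecryptBundles / currDecryptBundle fields are declared above, but this header does not show how they are used. A purely hypothetical sketch of one plausible access pattern, treating currDecryptBundle as a ring index (numBundles is an assumed, allocator-chosen length, not something defined in this diff):

static inline pDecryptBundle
ccslNextDecryptBundle(pCcslContext pCtx, NvU32 numBundles)
{
    // Hypothetical: hand out the current bundle and advance the ring index.
    pDecryptBundle pBundle = &pCtx->pDecryptBundles[pCtx->currDecryptBundle];
    pCtx->currDecryptBundle = (pCtx->currDecryptBundle + 1) % numBundles;
    return pBundle;
}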
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
||||
@@ -35,10 +35,13 @@
|
||||
#define GPU_FABRIC_PROBE_DEFAULT_PROBE_SLOWDOWN_THRESHOLD 10
|
||||
|
||||
typedef struct GPU_FABRIC_PROBE_INFO_KERNEL GPU_FABRIC_PROBE_INFO_KERNEL;
|
||||
typedef struct GPU_FABRIC_PROBE_INFO_PHYSICAL GPU_FABRIC_PROBE_INFO_PHYSICAL;
|
||||
|
||||
NV_STATUS gpuFabricProbeStart(OBJGPU *pGpu,
|
||||
GPU_FABRIC_PROBE_INFO_KERNEL **ppGpuFabricProbeInfoKernel);
|
||||
void gpuFabricProbeStop(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
|
||||
void gpuFabricProbeStopPhysical(GPU_FABRIC_PROBE_INFO_PHYSICAL *pGpuFabricProbeInfoPhysical,
|
||||
NvU32 gfId);
|
||||
|
||||
void gpuFabricProbeSuspend(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
|
||||
NV_STATUS gpuFabricProbeResume(GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel);
|
||||
@@ -52,7 +55,6 @@ NV_STATUS gpuFabricProbeGetGpaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64
|
||||
NV_STATUS gpuFabricProbeGetGpaAddressRange(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pGpaAddressRange);
|
||||
NV_STATUS gpuFabricProbeGetFlaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pFlaAddress);
|
||||
NV_STATUS gpuFabricProbeGetFlaAddressRange(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pFlaAddressRange);
|
||||
NV_STATUS gpuFabricProbeGetEgmGpaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pEgmGpaAddress);
|
||||
NV_STATUS gpuFabricProbeGetNumProbeReqs(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *numProbes);
|
||||
NV_STATUS gpuFabricProbeGetFabricCliqueId(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU32 *pFabricCliqueId);
|
||||
NV_STATUS gpuFabricProbeGetFabricHealthStatus(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU32 *pFabricHealthStatusMask);
|
||||
@@ -72,6 +74,6 @@ NV_STATUS gpuFabricProbeReceivePhysicalCallback(NvU32 gpuInstance, NvU64 *pNotif
|
||||
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS *pInbandRcvParams);
|
||||
NV_STATUS gpuFabricProbeReceiveUpdatePhysicalCallback(NvU32 gpuInstance, NvU64 *pNotifyGfIdMask,
|
||||
NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS *pInbandRcvParams);
|
||||
|
||||
NV_STATUS gpuFabricProbeGetGfid(OBJGPU *pGpu, NvU32 *pGfid);
|
||||
|
||||
#endif // GPU_FABRIC_PROBE_H
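An illustrative call sequence (not part of the diff) stitched together from the prototypes above; error handling and the surrounding RM context are elided, and pGpu is assumed to be a valid OBJGPU pointer:

static NV_STATUS probeAndReadGpaSketch(OBJGPU *pGpu)
{
    GPU_FABRIC_PROBE_INFO_KERNEL *pInfo = NULL;
    NvU64 gpaAddress;
    NV_STATUS status;

    status = gpuFabricProbeStart(pGpu, &pInfo);
    if (status != NV_OK)
        return status;

    // Query one of the probe results once the probe has run.
    status = gpuFabricProbeGetGpaAddress(pInfo, &gpaAddress);

    gpuFabricProbeStop(pInfo);
    return status;
}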
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -26,17 +26,11 @@
|
||||
|
||||
#include "core/core.h"
|
||||
|
||||
typedef struct {
|
||||
RmPhysAddr cmdQueuePhysAddr;
|
||||
} GSP_RMFS_INIT_ARGUMENTS;
|
||||
|
||||
typedef struct {
|
||||
RmPhysAddr sharedMemPhysAddr;
|
||||
NvU32 pageTableEntryCount;
|
||||
NvLength cmdQueueOffset;
|
||||
NvLength statQueueOffset;
|
||||
NvLength locklessCmdQueueOffset;
|
||||
NvLength locklessStatQueueOffset;
|
||||
} MESSAGE_QUEUE_INIT_ARGUMENTS;
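MESSAGE_QUEUE_INIT_ARGUMENTS describes a single shared-memory region carved into several queues by offset. A hypothetical helper (not from the diff) showing how such an offset locates a queue within a CPU mapping of that region:

// Hypothetical: pShared is a CPU mapping of the region whose physical address
// is sharedMemPhysAddr; 'offset' is e.g. pArgs->cmdQueueOffset.
static inline void *msgqQueueAt(void *pShared, NvLength offset)
{
    return (void *)((NvU8 *)pShared + offset);
}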
|
||||
|
||||
typedef struct {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -67,6 +67,13 @@ typedef struct GspSMInfo_t
|
||||
NvU32 rtCoreCount;
|
||||
} GspSMInfo;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
NvU32 ecidLow;
|
||||
NvU32 ecidHigh;
|
||||
NvU32 ecidExtended;
|
||||
} EcidManufacturingInfo;
|
||||
|
||||
// Fetched from GSP-RM into CPU-RM
|
||||
typedef struct GspStaticConfigInfo_t
|
||||
{
|
||||
@@ -88,10 +95,10 @@ typedef struct GspStaticConfigInfo_t
|
||||
NvBool poisonFuseEnabled;
|
||||
|
||||
NvU64 fb_length;
|
||||
NvU32 fbio_mask;
|
||||
NvU64 fbio_mask;
|
||||
NvU32 fb_bus_width;
|
||||
NvU32 fb_ram_type;
|
||||
NvU32 fbp_mask;
|
||||
NvU64 fbp_mask;
|
||||
NvU32 l2_cache_size;
|
||||
|
||||
NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
|
||||
@@ -110,9 +117,24 @@ typedef struct GspStaticConfigInfo_t
|
||||
NvBool bIsTesla;
|
||||
NvBool bIsMobile;
|
||||
NvBool bIsGc6Rtd3Allowed;
|
||||
NvBool bIsGc8Rtd3Allowed;
|
||||
NvBool bIsGcOffRtd3Allowed;
|
||||
NvBool bIsGcoffLegacyAllowed;
|
||||
|
||||
/* "Total Board Power" refers to power requirement of GPU,
|
||||
* while in GC6 state. Majority of this power will be used
|
||||
* to keep V-RAM active to preserve its content.
* Some energy may be consumed by always-on components on the GPU chip.
|
||||
* This power will be provided by 3.3v voltage rail.
|
||||
*/
|
||||
NvU16 RTD3GC6TotalBoardPower;
|
||||
|
||||
/* PERST# (i.e. PCI Express Reset) is a sideband signal
|
||||
* generated by the PCIe Host to indicate to the PCIe devices
|
||||
* that the power-rails and the reference-clock are stable.
|
||||
* The endpoint device typically uses this signal as a global reset.
|
||||
*/
|
||||
NvU16 RTD3GC6PerstDelay;
|
||||
|
||||
NvU64 bar1PdeBase;
|
||||
NvU64 bar2PdeBase;
|
||||
|
||||
@@ -143,6 +165,8 @@ typedef struct GspStaticConfigInfo_t
|
||||
NvBool bAtsSupported;
|
||||
|
||||
NvBool bIsGpuUefi;
|
||||
|
||||
EcidManufacturingInfo ecidInfo;
|
||||
} GspStaticConfigInfo;
|
||||
|
||||
// Pushed from CPU-RM to GSP-RM
|
||||
@@ -151,6 +175,7 @@ typedef struct GspSystemInfo
|
||||
NvU64 gpuPhysAddr;
|
||||
NvU64 gpuPhysFbAddr;
|
||||
NvU64 gpuPhysInstAddr;
|
||||
NvU64 gpuPhysIoAddr;
|
||||
NvU64 nvDomainBusDeviceFunc;
|
||||
NvU64 simAccessBufPhysAddr;
|
||||
NvU64 notifyOpSharedSurfacePhysAddr;
|
||||
@@ -159,6 +184,9 @@ typedef struct GspSystemInfo
|
||||
NvU64 maxUserVa;
|
||||
NvU32 pciConfigMirrorBase;
|
||||
NvU32 pciConfigMirrorSize;
|
||||
NvU32 PCIDeviceID;
|
||||
NvU32 PCISubDeviceID;
|
||||
NvU32 PCIRevisionID;
|
||||
NvU8 oorArch;
|
||||
NvU64 clPdbProperties;
|
||||
NvU32 Chipset;
|
||||
@@ -179,9 +207,10 @@ typedef struct GspSystemInfo
|
||||
NvBool bIsPassthru;
|
||||
NvU64 sysTimerOffsetNs;
|
||||
GSP_VF_INFO gspVFInfo;
|
||||
NvBool bIsPrimary;
|
||||
NvBool isGridBuild;
|
||||
NvU32 gridBuildCsp;
|
||||
NvBool bTdrEventSupported;
|
||||
NvBool bPreserveVideoMemoryAllocations;
|
||||
} GspSystemInfo;
|
||||
|
||||
|
||||
|
||||
@@ -35,7 +35,10 @@
|
||||
#define GSP_TRACING_RATS_ENABLED 0
|
||||
#define GSP_TRACE_RATS_ADD_RECORD(recordIdentifier, pGpu, info) (void) 0
|
||||
|
||||
#define KERNEL_GSP_TRACING_RATS_ENABLED 0
|
||||
#include "kernel/gpu/gsp/kernel_gsp_trace_rats.h"
|
||||
#include "class/cl90cdtrace.h"
|
||||
|
||||
#define KERNEL_GSP_TRACING_RATS_ENABLED 1
|
||||
|
||||
#ifndef GET_RATS_TIMESTAMP_NS
|
||||
#define GET_RATS_TIMESTAMP_NS() NV_ASSERT(0)
|
||||
|
||||
src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef KERNEL_RATS_GSP_TRACE_H
|
||||
#define KERNEL_RATS_GSP_TRACE_H
|
||||
|
||||
#include "core/core.h"
|
||||
#include "containers/multimap.h"
|
||||
#include "class/cl90cdtrace.h"
|
||||
#include "rmapi/event_buffer.h"
|
||||
|
||||
typedef struct
|
||||
{
|
||||
EventBuffer *pEventBuffer;
|
||||
NvHandle hClient;
|
||||
NvHandle hNotifier;
|
||||
NvHandle hEventBuffer;
|
||||
NvU64 pUserInfo;
|
||||
} NV_EVENT_BUFFER_BIND_POINT_GSP_TRACE;
|
||||
|
||||
MAKE_MULTIMAP(GspTraceEventBufferBindMultiMap, NV_EVENT_BUFFER_BIND_POINT_GSP_TRACE);
|
||||
|
||||
void gspTraceNotifyAllConsumers(OBJGPU *pGpu, void *pArgs);
|
||||
|
||||
void gspTraceEventBufferLogRecord(OBJGPU *pGpu, NV_RATS_GSP_TRACE_RECORD *intrTraceRecord);
|
||||
|
||||
NV_STATUS gspTraceAddBindpoint(OBJGPU *pGpu,
|
||||
RsClient *pClient,
|
||||
RsResourceRef *pEventBufferRef,
|
||||
NvHandle hNotifier,
|
||||
NvU64 tracepointMask,
|
||||
NvU32 gspLoggingBufferSize,
|
||||
NvU32 gspLoggingBufferWatermark);
|
||||
|
||||
void gspTraceRemoveBindpoint(OBJGPU *pGpu, NvU64 uid, NV_EVENT_BUFFER_BIND_POINT_GSP_TRACE* pBind);
|
||||
|
||||
void gspTraceRemoveAllBindpoints(EventBuffer *pEventBuffer);
|
||||
|
||||
#endif
|
||||
@@ -29,8 +29,7 @@
|
||||
|
||||
// Used for indexing into the MESSAGE_QUEUE_COLLECTION array.
|
||||
#define RPC_TASK_RM_QUEUE_IDX 0
|
||||
#define RPC_TASK_ISR_QUEUE_IDX 1
|
||||
#define RPC_QUEUE_COUNT 2
|
||||
#define RPC_QUEUE_COUNT 1
|
||||
|
||||
typedef struct _message_queue_info MESSAGE_QUEUE_INFO;
|
||||
typedef struct MESSAGE_QUEUE_COLLECTION MESSAGE_QUEUE_COLLECTION;
|
||||
|
||||
@@ -152,8 +152,12 @@
|
||||
#define MC_ENGINE_IDX_PXUC 168
|
||||
#define MC_ENGINE_IDX_SYSLTC 169
|
||||
#define MC_ENGINE_IDX_LRCC 170
|
||||
#define MC_ENGINE_IDX_RESERVED171 171
|
||||
#define MC_ENGINE_IDX_RESERVED172 172
|
||||
#define MC_ENGINE_IDX_RESERVED173 173
|
||||
#define MC_ENGINE_IDX_RESERVED174 174
|
||||
// This must be kept as the max bit if we need to add more engines
|
||||
#define MC_ENGINE_IDX_MAX 171
|
||||
#define MC_ENGINE_IDX_MAX 175
|
||||
|
||||
// Index GR reference
|
||||
#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x))
|
||||
@@ -178,7 +182,7 @@
|
||||
((MC_ENGINE_IDX_CE(0) <= (x)) && ((x) <= MC_ENGINE_IDX_CE_MAX))
|
||||
|
||||
// Index OFA reference
|
||||
#define MC_ENGINE_IDX_OFA(x) (MC_ENGINE_IDX_OFA0 + (x))
|
||||
#define MC_ENGINE_IDX_OFA(x) (MC_ENGINE_IDX_OFA0 + (x))
|
||||
|
||||
MAKE_BITVECTOR(MC_ENGINE_BITVECTOR, MC_ENGINE_IDX_MAX);
|
||||
typedef MC_ENGINE_BITVECTOR *PMC_ENGINE_BITVECTOR;
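The RESERVED171..174 indices and the new MC_ENGINE_IDX_MAX of 175 matter because the engine bitvector above is sized from MC_ENGINE_IDX_MAX. A stand-in sketch (not part of the diff; MAKE_BITVECTOR itself is an RM container macro not reproduced here):

// Stand-in only: one bit per engine index, sized from the highest index in use.
#define MC_ENGINE_IDX_MAX_SKETCH 175

typedef struct
{
    NvU32 bits[(MC_ENGINE_IDX_MAX_SKETCH + 31) / 32];   // 6 x 32-bit words cover 175 bits
} MC_ENGINE_BITVECTOR_SKETCH;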
|
||||
|
||||
@@ -42,35 +42,15 @@ typedef enum
|
||||
typedef struct
|
||||
{
|
||||
NvU32 refCount;
|
||||
NvU16 GC6PerstDelay; // waiting time for Upstream Port of GPU,
|
||||
// before asserting perst# signal,
|
||||
// during RTD3/GC6 Entry.
|
||||
NvU16 GC6TotalBoardPower; // Power required by GPU to sustain RTD3/GC6.
|
||||
GPU_GC6_STATE currentState;
|
||||
NvU32 executedStepMask; // step mask executed during entry sequence
|
||||
NvU32 stepMask[NV2080_CTRL_GC6_FLAVOR_ID_MAX]; // step mask cache
|
||||
} _GPU_GC6_STATE;
|
||||
|
||||
// GPU event mask operation
|
||||
#define GC6_REFCOUNT_MASK_SET(pGpu, refCountBit) \
|
||||
do \
|
||||
{ \
|
||||
if (pGpu != NULL) \
|
||||
{ \
|
||||
((pGpu->gc6State.refCountMask) |= (NVBIT(refCountBit))); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define GC6_REFCOUNT_MASK_CLEAR(pGpu, refCountBit) \
|
||||
do \
|
||||
{ \
|
||||
if (pGpu != NULL) \
|
||||
{ \
|
||||
((pGpu->gc6State.refCountMask) &= ~(NVBIT(refCountBit))); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define GC6_REFCOUNT_MASK_GET_FROM_EVENT(event) ((event / 2))
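A hypothetical helper (not part of the diff) showing how the refcount-mask macros above are meant to compose; it assumes 'event' uses the encoding implied by GC6_REFCOUNT_MASK_GET_FROM_EVENT (bit index = event / 2):

static void gc6TrackRefcountSketch(OBJGPU *pGpu, NvU32 event, NvBool bSet)
{
    NvU32 refCountBit = GC6_REFCOUNT_MASK_GET_FROM_EVENT(event);

    if (bSet)
        GC6_REFCOUNT_MASK_SET(pGpu, refCountBit);    // refCountMask |= NVBIT(refCountBit)
    else
        GC6_REFCOUNT_MASK_CLEAR(pGpu, refCountBit);  // refCountMask &= ~NVBIT(refCountBit)
}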
|
||||
|
||||
// GC6 related defines
|
||||
#define GC6_FB_CLAMP_TIMEOUT_MS 10
|
||||
|
||||
// Macros for GPU_GC6_STATE
|
||||
#define IS_GPU_GC6_STATE_POWERED_ON(obj) (obj->gc6State.currentState == GPU_GC6_STATE_POWERED_ON)
|
||||
#define IS_GPU_GC6_STATE_EXITED(obj) (obj->gc6State.currentState == GPU_GC6_STATE_EXITED)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -33,7 +33,7 @@
|
||||
|
||||
#define CE_MAX_BYTES_PER_LINE 0xffffffffULL
|
||||
#define CE_METHOD_SIZE_PER_BLOCK 0x64
|
||||
#define FAST_SCRUBBER_METHOD_SIZE_PER_BLOCK 0x94
|
||||
#define FAST_SCRUBBER_METHOD_SIZE_PER_BLOCK 0x78
|
||||
|
||||
// number of bytes per sec2 method-stream (including host methods)
|
||||
#define SEC2_METHOD_SIZE_PER_BLOCK 0x94
|
||||
|
||||
@@ -130,12 +130,6 @@ typedef struct
|
||||
NV_ADDRESS_SPACE srcAddressSpace;
|
||||
NvU32 dstCpuCacheAttrib;
|
||||
NvU32 srcCpuCacheAttrib;
|
||||
|
||||
NvBool bSecureCopy; // The copy encrypts/decrypts protected memory
|
||||
NvBool bEncrypt; // encrypt/decrypt
|
||||
NvU64 authTagAddr;
|
||||
NvU64 encryptIvAddr;
|
||||
|
||||
} CHANNEL_PB_INFO;
|
||||
|
||||
NV_STATUS channelSetupIDs(OBJCHANNEL *pChannel, OBJGPU *pGpu, NvBool bUseVasForCeCopy, NvBool bMIGInUse);
|
||||
|
||||
@@ -35,11 +35,17 @@
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(SRT_BUILD)
|
||||
#define RMCFG_MODULE_x 1
|
||||
#define RMCFG_FEATURE_x 1
|
||||
#else
|
||||
#include "rmconfig.h"
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
// Default page size 64KB
|
||||
#define PMA_GRANULARITY 0x10000
|
||||
#define PMA_PAGE_SHIFT 16
|
||||
@@ -71,6 +77,7 @@ typedef NvU32 PMA_PAGESTATUS;
|
||||
#define ATTRIB_PERSISTENT NVBIT(MAP_IDX_PERSISTENT)
|
||||
#define ATTRIB_NUMA_REUSE NVBIT(MAP_IDX_NUMA_REUSE)
|
||||
#define ATTRIB_BLACKLIST NVBIT(MAP_IDX_BLACKLIST)
|
||||
|
||||
#define ATTRIB_MASK (ATTRIB_EVICTING | ATTRIB_SCRUBBING \
|
||||
| ATTRIB_PERSISTENT | ATTRIB_NUMA_REUSE \
|
||||
| ATTRIB_BLACKLIST)
|
||||
@@ -79,6 +86,7 @@ typedef NvU32 PMA_PAGESTATUS;
|
||||
|
||||
#define PMA_STATE_BITS_PER_PAGE 2 // Alloc & pinned state
|
||||
#define PMA_ATTRIB_BITS_PER_PAGE 5 // Persistence, Scrubbing, Evicting, Reuse & Blacklisting attributes
|
||||
|
||||
#define PMA_BITS_PER_PAGE (PMA_STATE_BITS_PER_PAGE + PMA_ATTRIB_BITS_PER_PAGE)
|
||||
|
||||
//
|
||||
|
||||
@@ -46,6 +46,7 @@
|
||||
|
||||
#if defined(SRT_BUILD)
|
||||
#define RMCFG_MODULE_x 1
|
||||
#define RMCFG_FEATURE_x 1
|
||||
#else
|
||||
#include "rmconfig.h"
|
||||
#endif
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -34,7 +34,6 @@
|
||||
#include "internal/libspdm_common_lib.h"
|
||||
#include "internal/libspdm_secured_message_lib.h"
|
||||
#include "library/spdm_requester_lib.h"
|
||||
#include "nvspdm_cryptlib_extensions.h"
|
||||
|
||||
/* ------------------------ Macros and Defines ----------------------------- */
|
||||
//
|
||||
|
||||
@@ -33,3 +33,7 @@
|
||||
RMCTRL_FLAGS(KERNEL_PRIVILEGED, ROUTE_TO_PHYSICAL, INTERNAL))
|
||||
NV_STATUS subdeviceCtrlCmdCcuSetStreamState(Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_CCU_STREAM_STATE_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV2080_CTRL_CMD_INTERNAL_CCU_GET_SAMPLE_INFO,
|
||||
RMCTRL_FLAGS(KERNEL_PRIVILEGED, ROUTE_TO_PHYSICAL, INTERNAL))
|
||||
NV_STATUS subdeviceCtrlCmdCcuGetSampleInfo(Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_CCU_SAMPLE_INFO_PARAMS *pParams);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -200,6 +200,9 @@ typedef struct
|
||||
|
||||
#define VIDEO_ENGINE_EVENT__LOG_DATA_SIZE(s) (sizeof(VIDEO_ENGINE_EVENT__RECORD) + s.event_log_data.size)
|
||||
|
||||
#define VIDEO_ENGINE_EVENT__TRACE_ADDR__OFFSET_LO (0xF90UL)
|
||||
#define VIDEO_ENGINE_EVENT__TRACE_ADDR__OFFSET_HI (0xF94UL)
|
||||
|
||||
/*!
|
||||
* Client information passed down by RM and saved at offset VIDEO_ENGINE_EVENT__LOG_INFO__OFFSET
|
||||
* with the size of VIDEO_ENGINE_EVENT__LOG_INFO__SIZE in context allocation.
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "g_videoeventlist_nvoc.h"
|
||||
|
||||
#ifndef VIDEO_EVENT_LIST_H
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -28,10 +28,10 @@
|
||||
|
||||
NV_STATUS RmP2PGetPages (NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, OBJGPU **, void *, void (*)(void *), void *);
|
||||
NV_STATUS RmP2PGetPagesWithoutCallbackRegistration (NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, OBJGPU **, void *);
|
||||
NV_STATUS RmP2PGetPagesPersistent (NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *);
|
||||
NV_STATUS RmP2PGetPagesPersistent (NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *, void **);
|
||||
NV_STATUS RmP2PRegisterCallback (NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
|
||||
NV_STATUS RmP2PPutPages (NvU64, NvU32, NvU64, void *);
|
||||
NV_STATUS RmP2PGetGpuByAddress (NvU64, NvU64, OBJGPU **);
|
||||
NV_STATUS RmP2PPutPagesPersistent (void *, void *);
|
||||
NV_STATUS RmP2PPutPagesPersistent (void *, void *, void *);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -39,14 +39,9 @@
|
||||
OSnv_rdcr4 stubOsnv_rdcr4;
|
||||
OSnv_rdxcr0 stubOsnv_rdxcr0;
|
||||
OSnv_cpuid stubOsnv_cpuid;
|
||||
OSnv_rdmsr stubOsnv_rdmsr;
|
||||
OSnv_wrmsr stubOsnv_wrmsr;
|
||||
OSSpinLoop stubOsSpinLoop;
|
||||
OSSetSurfaceName stubOsSetSurfaceName;
|
||||
|
||||
OSObjectEventNotification stubOsObjectEventNotification;
|
||||
OSPageArrayGetPhysAddr stubOsPageArrayGetPhysAddr;
|
||||
OSInternalReserveFreeCallback stubOsInternalReserveFreeCallback;
|
||||
OSInternalReserveAllocCallback stubOsInternalReserveAllocCallback;
|
||||
|
||||
#endif // OS_STUB_H
|
||||
|
||||
@@ -26,11 +26,6 @@
|
||||
|
||||
#include "gpu/gpu.h" // NBADDR, POBJGPU
|
||||
|
||||
// HWBC_UPSTREAM_BUS_SPEED commands
|
||||
#define HWBC_UPSTREAM_BUS_SPEED_GEN1PCIE 1
|
||||
#define HWBC_UPSTREAM_BUS_SPEED_GEN2PCIE 2
|
||||
#define HWBC_UPSTREAM_BUS_SPEED_GEN3PCIE 3
|
||||
|
||||
/**************** Resource Manager Defines and Structures ******************\
|
||||
* *
|
||||
* Module: HWBC.H *
|
||||
@@ -40,37 +35,12 @@
|
||||
struct OBJCL;
|
||||
typedef struct OBJHWBC *POBJHWBC;
|
||||
typedef struct OBJHWBC OBJHWBC;
|
||||
typedef struct HWBC_APERTURE *PHWBC_APERTURE;
|
||||
typedef struct HWBC_APERTURE HWBC_APERTURE;
|
||||
|
||||
// These values define maximum number of targets/apertures to be supported in
|
||||
// the OBJHWBC object.
|
||||
#define NUM_HWBC_TARGETS 4
|
||||
#define NUM_HWBC_APERTURES 3
|
||||
|
||||
#define PCI_P2P_PRE_BL 0x00000024 /* RW-4R */
|
||||
#define PCI_P2P_PRE_BL_B64BIT 3:0 /* C--VF */
|
||||
#define PCI_P2P_PRE_BL_B64BIT_YES 0x00000001 /* C---V */
|
||||
#define PCI_P2P_PRE_BL_PREFETCH_MEM_BASE 15:4 /* RWIUF */
|
||||
#define PCI_P2P_PRE_BL_L64BIT 19:16 /* C--VF */
|
||||
#define PCI_P2P_PRE_BL_L64BIT_YES 0x00000001 /* C---V */
|
||||
#define PCI_P2P_PRE_BL_PREFETCH_MEM_LIMIT 31:20 /* RWIUF */
|
||||
#define PCI_P2P_PRE_BU32 0x00000028 /* RW-4R */
|
||||
#define PCI_P2P_PRE_BU32_BASE_UPPER_BITS 31:0 /* RWIUF */
|
||||
#define PCI_P2P_PRE_LU32 0x0000002C /* RW-4R */
|
||||
#define PCI_P2P_PRE_LU32_LIMIT_UPPER_BITS 31:0 /* RWIUF */
|
||||
|
||||
#define BR03_REG(p, i) (p[NV_PES_XVU_ ## i / sizeof(*p)])
|
||||
|
||||
#define BR03_BAR0_SIZE (16*1024)
|
||||
#define BR03_GPU_REGISTER_ALIAS_OFFSET 0x4FC000
|
||||
|
||||
NvBool objClSetPcieHWBC(OBJGPU *, OBJCL*); // Find all Broadcast resources in the hierarchy above the GPU
|
||||
|
||||
// Disables ASPM on downstream ports of any BR04 A03 (or later) that is parent of device at 'bus'.
|
||||
NV_STATUS Nvidia_BR04_disableDownstreamASPM(NvU8);
|
||||
|
||||
|
||||
//
|
||||
// Bridge resource type
|
||||
//
|
||||
@@ -104,20 +74,6 @@ struct OBJHWBC
|
||||
|
||||
RmPhysAddr gpuPhysAddr;
|
||||
|
||||
//
|
||||
// BR04: This array is indexed by GPU instance number. If the GPU referred
|
||||
// to by that instance is not behind this BR04 -1 is stored at that index;
|
||||
// if it is behind this BR04 the downstream port it's behind is stored
|
||||
// there. The information is necessary to determine which BR04s must be
|
||||
// involved to broadcast between some set of GPUs, and also to determine
|
||||
// how to program redirection windows for unicast access.
|
||||
//
|
||||
NvS8 dpForGpuInstance[NV_MAX_DEVICES];
|
||||
|
||||
// For mapping state
|
||||
NvS8 mappingTarget;
|
||||
NvU32 mappingCount;
|
||||
|
||||
// Private data
|
||||
NvBool hasPlxFirmwareInfo;
|
||||
NvU32 fwVersion;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2000-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2000-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -31,6 +31,7 @@
|
||||
|
||||
#include "core/core.h"
|
||||
#include "nvlimits.h"
|
||||
#include "nvmisc.h"
|
||||
|
||||
#define IsDeviceDestroyed(p) (gpuGetDeviceInstance(p) == NV_MAX_DEVICES)
|
||||
|
||||
@@ -58,7 +59,7 @@ void RmInitScalability(OBJGPU *pGpu);
|
||||
do { \
|
||||
if (sizeof(arr) > sizeof(void *)) \
|
||||
{ \
|
||||
NV_ASSERT(SLI_LOOP_ARRAY_SIZE == (sizeof(arr) / sizeof(arr[0]))); \
|
||||
NV_ASSERT(SLI_LOOP_ARRAY_SIZE == NV_ARRAY_ELEMENTS(arr)); \
|
||||
} \
|
||||
} while (0)
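This hunk replaces the hand-written sizeof division with NV_ARRAY_ELEMENTS, which is added earlier in this change set. A small illustration (not part of the diff) of why the two forms are equivalent:

#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))

static int counts[8];

// Both evaluate to 8; the macro states the intent once instead of repeating
// the array name in the element-size term.
static const int nManual = (int)(sizeof(counts) / sizeof(counts[0]));
static const int nMacro  = (int)NV_ARRAY_ELEMENTS(counts);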
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -198,7 +198,7 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
|
||||
// If the NON_PRIVILEGED flag is specified, the call will be allowed from any
|
||||
// client.
|
||||
//
|
||||
#define RMCTRL_FLAGS_NON_PRIVILEGED 0x000000010
|
||||
#define RMCTRL_FLAGS_NON_PRIVILEGED 0x000000008
|
||||
|
||||
//
|
||||
// The resman rmcontrol handler will grab the per-device lock instead
|
||||
@@ -206,7 +206,7 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
|
||||
//
|
||||
// Please be sure you know what you're doing before using this!
|
||||
//
|
||||
#define RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY 0x000000040
|
||||
#define RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY 0x000000010
|
||||
|
||||
//
|
||||
// This flag is equivalent to PRIVILEGED when the RM access rights
|
||||
@@ -217,19 +217,19 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
|
||||
// previously PRIVILEGED. Once access rights are enabled, this flag will no
|
||||
// longer be necessary.
|
||||
//
|
||||
#define RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED 0x000000100 // for Resserv Access Rights migration
|
||||
#define RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED 0x000000020 // for Resserv Access Rights migration
|
||||
|
||||
//
|
||||
// This flag specifies that the control shall be directly forwarded to the
|
||||
// physical object if called on the CPU-RM kernel.
|
||||
//
|
||||
#define RMCTRL_FLAGS_ROUTE_TO_PHYSICAL 0x000000200
|
||||
#define RMCTRL_FLAGS_ROUTE_TO_PHYSICAL 0x000000040
|
||||
|
||||
//
|
||||
// If the INTERNAL flag is specified, the call will only be allowed
|
||||
// to be issued from RM itself. Otherwise, NV_ERR_NOT_SUPPORTED is returned.
|
||||
//
|
||||
#define RMCTRL_FLAGS_INTERNAL 0x000000400
|
||||
#define RMCTRL_FLAGS_INTERNAL 0x000000080
|
||||
|
||||
//
|
||||
// If the API_LOCK_READONLY flag is specified, the call will acquire the
|
||||
@@ -237,65 +237,57 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
|
||||
// also taken the read-only API lock. This flag is ignored if read-only API
|
||||
// locking is disabled in RM.
|
||||
//
|
||||
#define RMCTRL_FLAGS_API_LOCK_READONLY 0x000000800
|
||||
|
||||
//
|
||||
// If the :GPU_LOCK_READONLY flag is specified, the call will acquire a
|
||||
// read-only GPU lock and may run concurrently with other operations that have
|
||||
// also taken a read-only GPU lock. This flag is ignored if read-only GPU
|
||||
// locking is disabled in RM.
|
||||
//
|
||||
#define RMCTRL_FLAGS_GPU_LOCK_READONLY 0x000001000
|
||||
#define RMCTRL_FLAGS_API_LOCK_READONLY 0x000000100
|
||||
|
||||
//
|
||||
// This flag specifies that the control shall be directly forwarded to the
|
||||
// VGPU host if called from a guest (where IS_VIRTUAL() is true)
|
||||
//
|
||||
#define RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST 0x000002000
|
||||
#define RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST 0x000000200
|
||||
|
||||
//
|
||||
// This flag specifies that the control output does not depend on the input
|
||||
// parameters and can be cached on the receiving end.
|
||||
// The cache is transparent and may not exist on all platforms.
|
||||
//
|
||||
#define RMCTRL_FLAGS_CACHEABLE 0x000004000
|
||||
#define RMCTRL_FLAGS_CACHEABLE 0x000000400
|
||||
|
||||
//
|
||||
// This flag specifies that the control parameters will be
|
||||
// copied out back to the caller even if the control call fails.
|
||||
//
|
||||
#define RMCTRL_FLAGS_COPYOUT_ON_ERROR 0x000008000
|
||||
#define RMCTRL_FLAGS_COPYOUT_ON_ERROR 0x000000800
|
||||
|
||||
// ??
|
||||
#define RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS 0x000010000
|
||||
#define RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS 0x000001000
|
||||
|
||||
//
|
||||
// This flag specifies that the control can be run by an admin privileged
|
||||
// client running in a full SRIOV, vGPU-GSP-ENABLED hypervisor environment.
|
||||
// Overrides regular privilege level flags.
|
||||
//
|
||||
#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_VGPU_GSP 0x000020000
|
||||
#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_VGPU_GSP 0x000002000
|
||||
|
||||
//
|
||||
// This flag specifies that the control can be run by an admin privileged
|
||||
// client running in a full SRIOV, vGPU-GSP-DISABLED hypervisor environment.
|
||||
// Overrides regular privilege level flags.
|
||||
//
|
||||
#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_SRIOV 0x000040000
|
||||
#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_SRIOV 0x000004000
|
||||
|
||||
//
|
||||
// This flag specifies that the control can be run by an admin privileged
|
||||
// client running in a non-SRIOV or SRIOV-Heavy hypervisor environment.
|
||||
// Overrides regular privilege level flags.
|
||||
//
|
||||
#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_LEGACY 0x000080000
|
||||
#define RMCTRL_FLAGS_CPU_PLUGIN_FOR_LEGACY 0x000008000
|
||||
|
||||
//
|
||||
// This flag specifies that the control can be run by an unprivileged
|
||||
// client running in GSP-RM when SRIOV and vGPU-GSP are ENABLED.
|
||||
// Overrides regular privilege level flags.
|
||||
//
|
||||
#define RMCTRL_FLAGS_GSP_PLUGIN_FOR_VGPU_GSP 0x000100000
|
||||
#define RMCTRL_FLAGS_GSP_PLUGIN_FOR_VGPU_GSP 0x000010000
|
||||
|
||||
//
|
||||
// This flag specifies that the control output depends on the input
|
||||
@@ -303,16 +295,17 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
|
||||
// result depends on the input and the input varies across controls,
// the cache should be handled on a per-control basis.
|
||||
//
|
||||
#define RMCTRL_FLAGS_CACHEABLE_BY_INPUT 0x000200000
|
||||
#define RMCTRL_FLAGS_CACHEABLE_BY_INPUT 0x000020000
|
||||
|
||||
|
||||
//
|
||||
// This flag specifies that ROUTE_TO_PHYSICAL control is implemented on vGPU Guest RM.
|
||||
// If a ROUTE_TO_PHYSICAL control is supported within vGPU Guest RM,
|
||||
// it should either have this flag set (indicating the implementation in the vGPU Guest RM) or
|
||||
// the ROUTE_TO_VGPU_HOST flag set (indicating the implementation in vGPU Host RM).
|
||||
// Without either of these flags set, the control will return NV_ERR_NOT_SUPPORTED.
|
||||
//
|
||||
#define RMCTRL_FLAGS_PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST 0x000400000
|
||||
#define RMCTRL_FLAGS_PHYSICAL_IMPLEMENTED_ON_VGPU_GUEST 0x000040000
|
||||
|
||||
// The combination of cacheable flags
|
||||
#define RMCTRL_FLAGS_CACHEABLE_ANY (RMCTRL_FLAGS_CACHEABLE | RMCTRL_FLAGS_CACHEABLE_BY_INPUT)
|
||||
@@ -322,7 +315,29 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmC
|
||||
// An entry is required for any control calls that set this in
|
||||
// serverControlLookupSecondClient or Resource Server will NV_ASSERT(0).
|
||||
//
|
||||
#define RMCTRL_FLAGS_DUAL_CLIENT_LOCK 0x000800000
|
||||
#define RMCTRL_FLAGS_DUAL_CLIENT_LOCK 0x000080000
|
||||
|
||||
//
|
||||
// This flag specifies that the control call is for RM test only code.
|
||||
//
|
||||
#define RMCTRL_FLAGS_RM_TEST_ONLY_CODE 0x000100000
|
||||
|
||||
//
|
||||
// This flag specifies that all client handles in RM need to be locked.
|
||||
// This flag should almost never be used, the only cases where it is required
|
||||
// are cases where an RM API loops over several/arbitrary clients in RM using
// something like serverutilGetFirstClientUnderLock. The RW API lock is required
// to use this flag.
|
||||
//
|
||||
#define RMCTRL_FLAGS_ALL_CLIENT_LOCK 0x000200000
|
||||
|
||||
//
|
||||
// This flag specifies that the API lock should not be acquired for this
|
||||
// RM Control. DO NOT use this flag without consulting Locking/Resource Server
|
||||
// experts first and please consider other alternatives as much as possible
|
||||
// before resorting to using this flag!
|
||||
//
|
||||
#define RMCTRL_FLAGS_NO_API_LOCK 0x000400000
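The renumbering above compresses every RMCTRL flag into a lower bit position (for example ROUTE_TO_PHYSICAL moves from 0x200 to 0x40 and INTERNAL from 0x400 to 0x80), so only the symbolic names remain stable. A small sketch (not part of the diff) of how the flags are OR-ed together, using the post-change values shown above:

// Illustrative only, with the new values from this diff.
NvU32 ctrlFlags = RMCTRL_FLAGS_ROUTE_TO_PHYSICAL   /* 0x040 (was 0x200) */
                | RMCTRL_FLAGS_INTERNAL            /* 0x080 (was 0x400) */
                | RMCTRL_FLAGS_API_LOCK_READONLY;  /* 0x100 (was 0x800) */
// ctrlFlags == 0x1c0 with the new numbering; the same combination was 0xe00 before.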
|
||||
|
||||
//
|
||||
// 'ACCESS_RIGHTS' Attribute
|
||||
|
||||
src/nvidia/inc/kernel/rmapi/lock_stress.h (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "g_lock_stress_nvoc.h"
|
||||
|
||||
#ifndef LOCK_STRESS_H
|
||||
#define LOCK_STRESS_H
|
||||
|
||||
#include "gpu/gpu_resource.h"
|
||||
#include "nvoc/prelude.h"
|
||||
#include "nvstatus.h"
|
||||
#include "resserv/resserv.h"
|
||||
|
||||
#include "ctrl/ctrl0100.h"
|
||||
|
||||
NVOC_PREFIX(lockStressObj) class LockStressObject : GpuResource
|
||||
{
|
||||
public:
|
||||
NV_STATUS lockStressObjConstruct(LockStressObject *pResource,
|
||||
CALL_CONTEXT *pCallContext,
|
||||
RS_RES_ALLOC_PARAMS_INTERNAL *pParams) :
|
||||
GpuResource(pCallContext, pParams);
|
||||
|
||||
void lockStressObjDestruct(LockStressObject *pResource);
|
||||
|
||||
//
|
||||
// RMCTRL Exported methods -- Category: LOCK_STRESS
|
||||
//
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_RESET_LOCK_STRESS_STATE,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED))
|
||||
NV_STATUS lockStressObjCtrlCmdResetLockStressState(LockStressObject *pResource);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_ALL_RM_LOCKS,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressAllRmLocks(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_ALL_RM_LOCKS_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLock(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED, API_LOCK_READONLY))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressApiLockReadMode(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_API_LOCK_READ_MODE_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK, API_LOCK_READONLY))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressNoGpusLockApiLockReadMode(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalAllRmLocks(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_ALL_RM_LOCKS_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLock(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED, API_LOCK_READONLY))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalApiLockReadMode(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_API_LOCK_READ_MODE_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED, NO_GPUS_LOCK, API_LOCK_READONLY))
|
||||
NV_STATUS lockStressObjCtrlCmdPerformLockStressInternalNoGpusLockApiLockReadMode(LockStressObject *pResource,
|
||||
NV0100_CTRL_PERFORM_LOCK_STRESS_INTERNAL_NO_GPUS_LOCK_API_LOCK_READ_MODE_PARAMS *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NV0100_CTRL_CMD_GET_LOCK_STRESS_COUNTERS,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED))
|
||||
NV_STATUS lockStressObjCtrlCmdGetLockStressCounters(LockStressObject *pResource,
|
||||
NV0100_CTRL_GET_LOCK_STRESS_COUNTERS_PARAMS *pParams);
|
||||
|
||||
private:
|
||||
|
||||
// Internal RM objects for internal RM API invocation
|
||||
NvHandle hInternalClient;
|
||||
NvHandle hInternalDevice;
|
||||
NvHandle hInternalSubdevice;
|
||||
NvHandle hInternalLockStressObject;
|
||||
};
|
||||
|
||||
#endif // LOCK_STRESS_H
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -45,11 +45,6 @@ typedef struct gpuObject *gpuObjectHandle;
|
||||
|
||||
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
|
||||
|
||||
|
||||
NV_STATUS calculatePCIELinkRateMBps(NvU32 lanes,
|
||||
NvU32 pciLinkMaxSpeed,
|
||||
NvU32 *pcieLinkRate);
|
||||
|
||||
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
|
||||
|
||||
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
|
||||
@@ -284,12 +279,15 @@ NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(gpuFaultInfo *pFaultInfo,
|
||||
NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo,
|
||||
NvBool bEnable);
|
||||
|
||||
NV_STATUS nvGpuOpsKeyRotationChannelDisable(struct gpuChannel *channelList[],
|
||||
NvU32 channelListCount);
|
||||
|
||||
// Interface used for CCSL
|
||||
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
|
||||
gpuChannelHandle channel);
|
||||
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
|
||||
NV_STATUS nvGpuOpsCcslRotateKey(UvmCslContext *contextList[],
|
||||
NvU32 contextListCount);
|
||||
NV_STATUS nvGpuOpsCcslContextUpdate(UvmCslContext *contextList[],
|
||||
NvU32 contextListCount);
|
||||
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
|
||||
NvU8 direction);
|
||||
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
|
||||
@@ -307,7 +305,6 @@ NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
|
||||
NvU32 bufferSize,
|
||||
NvU8 const *inputBuffer,
|
||||
NvU8 const *decryptIv,
|
||||
NvU32 keyRotationId,
|
||||
NvU8 *outputBuffer,
|
||||
NvU8 const *addAuthData,
|
||||
NvU32 addAuthDataSize,
|
||||
@@ -323,8 +320,7 @@ NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
|
||||
NvU8 direction,
|
||||
NvU64 increment,
|
||||
NvU8 *iv);
|
||||
NV_STATUS nvGpuOpsLogEncryption(struct ccslContext_t *ctx,
|
||||
NvU8 direction,
|
||||
NvU32 bufferSize);
|
||||
NV_STATUS nvGpuOpsLogDeviceEncryption(struct ccslContext_t *ctx,
|
||||
NvU32 bufferSize);
|
||||
|
||||
#endif /* _NV_GPU_OPS_H_*/
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -32,6 +32,7 @@
|
||||
typedef struct _RM_API RM_API;
|
||||
typedef struct RsServer RsServer;
|
||||
typedef struct OBJGPU OBJGPU;
|
||||
typedef struct RsClient RsClient;
|
||||
typedef struct RsResource RsResource;
|
||||
typedef struct RsCpuMapping RsCpuMapping;
|
||||
typedef struct CALL_CONTEXT CALL_CONTEXT;
|
||||
@@ -101,6 +102,24 @@ NvBool rmapiLockIsWriteOwner(void);
|
||||
*/
|
||||
void rmapiLockGetTimes(NV0000_CTRL_SYSTEM_GET_LOCK_TIMES_PARAMS *);
|
||||
|
||||
/**
|
||||
* Indicates current thread is in the RTD3 PM path (rm_transition_dynamic_power) which
|
||||
* means that certain locking asserts/checks must be skipped due to inability to acquire
|
||||
* the API lock in this path.
|
||||
*/
|
||||
void rmapiEnterRtd3PmPath(void);
|
||||
|
||||
/**
|
||||
* Signifies that current thread is leaving the RTD3 PM path, restoring lock
|
||||
* asserting/checking behavior to normal.
|
||||
*/
|
||||
void rmapiLeaveRtd3PmPath(void);
|
||||
|
||||
/**
|
||||
* Checks if current thread is currently running in the RTD3 PM path.
|
||||
*/
|
||||
NvBool rmapiInRtd3PmPath(void);
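A hypothetical sketch (not part of the diff) of how the three RTD3 markers above are intended to bracket the dynamic-power transition path described in their comments:

static void rtd3TransitionSketch(void)
{
    rmapiEnterRtd3PmPath();     // relax the locking asserts that cannot hold on this path

    // ... RTD3 entry/exit work done by rm_transition_dynamic_power ...

    rmapiLeaveRtd3PmPath();     // restore normal lock assert/check behavior
}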
|
||||
|
||||
/**
|
||||
* Type of RM API client interface
|
||||
*/
|
||||
@@ -287,7 +306,7 @@ NV_STATUS rmapiControlCacheGet(NvHandle hClient, NvHandle hObject, NvU32 cmd,
|
||||
void* params, NvU32 paramsSize);
|
||||
NV_STATUS rmapiControlCacheSet(NvHandle hClient, NvHandle hObject, NvU32 cmd,
|
||||
void* params, NvU32 paramsSize);
|
||||
NV_STATUS rmapiControlCacheSetGpuInstForObject(NvHandle hClient, NvHandle hObject, NvU32 gpuInst);
|
||||
NV_STATUS rmapiControlCacheSetGpuAttrForObject(NvHandle hClient, NvHandle hObject, OBJGPU *pGpu);
|
||||
void rmapiControlCacheFreeAllCacheForGpu(NvU32 gpuInst);
|
||||
void rmapiControlCacheSetMode(NvU32 mode);
|
||||
NvU32 rmapiControlCacheGetMode(void);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
@@ -127,17 +127,18 @@ RmClient *serverutilGetClientUnderLock(NvHandle hClient);
|
||||
*
|
||||
* @param[in] hClient The client to acquire
|
||||
* @param[in] access LOCK_ACCESS_*
|
||||
* @param[out] ppClientEntry Pointer to the CLIENT_ENTRY
|
||||
* @param[out] ppClient Pointer to the RmClient
|
||||
*/
|
||||
NV_STATUS serverutilAcquireClient(NvHandle hClient, LOCK_ACCESS_TYPE access, RmClient **ppClient);
|
||||
NV_STATUS serverutilAcquireClient(NvHandle hClient, LOCK_ACCESS_TYPE access, CLIENT_ENTRY **ppClientEntry, RmClient **ppClient);
|
||||
|
||||
/**
|
||||
* Unlock a client
|
||||
*
|
||||
* @param[in] access LOCK_ACCESS_*
|
||||
* @param[in] pClient Pointer to the RmClient
|
||||
* @param[in] pClientEntry Pointer to the CLIENT_ENTRY
|
||||
*/
|
||||
void serverutilReleaseClient(LOCK_ACCESS_TYPE access, RmClient *pClient);
|
||||
void serverutilReleaseClient(LOCK_ACCESS_TYPE access, CLIENT_ENTRY *pClientEntry);
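An illustrative pairing (not part of the diff) of the updated acquire/release signatures, which now thread a CLIENT_ENTRY through both calls; LOCK_ACCESS_READ is assumed to be one of the LOCK_ACCESS_* values mentioned in the comments above:

static void clientLockSketch(NvHandle hClient)
{
    CLIENT_ENTRY *pClientEntry = NULL;
    RmClient     *pClient      = NULL;

    if (serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClientEntry, &pClient) == NV_OK)
    {
        // ... use pClient while the client lock is held ...
        serverutilReleaseClient(LOCK_ACCESS_READ, pClientEntry);
    }
}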
|
||||
|
||||
/**
|
||||
* Get the first valid client pointer in resource server without taking any locks.
|
||||
|
||||
src/nvidia/inc/kernel/vgpu/dev_vgpu.h (new file, 324 lines)
@@ -0,0 +1,324 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __vgpu_dev_nv_vgpu_h__
|
||||
#define __vgpu_dev_nv_vgpu_h__
|
||||
|
||||
#include "nvtypes.h"
|
||||
#include "nvmisc.h"
|
||||
|
||||
#include "nvctassert.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* EMULATED REGISTERS - START */
|
||||
/******************************************************************************/
|
||||
|
||||
#define NV_VGPU_GUEST_OS_TYPE_ARCH 2:0 /* -W-4F */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_ARCH_UNKNOWN 0 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_ARCH_AARCH64 1 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_ARCH_X86_64 2 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_ARCH_INVALID 3 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_OS 7:3 /* -W-4F */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_OS_UNKNOWN 0 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_OS_LINUX 1 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_OS_WINDOWS7 2 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_OS_WINDOWS10 3 /* -W--V */
|
||||
#define NV_VGPU_GUEST_OS_TYPE_OS_INVALID 4 /* -W--V */
|
||||
// All remaining values for NV_VGPU_GUEST_OS_TYPE_OS are reserved/not supported.
|
||||
#define NV_VGPU_GUEST_OS_TYPE_PAGE_SIZE 15:8 /* -W-4F */
|
||||
|
||||
#define NV_VGPU_SHARED_MEMORY__SIZE_1 4 /* */
|
||||
#define NV_VGPU_SHARED_MEMORY_TARGET 1:0 /* RWIVF */
|
||||
#define NV_VGPU_SHARED_MEMORY_TARGET_PHYS_NVM 0x00000001 /* RW--V */
|
||||
#define NV_VGPU_SHARED_MEMORY_TARGET_PHYS_PCI_COHERENT 0x00000003 /* RW--V */
|
||||
#define NV_VGPU_SHARED_MEMORY_STATUS 3:3 /* RWIVF */
|
||||
#define NV_VGPU_SHARED_MEMORY_STATUS_INVALID 0x00000000 /* RW--V */
|
||||
#define NV_VGPU_SHARED_MEMORY_STATUS_VALID 0x00000001 /* RW--V */
|
||||
#define NV_VGPU_SHARED_MEMORY_SIZE 5:4 /* RWIVF */
|
||||
#define NV_VGPU_SHARED_MEMORY_SIZE_4KB 0x00000000 /* RW--V */
|
||||
#define NV_VGPU_SHARED_MEMORY_ADDR_LO 31:12 /* RWIVF */
|
||||
|
||||
#define NV_VGPU_SHARED_MEMORY_HI_ADDR 19:0 /* RWIVF */
|
||||
|
||||
/******************************************************************************/
|
||||
/* EMULATED REGISTERS - END */
|
||||
/******************************************************************************/
|
||||
|
||||
/******************************************************************************/
|
||||
/* SHARED MEMORY - START */
|
||||
/******************************************************************************/
|
||||
|
||||
/* vGPU Current Pstate */
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_CURRENT_PSTATE 0x00000090
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_CURRENT_PSTATE_VALUE 31:0
|
||||
|
||||
/* vGPU ECC errors */
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_TYPE 0x00000094
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_TYPE_VALUE 31:0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_UNIT 0x00000098
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_UNIT_VALUE 31:0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_ERROR_COUNT 0x0000009c
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_ERROR_COUNT_VALUE 31:0
|
||||
|
||||
/* vGPU backdoor VNC state */
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC 0x000000a0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_STATE 31:0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_STATE_ENABLED 0x00000001
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_STATE_DISABLED 0x00000000
|
||||
|
||||
/* vGPU backdoor VNC support */
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT 0x000000a4
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT_VALUE 31:0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT_ENABLED 0x0001
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_VNC_SUPPORT_DISABLED 0x0000
|
||||
|
||||
/* ecc fatal poison error*/
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_POISON_ERROR 0x000000a8
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_ECC_POISON_ERROR_VALUE 31:0
|
||||
|
||||
/* NvEnc Stats Reporting State */
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE 0x000000ac
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_VALUE 31:0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_DISABLED 0x00000000
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_ENABLED 0x00000001
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVENC_STATS_REPORTING_STATE_NOT_SUPPORTED 0x00000002
|
||||
|
||||
/* Nvlink inband message response available*/
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE 0x000000b0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_NONE 0x00000000
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_PROBE 0:0
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_PROBE_CLEAR 0x00000000
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_PROBE_PENDING 0x00000001
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_MC_SETUP 1:1
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_MC_SETUP_CLEAR 0x00000000
|
||||
#define NV_VGPU_SHARED_MEMORY_POINTER_NVLINK_INBAND_RESPONSE_MC_SETUP_PENDING 0x00000001
|
||||
|
||||
/******************************************************************************/
|
||||
/* SHARED MEMORY - END */
|
||||
/******************************************************************************/
|
||||
|
||||
/******************************************************************************/
|
||||
/* EVENT MEMORY - START */
|
||||
/******************************************************************************/
|
||||
|
||||
/* Event ring entry (10 words) */
#define NV_VGPU_EV__SIZE_1                          0x0000000a /*       */
#define NV_VGPU_EV_HCLIENT                          0x00000000 /* RW-4R */
#define NV_VGPU_EV_HOBJECT                          0x00000004 /* RW-4R */
#define NV_VGPU_EV_NOTIFY_INDEX                     0x00000008 /* RW-4R */
#define NV_VGPU_EV_FLAGS                            0x0000000c /* RW-4R */
#define NV_VGPU_EV_FLAGS_ALLOCATED                         3:0 /* RW-4R */
#define NV_VGPU_EV_FLAGS_ALLOCATED_GUEST            0x00000000 /* RW--V */
#define NV_VGPU_EV_FLAGS_ALLOCATED_PLUGIN           0x00000001 /* RW--V */
#define NV_VGPU_EV_FLAGS_HAS_NOTIFIER_DATA                 4:4 /* RW-4R */
#define NV_VGPU_EV_FLAGS_HAS_NOTIFIER_DATA_FALSE    0x00000000 /* RW--V */
#define NV_VGPU_EV_FLAGS_HAS_NOTIFIER_DATA_TRUE     0x00000001 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE                            31:16 /* RW-4R */
#define NV_VGPU_EV_FLAGS_TYPE_ROBUST_CHANNEL_ERROR  0x00000000 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE_EVENT_INTR_MTHD       0x00000001 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE_VBLANK_INTR           0x00000002 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE_VNC                   0x00000003 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE_PSTATE                0x00000004 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE_ECC                   0x00000005 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE_NVENC_REPORTING_STATE 0x00000006 /* RW--V */
#define NV_VGPU_EV_FLAGS_TYPE_INBAND_RESPONSE       0x00000007 /* RW--V */
#define NV_VGPU_EV_NOTIFIER_TIMESTAMP               0x00000010 /* RW-4R */
#define NV_VGPU_EV_NOTIFIER_TIMESTAMP_HI            0x00000014 /* RW-4R */
#define NV_VGPU_EV_NOTIFIER_INFO32                  0x00000018 /* RW-4R */
#define NV_VGPU_EV_NOTIFIER_INFO16                  0x0000001c /* RW-4R */
#define NV_VGPU_EV_NOTIFIER_INFO16_VALUE                  15:0 /* RW-4R */
#define NV_VGPU_EV_NOTIFIER_STATUS                  0x00000020 /* RW-4R */
#define NV_VGPU_EV_ROBUST_CHANNEL_ERROR_CHID        0x00000024 /* RW-4R */

typedef struct {
    volatile NvU32 hClient;
    volatile NvU32 hObject;
    volatile NvU32 notifyIndex;
    volatile NvU32 flags;
    volatile NvU32 timestampLo;
    volatile NvU32 timestampHi;
    volatile NvU32 info32;
    volatile NvU32 info16;
    volatile NvU32 status;
    volatile NvU32 rcChid;
} VGPU_EVENT_BUF_ENTRY;

#define VGPU_EVENT_BUF_ENTRY_SIZE  (sizeof(VGPU_EVENT_BUF_ENTRY))
#define VGPU_EVENT_BUF_ENTRY_COUNT (RM_PAGE_SIZE / VGPU_EVENT_BUF_ENTRY_SIZE)

ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, hClient    ) == NV_VGPU_EV_HCLIENT);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, hObject    ) == NV_VGPU_EV_HOBJECT);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, notifyIndex) == NV_VGPU_EV_NOTIFY_INDEX);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, flags      ) == NV_VGPU_EV_FLAGS);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, timestampLo) == NV_VGPU_EV_NOTIFIER_TIMESTAMP);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, timestampHi) == NV_VGPU_EV_NOTIFIER_TIMESTAMP_HI);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, info32     ) == NV_VGPU_EV_NOTIFIER_INFO32);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, info16     ) == NV_VGPU_EV_NOTIFIER_INFO16);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, status     ) == NV_VGPU_EV_NOTIFIER_STATUS);
ct_assert(NV_OFFSETOF(VGPU_EVENT_BUF_ENTRY, rcChid     ) == NV_VGPU_EV_ROBUST_CHANNEL_ERROR_CHID);
ct_assert(VGPU_EVENT_BUF_ENTRY_SIZE == (NV_VGPU_EV__SIZE_1 * sizeof (NvU32)));

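/*
 * Illustrative sketch (not part of the original header): decoding the type of
 * one ring entry so a guest handler can dispatch on it.  The helper name is
 * made up, and DRF_VAL() from nvmisc.h is assumed to be available.
 */
static NV_INLINE NvU32 vgpuEventEntryGetType(const VGPU_EVENT_BUF_ENTRY *pEntry)
{
    // NV_VGPU_EV_FLAGS_TYPE occupies bits 31:16 of the flags word; the result
    // can be compared against NV_VGPU_EV_FLAGS_TYPE_ROBUST_CHANNEL_ERROR, etc.
    return DRF_VAL(_VGPU, _EV_FLAGS, _TYPE, pEntry->flags);
}
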
/******************************************************************************/
/* EVENT MEMORY - END */
/******************************************************************************/

/* virtual GPU */
#ifndef NV_XVE_ID_DEVICE_CHIP_VGPU
#define NV_XVE_ID_DEVICE_CHIP_VGPU                  0x00000f00 /* R---V */
#endif

/******************************************************************************/
/* GSP Control buffer shared between Guest RM and GSP Plugin - START */
/******************************************************************************/
/******************************************************************************/
/* GSP Control buffer format - Version 1 - START */
/******************************************************************************/

#define VGPU_GSP_BUF_ADDR_V1_VALIDITY               0:0
#define VGPU_GSP_BUF_ADDR_V1_VALIDITY_INVALID       0x00000000
#define VGPU_GSP_BUF_ADDR_V1_VALIDITY_VALID         0x00000001
#define VGPU_GSP_BUF_ADDR_V1_APERTURE               2:1
#define VGPU_GSP_BUF_ADDR_V1_APERTURE_INVALID       0x00000000
#define VGPU_GSP_BUF_ADDR_V1_APERTURE_SYSMEM        0x00000001
#define VGPU_GSP_BUF_ADDR_V1_APERTURE_FBMEM         0x00000002
#define VGPU_GSP_BUF_ADDR_V2_SIZE                   4:3
#define VGPU_GSP_BUF_ADDR_V2_SIZE_4K                0x00000000
#define VGPU_GSP_BUF_ADDR_V2_SIZE_128K              0x00000001
#define VGPU_GSP_BUF_ADDR_V1_PFN                    63:12

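/*
 * Illustrative sketch (not part of the original header): packing a 4K-aligned
 * system-memory page into the 64-bit buffer-address word described by the
 * VGPU_GSP_BUF_ADDR_V1_* fields above (VALIDITY 0:0, APERTURE 2:1, PFN 63:12).
 * The helper name and the hand-written shift positions are for illustration
 * only; real code would typically derive them from the field definitions.
 */
static NV_INLINE NvU64 vgpuGspPackBufAddrV1Sysmem(NvU64 pageAlignedPhysAddr)
{
    NvU64 addr = 0;

    addr |= ((NvU64)VGPU_GSP_BUF_ADDR_V1_VALIDITY_VALID  << 0);  // VALIDITY 0:0
    addr |= ((NvU64)VGPU_GSP_BUF_ADDR_V1_APERTURE_SYSMEM << 1);  // APERTURE 2:1
    addr |= (pageAlignedPhysAddr & ~0xFFFULL);                   // PFN      63:12

    return addr;
}
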
#define VGPU_GSP_CTRL_BUF_V1_VERSION 1
#define VGPU_GSP_CTRL_BUF_V2_VERSION 2

/****** Control buffer: written by guest RM and read by GSP vGPU plugin *******/

#define VGPU_GSP_CTRL_BUF_SIZE_V1 4096

typedef struct {
    NvU64 addr;
    NvU64 bar2Offset;
} VGPU_GSP_BUF_INFO;

typedef union {
    struct {
        volatile NvU32             version;              // Version of control buffer format
        volatile NvU32             requestId;            // Request sequence number
        volatile VGPU_GSP_BUF_INFO responseBuf;          // Response buffer address
        volatile VGPU_GSP_BUF_INFO msgBuf;               // RPC message buffer address
        volatile VGPU_GSP_BUF_INFO sharedMem;            // Shared memory buffer
        volatile VGPU_GSP_BUF_INFO eventBuf;             // Event buffer address
        volatile NvU32             getEventBuf;          // GET index in circular event buffer
        volatile NvU32             guestEccStatus;       // Guest ECC status
        volatile NvU64             sysmemBitMapTablePfn; // Root node's PFN value of the dirty sysmem tracking table
        volatile NvU32             guestOsType;          // Guest OS type
    };
    volatile NvU8 buf[VGPU_GSP_CTRL_BUF_SIZE_V1];
} VGPU_GSP_CTRL_BUF_V1;

// check size
ct_assert(sizeof(VGPU_GSP_CTRL_BUF_V1) == VGPU_GSP_CTRL_BUF_SIZE_V1);

// check field offsets
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, version             ) == 0x000);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, requestId           ) == 0x004);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, responseBuf         ) == 0x008);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, msgBuf              ) == 0x018);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, sharedMem           ) == 0x028);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, eventBuf            ) == 0x038);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, getEventBuf         ) == 0x048);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, guestEccStatus      ) == 0x04C);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, sysmemBitMapTablePfn) == 0x050);
ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, guestOsType         ) == 0x058);

/****** Response buffer: written by GSP vGPU plugin and read by guest RM ******/

#define VGPU_GSP_RESPONSE_BUF_SIZE_V1 4096

typedef union {
    struct {
        volatile NvU32 responseId;    // Response sequence number
        volatile NvU32 putEventBuf;   // PUT index in circular event buffer
        volatile NvU32 hostEccStatus; // Host ECC status
        volatile NvU32 usmType;       // Host USM Type
    };
    volatile NvU8 buf[VGPU_GSP_RESPONSE_BUF_SIZE_V1];
} VGPU_GSP_RESPONSE_BUF_V1;

// check size
ct_assert(sizeof(VGPU_GSP_RESPONSE_BUF_V1) == VGPU_GSP_RESPONSE_BUF_SIZE_V1);

// check field offsets
ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, responseId   ) == 0x000);
ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, putEventBuf  ) == 0x004);
ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, hostEccStatus) == 0x008);
ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, usmType      ) == 0x00C);

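/*
 * Illustrative sketch (not part of the original header): requestId/responseId
 * form the request handshake, while getEventBuf (written by the guest) and
 * putEventBuf (written by the plugin) index the circular event ring described
 * in the EVENT MEMORY section above.  A guest might drain pending events
 * roughly as follows; the function name and the exact update protocol are
 * assumptions for illustration only.
 */
static NV_INLINE void vgpuGspDrainEventRing(VGPU_GSP_CTRL_BUF_V1           *pCtrl,
                                            const VGPU_GSP_RESPONSE_BUF_V1 *pResp,
                                            const VGPU_EVENT_BUF_ENTRY     *pRing)
{
    // Consume entries until the guest GET index catches up with the plugin
    // PUT index, wrapping at the ring size (one RM page worth of entries).
    while (pCtrl->getEventBuf != pResp->putEventBuf)
    {
        const VGPU_EVENT_BUF_ENTRY *pEntry = &pRing[pCtrl->getEventBuf];

        (void)pEntry;   // dispatch on pEntry->flags / pEntry->info32 here

        pCtrl->getEventBuf = (pCtrl->getEventBuf + 1) % VGPU_EVENT_BUF_ENTRY_COUNT;
    }
}
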
/******************************************************************************/
/* GSP Control buffer format - Version 1 - END */
/******************************************************************************/

// Control buffer format for Guest RM
typedef union {
    volatile VGPU_GSP_CTRL_BUF_V1 v1;
} VGPU_GSP_CTRL_BUF_RM;

// Control buffer format for GSP vGPU Plugin
typedef union {
    volatile const VGPU_GSP_CTRL_BUF_V1 v1;
} VGPU_GSP_CTRL_BUF_PLUGIN;

// Response buffer format for Guest RM
typedef union {
    volatile const VGPU_GSP_RESPONSE_BUF_V1 v1;
} VGPU_GSP_RESPONSE_BUF_RM;

// Response buffer format for GSP vGPU Plugin
typedef union {
    volatile VGPU_GSP_RESPONSE_BUF_V1 v1;
} VGPU_GSP_RESPONSE_BUF_PLUGIN;

/******************************************************************************/
/* GSP Control buffer shared between Guest RM and GSP Plugin - END */
/******************************************************************************/

// VGPU GSP dirty sysmem tracking pfn format
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_VALIDITY             0:0
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_VALIDITY_INVALID     0x00000000
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_VALIDITY_VALID       0x00000001
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE             2:1
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE_INVALID     0x00000000
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE_SYSMEM      0x00000001
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_APERTURE_FBMEM       0x00000002
#define VGPU_GSP_SYSMEM_PFN_BITMAP_BUF_ADDR_PFN                  63:12

#define MAX_PFNS_PER_4K_PAGE 512

// VGPU GSP dirty sysmem tracking root node format
typedef struct {
    NvU16 nodeCount;   // count of allocated bitmap nodes
    NvU16 padding1;
    NvU32 padding2;
    NvU64 nodePfns[MAX_PFNS_PER_4K_PAGE - 1];
} VGPU_GSP_SYSMEM_BITMAP_ROOT_NODE;

ct_assert(sizeof(VGPU_GSP_SYSMEM_BITMAP_ROOT_NODE) == 0x1000);

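/*
 * Illustrative sketch (not part of the original header): walking the populated
 * second-level bitmap pages recorded in the root node.  The helper and
 * callback names are assumptions for illustration only.
 */
static NV_INLINE void vgpuGspForEachBitmapNodePfn(const VGPU_GSP_SYSMEM_BITMAP_ROOT_NODE *pRoot,
                                                  void (*pfnVisit)(NvU64 nodePfn))
{
    NvU16 i;

    // nodeCount is the number of allocated entries in nodePfns[]
    for (i = 0; i < pRoot->nodeCount; i++)
    {
        pfnVisit(pRoot->nodePfns[i]);
    }
}
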
#endif // __vgpu_dev_nv_vgpu_h__
|
||||
src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h (new file, 6361 lines; diff suppressed because it is too large)

src/nvidia/inc/kernel/vgpu/rpc.h (new file, 632 lines)
@@ -0,0 +1,632 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2008-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
//******************************************************************************
|
||||
//
|
||||
// Declarations for the RPC module.
|
||||
//
|
||||
// Description:
|
||||
// This module declares the RPC interface functions/macros.
|
||||
//
|
||||
//******************************************************************************
|
||||
|
||||
#ifndef __vgpu_dev_nv_rpc_h__
|
||||
#define __vgpu_dev_nv_rpc_h__
|
||||
|
||||
#include "class/cl84a0.h"
|
||||
#include "vgpu/rpc_headers.h"
|
||||
#include "gpu/dce_client/dce_client.h"
|
||||
#include "objrpc.h"
|
||||
#include "rpc_vgpu.h"
|
||||
|
||||
#include "vgpu_events.h"
|
||||
|
||||
#include "kernel/gpu/fifo/kernel_fifo.h"
|
||||
|
||||
typedef struct ContextDma ContextDma;
|
||||
|
||||
#define NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \
|
||||
allocflags, vasize, vamode, bFirstDevice, status) \
|
||||
do \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
NV0000_ALLOC_PARAMETERS root_alloc_params = {0}; \
|
||||
\
|
||||
root_alloc_params.hClient = hclient; \
|
||||
\
|
||||
if (!IsT234DorBetter(pGpu)) \
|
||||
{ \
|
||||
RmClient *pClient = serverutilGetClientUnderLock(hclient); \
|
||||
\
|
||||
/* Get process ID from the client database */ \
|
||||
if (pClient != NULL) \
|
||||
{ \
|
||||
CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); \
|
||||
NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); \
|
||||
\
|
||||
if (RMCFG_FEATURE_PLATFORM_UNIX && \
|
||||
(pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL)) \
|
||||
{ \
|
||||
root_alloc_params.processID = KERNEL_PID; \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
root_alloc_params.processID = pClient->ProcID; \
|
||||
NV_ASSERT(root_alloc_params.processID == osGetCurrentProcess()); \
|
||||
} \
|
||||
} \
|
||||
else \
|
||||
NV_ASSERT(0); \
|
||||
} \
|
||||
\
|
||||
if (bFirstDevice) \
|
||||
{ \
|
||||
status = pRmApi->AllocWithHandle(pRmApi, hclient, NV01_NULL_OBJECT, \
|
||||
NV01_NULL_OBJECT, NV01_ROOT, \
|
||||
&root_alloc_params, sizeof(root_alloc_params)); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
status = NV_OK; \
|
||||
} \
|
||||
\
|
||||
if (status == NV_OK) \
|
||||
{ \
|
||||
NV0080_ALLOC_PARAMETERS device_alloc_params = {0}; \
|
||||
\
|
||||
device_alloc_params.hClientShare = hclientshare; \
|
||||
device_alloc_params.hTargetClient = htargetclient; \
|
||||
device_alloc_params.hTargetDevice = htargetdevice; \
|
||||
device_alloc_params.flags = allocflags; \
|
||||
device_alloc_params.vaSpaceSize = vasize; \
|
||||
\
|
||||
status = pRmApi->AllocWithHandle(pRmApi, hclient, hclient, hdevice, \
|
||||
hclass, &device_alloc_params, \
|
||||
sizeof(device_alloc_params)); \
|
||||
} \
|
||||
else \
|
||||
NV_ASSERT(0); \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
#define NV_RM_RPC_ALLOC_MEMORY(pGpu, hclient, hdevice, hmemory, hclass, \
|
||||
flags, pmemdesc, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc; \
|
||||
pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL) \
|
||||
&& (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && \
|
||||
!gpuIsWarBug200577889SriovHeavyEnabled(pGpu) && \
|
||||
!NV_IS_MODS))) { \
|
||||
if (IS_DCE_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
NV_MEMORY_LIST_ALLOCATION_PARAMS listAllocParams = {0}; \
|
||||
listAllocParams.pteAdjust = pmemdesc->PteAdjust; \
|
||||
listAllocParams.format = memdescGetPteKind(pmemdesc); \
|
||||
listAllocParams.size = pmemdesc->Size; \
|
||||
listAllocParams.pageCount = pmemdesc->PageCount; \
|
||||
listAllocParams.pageNumberList = memdescGetPteArray(pmemdesc, AT_GPU); \
|
||||
listAllocParams.hClient = NV01_NULL_OBJECT; \
|
||||
listAllocParams.hParent = NV01_NULL_OBJECT; \
|
||||
listAllocParams.hObject = NV01_NULL_OBJECT; \
|
||||
listAllocParams.limit = pmemdesc->Size - 1; \
|
||||
listAllocParams.flagsOs02 = (DRF_DEF(OS02,_FLAGS,_MAPPING,_NO_MAP) | \
|
||||
DRF_DEF(OS02,_FLAGS,_PHYSICALITY,_NONCONTIGUOUS) | \
|
||||
(flags & DRF_SHIFTMASK(NVOS02_FLAGS_COHERENCY))); \
|
||||
status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, \
|
||||
hmemory, NV01_MEMORY_LIST_SYSTEM, &listAllocParams, \
|
||||
sizeof(listAllocParams)); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
status = rpcAllocMemory_HAL(pGpu, pRpc, hclient, hdevice, hmemory, \
|
||||
hclass, flags, pmemdesc); \
|
||||
} \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
#define NV_RM_RPC_MAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, offset, length, flags, \
|
||||
dmaoffset, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc; \
|
||||
pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL) && \
|
||||
!gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \
|
||||
status = rpcMapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, offset, \
|
||||
length, flags, dmaoffset); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define NV_RM_RPC_UNMAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, flags, dmaoffset, \
|
||||
status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc; \
|
||||
pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL) && \
|
||||
!gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \
|
||||
status = rpcUnmapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, \
|
||||
flags, dmaoffset); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_IDLE_CHANNELS(pGpu, phclients, phdevices, phchannels, \
|
||||
nentries, flags, timeout, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcIdleChannels_HAL(pGpu, pRpc, phclients, phdevices, \
|
||||
phchannels, nentries, flags, timeout); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \
|
||||
allocflags, vasize, vamode, bFirstDevice, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \
|
||||
allocflags, vasize, vamode, bFirstDevice, status); \
|
||||
} \
|
||||
else \
|
||||
status = rpcAllocShareDevice_HAL(pGpu, pRpc, hclient, hdevice, hclientshare, \
|
||||
htargetclient, htargetdevice, hclass, \
|
||||
allocflags, vasize, vamode); \
|
||||
} else if (pRpc == NULL) \
|
||||
return NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
/*
|
||||
* Control RPC macro
|
||||
*/
|
||||
#define NV_RM_RPC_CONTROL(pGpu, hClient, hObject, cmd, pParams, paramSize, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
status = pRmApi->Control(pRmApi, hClient, hObject, cmd, \
|
||||
pParams, paramSize); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
status = rpcDmaControl_wrapper(pGpu, pRpc, hClient, hObject, \
|
||||
cmd, pParams, paramSize); \
|
||||
} \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
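/*
 * Illustrative usage sketch (not part of the original header): the RPC macros
 * in this file take an NV_STATUS lvalue and, with few exceptions, only issue
 * the RPC when that status already holds NV_OK, so call sites can chain
 * several macros and check the status once.  The control command and the
 * parameter struct below are placeholders, not real identifiers.
 *
 *   NV_STATUS status = NV_OK;
 *   SOME_CTRL_PARAMS params = {0};
 *
 *   NV_RM_RPC_CONTROL(pGpu, hClient, hSubdevice, SOME_CTRL_CMD,
 *                     &params, sizeof(params), status);
 *   if (status != NV_OK)
 *       return status;
 */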
|
||||
|
||||
#define NV_RM_RPC_ALLOC_CHANNEL(pGpu, hclient, hparent, hchannel, hclass, \
|
||||
pGpfifoAllocParams, pchid, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc; \
|
||||
pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
status = pRmApi->AllocWithHandle(pRmApi, hclient, hparent, hchannel, \
|
||||
hclass, pGpfifoAllocParams, \
|
||||
sizeof(*pGpfifoAllocParams)); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
status = rpcAllocChannelDma_HAL(pGpu, pRpc, hclient, hparent, hchannel, \
|
||||
hclass, pGpfifoAllocParams, pchid); \
|
||||
} \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
#define NV_RM_RPC_ALLOC_OBJECT(pGpu, hclient, hchannel, hobject, hclass, params, paramsSize, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
status = pRmApi->AllocWithHandle(pRmApi, hclient, hchannel, hobject, \
|
||||
hclass, params, paramsSize); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
status = rpcAllocObject_HAL(pGpu, pRpc, \
|
||||
hclient, hchannel, hobject, hclass, params);\
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* free RPC macros
|
||||
*/
|
||||
#define NV_RM_RPC_FREE(pGpu, hclient, hparent, hobject, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
status = pRmApi->Free(pRmApi, hclient, hobject); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
status = rpcFree_HAL(pGpu, pRpc, hclient, hparent, hobject); \
|
||||
} \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_FREE_ON_ERROR(pGpu, hclient, hparent, hobject) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
/* used in failure cases, macro doesn't overwrite rmStatus */ \
|
||||
if (pRpc != NULL) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
pRmApi->Free(pRmApi, hclient, hobject); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
rpcFree_HAL(pGpu, pRpc, hclient, hparent, hobject); \
|
||||
} \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
#define NV_RM_RPC_ALLOC_EVENT(pGpu, hclient, hparentclient, hchannel, hobject, \
|
||||
hevent, hclass, idx, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
NV0005_ALLOC_PARAMETERS allocParams = {0}; \
|
||||
allocParams.hParentClient = hparentclient; \
|
||||
allocParams.hClass = hclass; \
|
||||
allocParams.notifyIndex = idx | NV01_EVENT_CLIENT_RM; \
|
||||
allocParams.data = 0; \
|
||||
status = pRmApi->AllocWithHandle(pRmApi, hclient, \
|
||||
hobject, hevent, \
|
||||
hclass, &allocParams, \
|
||||
sizeof(allocParams)); \
|
||||
} \
|
||||
else \
|
||||
{ \
|
||||
status = rpcAllocEvent_HAL(pGpu, pRpc, hclient, hparentclient, \
|
||||
hchannel, hobject, hevent, hclass, idx);\
|
||||
} \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_ALLOC_SUBDEVICE(pGpu, hclient, hdevice, hsubdevice, \
|
||||
hclass, subDeviceInst, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
NV2080_ALLOC_PARAMETERS alloc_params = {0}; \
|
||||
\
|
||||
alloc_params.subDeviceId = subDeviceInst; \
|
||||
\
|
||||
status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, hsubdevice, \
|
||||
hclass, &alloc_params, \
|
||||
sizeof(alloc_params)); \
|
||||
} \
|
||||
else \
|
||||
status = rpcAllocSubdevice_HAL(pGpu, pRpc, hclient, hdevice, \
|
||||
hsubdevice, hclass, subDeviceInst); \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_DUP_OBJECT(pGpu, hclient, hparent, hobject, hclient_src, \
|
||||
hobject_src, flags, bAutoFreeRpc, pDstRef, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
if (IS_GSP_CLIENT(pGpu)) \
|
||||
{ \
|
||||
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \
|
||||
status = pRmApi->DupObject(pRmApi, hclient, hparent, \
|
||||
&hobject, hclient_src, \
|
||||
hobject_src, flags); \
|
||||
} \
|
||||
else \
|
||||
status = rpcDupObject_HAL(pGpu, pRpc, hclient, hparent, \
|
||||
hobject, hclient_src, \
|
||||
hobject_src, flags); \
|
||||
if ((bAutoFreeRpc) && (pDstRef != NULL) && (status == NV_OK)) \
|
||||
{ \
|
||||
RmResource *pRmResource; \
|
||||
pRmResource = dynamicCast(((RsResourceRef*)pDstRef)->pResource, RmResource); \
|
||||
pRmResource->bRpcFree = NV_TRUE; \
|
||||
} \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(OBJGPU *pGpu, ...) { return; }
|
||||
static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(OBJGPU *pGpu, ...) { return; }
|
||||
static inline void NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; }
|
||||
static inline void NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; }
|
||||
static inline void NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(OBJGPU *pGpu, ...) { return; }
|
||||
static inline void NV_RM_RPC_SIM_FREE_INFRA(OBJGPU *pGpu, ...) { return; }
|
||||
|
||||
#define NV_RM_RPC_SET_GUEST_SYSTEM_INFO(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = RmRpcSetGuestSystemInfo(pGpu, pRpc); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER(pGpu, status, bitmapInfo) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcSetSysmemDirtyPageTrackingBuffer_HAL(pGpu, pRpc, bitmapInfo);\
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_UNLOADING_GUEST_DRIVER(pGpu, status, bInPMTransition, bGc6Entering, newPMLevel) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcUnloadingGuestDriver_HAL(pGpu, pRpc, bInPMTransition, bGc6Entering, newPMLevel); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
#define NV_RM_RPC_GPU_EXEC_REG_OPS(pGpu, hClient, hObject, pParams, pRegOps, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcGpuExecRegOps_HAL(pGpu, pRpc, hClient, hObject, pParams, pRegOps); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
#define NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(pGpu, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcRegisterVirtualEventBuffer_HAL(pGpu, pRpc, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_UPDATE_BAR_PDE(pGpu, barType, entryValue, entryLevelShift, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcUpdateBarPde_HAL(pGpu, pRpc, barType, entryValue, entryLevelShift); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_SET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcSetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_UNSET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcUnsetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
static inline void NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(OBJGPU *pGpu, ...) { return; }
|
||||
|
||||
#define NV_RM_RPC_INVALIDATE_TLB(pGpu, status, pdbAddress, regVal) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcInvalidateTlb_HAL(pGpu, pRpc, pdbAddress, regVal); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while(0)
|
||||
|
||||
//
|
||||
// DCE_CLIENT_RM specific RPCs
|
||||
//
|
||||
|
||||
#define NV_RM_RPC_DCE_RM_INIT(pGpu, bInit, status) do {} while (0)
|
||||
|
||||
//
|
||||
// GSP_CLIENT_RM specific RPCs
|
||||
//
|
||||
|
||||
#define NV_RM_RPC_GET_GSP_STATIC_INFO(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcGetGspStaticInfo_HAL(pGpu, pRpc); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_GSP_SET_SYSTEM_INFO(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcGspSetSystemInfo_HAL(pGpu, pRpc); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_SET_REGISTRY(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcSetRegistry_HAL(pGpu, pRpc); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(pGpu, status, pPrbEnc, pNvDumpState, \
|
||||
component) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcDumpProtobufComponent_HAL(pGpu, pRpc, pPrbEnc, \
|
||||
pNvDumpState, component); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_ECC_NOTIFIER_WRITE_ACK(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcEccNotifierWriteAck_HAL(pGpu, pRpc); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
void teardownSysmemPfnBitMap(OBJGPU *pGpu, OBJVGPU *pVGpu);
|
||||
|
||||
NV_STATUS RmRpcPerfGetCurrentPstate(OBJGPU *pGpu, NV2080_CTRL_PERF_GET_CURRENT_PSTATE_PARAMS *pParamStructPtr);
|
||||
|
||||
static inline NV_STATUS RmRpcSimFreeInfra(OBJGPU *pGpu, ...) { return NV_OK; }
|
||||
static inline NV_STATUS RmRpcSimUpdateDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
|
||||
static inline NV_STATUS RmRpcSimDeleteDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
|
||||
static inline NV_STATUS RmRpcSimUpdateDispChannelInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
|
||||
static inline NV_STATUS RmRpcHwResourceAlloc(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
|
||||
static inline NV_STATUS RmRpcHwResourceFree(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
|
||||
static inline NV_STATUS RmRpcPerfGetPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
|
||||
static inline NV_STATUS RmRpcPerfGetVirtualPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; }
|
||||
|
||||
static inline NV_STATUS RmRpcSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 index,
|
||||
NvU32 count, NvU32 *data) { return NV_ERR_NOT_SUPPORTED; }
|
||||
static inline NV_STATUS RmRpcSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 index,
|
||||
NvU32 count, NvU32 data) { return NV_ERR_NOT_SUPPORTED; }
|
||||
|
||||
NV_STATUS RmRpcSetGuestSystemInfo(OBJGPU *pGpu, OBJRPC *pRpc);
|
||||
|
||||
/*!
|
||||
* Defines the size of the GSP sim access buffer.
|
||||
*/
|
||||
#define GSP_SIM_ACCESS_BUFFER_SIZE 0x4000
|
||||
|
||||
/*!
|
||||
* Defines the structure used to pass SimRead data from Kernel to Physical RM.
|
||||
*/
|
||||
typedef struct SimAccessBuffer
|
||||
{
|
||||
volatile NvU32 data[GSP_SIM_ACCESS_BUFFER_SIZE];
|
||||
volatile NvU32 seq;
|
||||
} SimAccessBuffer;
|
||||
|
||||
#endif // __vgpu_dev_nv_rpc_h__
|
||||
src/nvidia/inc/kernel/vgpu/rpc_global_enums.h (new file, 276 lines)
@@ -0,0 +1,276 @@
|
||||
#ifndef _RPC_GLOBAL_ENUMS_H_
|
||||
#define _RPC_GLOBAL_ENUMS_H_
|
||||
|
||||
// Deprecated RPC numbers cannot be reused, in order not to break compatibility
|
||||
#ifndef X
|
||||
# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL,
|
||||
# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
|
||||
enum {
|
||||
#endif
|
||||
X(RM, NOP, 0)
|
||||
X(RM, SET_GUEST_SYSTEM_INFO, 1)
|
||||
X(RM, ALLOC_ROOT, 2)
|
||||
X(RM, ALLOC_DEVICE, 3) // deprecated
|
||||
X(RM, ALLOC_MEMORY, 4)
|
||||
X(RM, ALLOC_CTX_DMA, 5)
|
||||
X(RM, ALLOC_CHANNEL_DMA, 6)
|
||||
X(RM, MAP_MEMORY, 7)
|
||||
X(RM, BIND_CTX_DMA, 8) // deprecated
|
||||
X(RM, ALLOC_OBJECT, 9)
|
||||
X(RM, FREE, 10)
|
||||
X(RM, LOG, 11)
|
||||
X(RM, ALLOC_VIDMEM, 12)
|
||||
X(RM, UNMAP_MEMORY, 13)
|
||||
X(RM, MAP_MEMORY_DMA, 14)
|
||||
X(RM, UNMAP_MEMORY_DMA, 15)
|
||||
X(RM, GET_EDID, 16) // deprecated
|
||||
X(RM, ALLOC_DISP_CHANNEL, 17)
|
||||
X(RM, ALLOC_DISP_OBJECT, 18)
|
||||
X(RM, ALLOC_SUBDEVICE, 19)
|
||||
X(RM, ALLOC_DYNAMIC_MEMORY, 20)
|
||||
X(RM, DUP_OBJECT, 21)
|
||||
X(RM, IDLE_CHANNELS, 22)
|
||||
X(RM, ALLOC_EVENT, 23)
|
||||
X(RM, SEND_EVENT, 24) // deprecated
|
||||
X(RM, REMAPPER_CONTROL, 25) // deprecated
|
||||
X(RM, DMA_CONTROL, 26) // deprecated
|
||||
X(RM, DMA_FILL_PTE_MEM, 27)
|
||||
X(RM, MANAGE_HW_RESOURCE, 28)
|
||||
X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated
|
||||
X(RM, CREATE_FB_SEGMENT, 30)
|
||||
X(RM, DESTROY_FB_SEGMENT, 31)
|
||||
X(RM, ALLOC_SHARE_DEVICE, 32)
|
||||
X(RM, DEFERRED_API_CONTROL, 33)
|
||||
X(RM, REMOVE_DEFERRED_API, 34)
|
||||
X(RM, SIM_ESCAPE_READ, 35)
|
||||
X(RM, SIM_ESCAPE_WRITE, 36)
|
||||
X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37)
|
||||
X(RM, FREE_VIDMEM_VIRT, 38)
|
||||
X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated
|
||||
X(RM, PERF_GET_PERFMON_SAMPLE, 40)
|
||||
X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated
|
||||
X(RM, PERF_GET_LEVEL_INFO, 42)
|
||||
X(RM, MAP_SEMA_MEMORY, 43)
|
||||
X(RM, UNMAP_SEMA_MEMORY, 44)
|
||||
X(RM, SET_SURFACE_PROPERTIES, 45)
|
||||
X(RM, CLEANUP_SURFACE, 46)
|
||||
X(RM, UNLOADING_GUEST_DRIVER, 47)
|
||||
X(RM, TDR_SET_TIMEOUT_STATE, 48)
|
||||
X(RM, SWITCH_TO_VGA, 49)
|
||||
X(RM, GPU_EXEC_REG_OPS, 50)
|
||||
X(RM, GET_STATIC_INFO, 51)
|
||||
X(RM, ALLOC_VIRTMEM, 52)
|
||||
X(RM, UPDATE_PDE_2, 53)
|
||||
X(RM, SET_PAGE_DIRECTORY, 54)
|
||||
X(RM, GET_STATIC_PSTATE_INFO, 55)
|
||||
X(RM, TRANSLATE_GUEST_GPU_PTES, 56)
|
||||
X(RM, RESERVED_57, 57)
|
||||
X(RM, RESET_CURRENT_GR_CONTEXT, 58)
|
||||
X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59)
|
||||
X(RM, GET_ENGINE_UTILIZATION, 60)
|
||||
X(RM, UPDATE_GPU_PDES, 61)
|
||||
X(RM, GET_ENCODER_CAPACITY, 62)
|
||||
X(RM, VGPU_PF_REG_READ32, 63) // deprecated
|
||||
X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64)
|
||||
X(GSP, GET_GSP_STATIC_INFO, 65)
|
||||
X(RM, RMFS_INIT, 66) // deprecated
|
||||
X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated
|
||||
X(RM, RMFS_CLEANUP, 68) // deprecated
|
||||
X(RM, RMFS_TEST, 69) // deprecated
|
||||
X(RM, UPDATE_BAR_PDE, 70)
|
||||
X(RM, CONTINUATION_RECORD, 71)
|
||||
X(RM, GSP_SET_SYSTEM_INFO, 72)
|
||||
X(RM, SET_REGISTRY, 73)
|
||||
X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated
|
||||
X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated
|
||||
X(GSP, GSP_RM_CONTROL, 76)
|
||||
X(RM, GET_STATIC_INFO2, 77)
|
||||
X(RM, DUMP_PROTOBUF_COMPONENT, 78)
|
||||
X(RM, UNSET_PAGE_DIRECTORY, 79)
|
||||
X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated
|
||||
X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated
|
||||
X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated
|
||||
X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated
|
||||
X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated
|
||||
X(RM, CTRL_SET_VGPU_FB_USAGE, 85)
|
||||
X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86)
|
||||
X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87)
|
||||
X(RM, CTRL_RESET_CHANNEL, 88)
|
||||
X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89)
|
||||
X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90)
|
||||
X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91)
|
||||
X(RM, CTRL_PERF_BOOST, 92)
|
||||
X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93)
|
||||
X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94)
|
||||
X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95)
|
||||
X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96)
|
||||
X(RM, CTRL_GPFIFO_SCHEDULE, 97)
|
||||
X(RM, CTRL_SET_TIMESLICE, 98)
|
||||
X(RM, CTRL_PREEMPT, 99)
|
||||
X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100)
|
||||
X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101)
|
||||
X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102)
|
||||
X(GSP, GSP_RM_ALLOC, 103)
|
||||
X(RM, CTRL_GET_P2P_CAPS_V2, 104)
|
||||
X(RM, CTRL_CIPHER_AES_ENCRYPT, 105)
|
||||
X(RM, CTRL_CIPHER_SESSION_KEY, 106)
|
||||
X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107)
|
||||
X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108)
|
||||
X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109)
|
||||
X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110)
|
||||
X(RM, CTRL_GPU_PROMOTE_CTX, 111)
|
||||
X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112)
|
||||
X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113)
|
||||
X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114)
|
||||
X(RM, CTRL_GPU_INITIALIZE_CTX, 115)
|
||||
X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116)
|
||||
X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117)
|
||||
X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118)
|
||||
X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119)
|
||||
X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120)
|
||||
X(RM, CTRL_GET_CE_PCE_MASK, 121)
|
||||
X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122)
|
||||
X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated
|
||||
X(RM, CTRL_GET_NVLINK_STATUS, 124)
|
||||
X(RM, CTRL_GET_P2P_CAPS, 125)
|
||||
X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126)
|
||||
X(RM, RESERVED_0, 127)
|
||||
X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128)
|
||||
X(RM, CTRL_RESERVE_HWPM_LEGACY, 129)
|
||||
X(RM, CTRL_B0CC_EXEC_REG_OPS, 130)
|
||||
X(RM, CTRL_BIND_PM_RESOURCES, 131)
|
||||
X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132)
|
||||
X(RM, CTRL_DBG_RESUME_CONTEXT, 133)
|
||||
X(RM, CTRL_DBG_EXEC_REG_OPS, 134)
|
||||
X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135)
|
||||
X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136)
|
||||
X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137)
|
||||
X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138)
|
||||
X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139)
|
||||
X(RM, CTRL_ALLOC_PMA_STREAM, 140)
|
||||
X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141)
|
||||
X(RM, CTRL_FB_GET_INFO_V2, 142)
|
||||
X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143)
|
||||
X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144)
|
||||
X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145)
|
||||
X(RM, CTRL_GPU_EVICT_CTX, 146)
|
||||
X(RM, CTRL_FB_GET_FS_INFO, 147)
|
||||
X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148)
|
||||
X(RM, CTRL_STOP_CHANNEL, 149)
|
||||
X(RM, CTRL_GR_PC_SAMPLING_MODE, 150)
|
||||
X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151)
|
||||
X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152)
|
||||
X(RM, CTRL_FREE_PMA_STREAM, 153)
|
||||
X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154)
|
||||
X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155)
|
||||
X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156)
|
||||
X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157)
|
||||
X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158)
|
||||
X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159)
|
||||
X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160)
|
||||
X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161)
|
||||
X(UVM, UVM_PAGING_CHANNEL_MAP, 162)
|
||||
X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163)
|
||||
X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164)
|
||||
X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165)
|
||||
X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166)
|
||||
X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167)
|
||||
X(RM, DCE_RM_INIT, 168)
|
||||
X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169)
|
||||
X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170)
|
||||
X(RM, GET_PLCABLE_ADDRESS_KIND, 171)
|
||||
X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172)
|
||||
X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173)
|
||||
X(RM, CTRL_GET_MMU_DEBUG_MODE, 174)
|
||||
X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175)
|
||||
X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176)
|
||||
X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177)
|
||||
X(RM, DISABLE_CHANNELS, 178)
|
||||
X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179)
|
||||
X(RM, CTRL_FABRIC_MEM_STATS, 180)
|
||||
X(RM, SAVE_HIBERNATION_DATA, 181)
|
||||
X(RM, RESTORE_HIBERNATION_DATA, 182)
|
||||
X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183)
|
||||
X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184)
|
||||
X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185)
|
||||
X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186)
|
||||
X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187)
|
||||
X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188)
|
||||
X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189)
|
||||
X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190)
|
||||
X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191)
|
||||
X(RM, CTRL_BUS_SET_P2P_MAPPING, 192)
|
||||
X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193)
|
||||
X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194)
|
||||
X(RM, CTRL_GPU_MIGRATABLE_OPS, 195)
|
||||
X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196)
|
||||
X(RM, CTRL_GET_HS_CREDITS, 197)
|
||||
X(RM, CTRL_SET_HS_CREDITS, 198)
|
||||
X(RM, CTRL_PM_AREA_PC_SAMPLER, 199)
|
||||
X(RM, INVALIDATE_TLB, 200)
|
||||
X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated
|
||||
X(RM, ECC_NOTIFIER_WRITE_ACK, 202)
|
||||
X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203)
|
||||
X(RM, RM_API_CONTROL, 204)
|
||||
X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205)
|
||||
X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206)
|
||||
X(RM, GET_STATIC_DATA, 207)
|
||||
X(RM, RESERVED_208, 208)
|
||||
X(RM, CTRL_GPU_GET_INFO_V2, 209)
|
||||
X(RM, GET_BRAND_CAPS, 210)
|
||||
X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211)
|
||||
X(RM, NUM_FUNCTIONS, 212)
|
||||
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
|
||||
};
|
||||
# undef X
|
||||
# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
|
||||
#endif
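/*
 * Illustrative sketch (not part of the original header): one possible use of
 * the X-macro table above.  If X() (and E(), for the event section below) are
 * already defined when this header is pulled in, the entries expand with those
 * definitions instead of declaring the enums, which lets a consumer build
 * parallel tables such as a name table for debug prints.  Note that the
 * _RPC_GLOBAL_ENUMS_H_ include guard means this has to happen on the first
 * inclusion in that translation unit; the table name is made up.
 *
 *   #define X(UNIT, RPC, VAL) [VAL] = #RPC,
 *   #define E(RPC, VAL)
 *   static const char *const rpcFunctionNames[] = {
 *   #include "rpc_global_enums.h"
 *   };
 *   #undef E
 *   #undef X
 */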
|
||||
|
||||
// RPC Events. Used by GSP-RM.
|
||||
#ifndef E
|
||||
# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL,
|
||||
# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
|
||||
enum {
|
||||
#endif
|
||||
E(FIRST_EVENT, 0x1000)
|
||||
E(GSP_INIT_DONE, 0x1001)
|
||||
E(GSP_RUN_CPU_SEQUENCER, 0x1002)
|
||||
E(POST_EVENT, 0x1003)
|
||||
E(RC_TRIGGERED, 0x1004)
|
||||
E(MMU_FAULT_QUEUED, 0x1005)
|
||||
E(OS_ERROR_LOG, 0x1006)
|
||||
E(RG_LINE_INTR, 0x1007)
|
||||
E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008)
|
||||
E(SIM_READ, 0x1009)
|
||||
E(SIM_WRITE, 0x100a)
|
||||
E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b)
|
||||
E(UCODE_LIBOS_PRINT, 0x100c)
|
||||
E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d)
|
||||
E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e)
|
||||
E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f)
|
||||
E(VGPU_CONFIG, 0x1010)
|
||||
E(DISPLAY_MODESET, 0x1011)
|
||||
E(EXTDEV_INTR_SERVICE, 0x1012)
|
||||
E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013)
|
||||
E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014)
|
||||
E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015)
|
||||
E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016)
|
||||
E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017)
|
||||
E(TIMED_SEMAPHORE_RELEASE, 0x1018)
|
||||
E(NVLINK_IS_GPU_DEGRADED, 0x1019)
|
||||
E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a)
|
||||
E(NVLINK_FAULT_UP, 0x101b)
|
||||
E(GSP_LOCKDOWN_NOTICE, 0x101c)
|
||||
E(MIG_CI_CONFIG_UPDATE, 0x101d)
|
||||
E(UPDATE_GSP_TRACE, 0x101e)
|
||||
E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f)
|
||||
E(GSP_POST_NOCAT_RECORD, 0x1020)
|
||||
E(FECS_ERROR, 0x1021)
|
||||
E(NUM_EVENTS, 0x1022)
|
||||
#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
|
||||
};
|
||||
# undef E
|
||||
# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
|
||||
#endif
|
||||
|
||||
#endif /*_RPC_GLOBAL_ENUMS_H_*/
|
||||
src/nvidia/inc/kernel/vgpu/rpc_headers.h (new file, 250 lines)
@@ -0,0 +1,250 @@
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __vgpu_rpc_nv_headers_h__
|
||||
#define __vgpu_rpc_nv_headers_h__
|
||||
|
||||
#include "ctrl/ctrl0080/ctrl0080perf.h"
|
||||
#include "ctrl/ctrl2080/ctrl2080perf.h"
|
||||
#include "ctrl/ctrl2080/ctrl2080internal.h"
|
||||
#include "nvstatus.h"
|
||||
|
||||
#define MAX_GPC_COUNT 32
|
||||
|
||||
/*
|
||||
* Maximum number of RegOps that can be accommodated within one RPC call
|
||||
* due to RPC message buffer size being limited to 4k
|
||||
*/
|
||||
#define VGPU_MAX_REGOPS_PER_RPC 100
|
||||
|
||||
#define VGPU_RESERVED_HANDLE_BASE 0xCAF3F000
|
||||
#define VGPU_RESERVED_HANDLE_RANGE 0x1000
|
||||
|
||||
#define VGPU_CALC_PARAM_OFFSET(prev_offset, prev_params) (prev_offset + NV_ALIGN_UP(sizeof(prev_params), sizeof(NvU32)))
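/*
 * Illustrative sketch (not part of the original header): VGPU_CALC_PARAM_OFFSET
 * chains NvU32-aligned parameter blocks inside a single RPC message.  Laying
 * out two blocks back to back might look like this; FIRST_PARAMS_T and
 * SECOND_PARAMS_T are placeholder type names.
 *
 *   NvU32 firstOffset  = 0;
 *   NvU32 secondOffset = VGPU_CALC_PARAM_OFFSET(firstOffset, FIRST_PARAMS_T);
 *   // secondOffset now points just past an NvU32-aligned FIRST_PARAMS_T,
 *   // where a SECOND_PARAMS_T can be placed.
 */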
|
||||
|
||||
/*
|
||||
* Message header (in buffer addressed by ring entry)
|
||||
*
|
||||
* If message is invalid (bad length or signature), signature and length
|
||||
* are forced to be valid (if in range of descriptor) and result is set to
|
||||
* NV_VGPU_RESULT_INVALID_MESSAGE_FORMAT. Otherwise, signature, length, and
|
||||
* function are always unchanged and result is always set.
|
||||
*
|
||||
* The function message header, if defined, immediately follows the main message
|
||||
* header.
|
||||
*/
|
||||
#define NV_VGPU_MSG_HEADER_VERSION_MAJOR 31:24 /* R---D */
|
||||
#define NV_VGPU_MSG_HEADER_VERSION_MINOR 23:16 /* R---D */
|
||||
#define NV_VGPU_MSG_HEADER_VERSION_MAJOR_TOT 0x00000003 /* R---D */
|
||||
#define NV_VGPU_MSG_HEADER_VERSION_MINOR_TOT 0x00000000 /* R---D */
|
||||
/* signature must equal valid value */
|
||||
#define NV_VGPU_MSG_SIGNATURE_VALID 0x43505256 /* RW--V */
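/*
 * Illustrative sketch (not part of the original header): pulling the header
 * version out of the 32-bit word that carries the fields above, assuming
 * DRF_VAL() from nvmisc.h; the helper name is made up for illustration.
 */
static NV_INLINE NvBool vgpuRpcHeaderMajorVersionMatches(NvU32 versionWord)
{
    // VERSION_MAJOR occupies bits 31:24, VERSION_MINOR bits 23:16
    return (DRF_VAL(_VGPU, _MSG_HEADER, _VERSION_MAJOR, versionWord) ==
            NV_VGPU_MSG_HEADER_VERSION_MAJOR_TOT) ? NV_TRUE : NV_FALSE;
}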
|
||||
|
||||
#include "rpc_global_enums.h"
|
||||
|
||||
/* result code */
|
||||
/* codes below 0xFF000000 must match exactly the NV_STATUS codes in nvos.h */
|
||||
#define NV_VGPU_MSG_RESULT__RM NV_ERR_GENERIC:0x00000000 /* RW--D */
|
||||
#define NV_VGPU_MSG_RESULT_SUCCESS NV_OK
|
||||
#define NV_VGPU_MSG_RESULT_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT
|
||||
#define NV_VGPU_MSG_RESULT_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE
|
||||
#define NV_VGPU_MSG_RESULT_GENERIC NV_ERR_GENERIC
|
||||
#define NV_VGPU_MSG_RESULT_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER
|
||||
#define NV_VGPU_MSG_RESULT_IN_USE NV_ERR_IN_USE
|
||||
#define NV_VGPU_MSG_RESULT_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_BASE NV_ERR_INVALID_BASE
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_CLASS NV_ERR_INVALID_CLASS
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_CLIENT NV_ERR_INVALID_CLIENT
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_COMMAND NV_ERR_INVALID_COMMAND
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_DATA NV_ERR_INVALID_DATA
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_DEVICE NV_ERR_INVALID_DEVICE
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_EVENT NV_ERR_INVALID_EVENT
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_FLAGS NV_ERR_INVALID_FLAGS
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_HEAP NV_ERR_INVALID_HEAP
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_INDEX NV_ERR_INVALID_INDEX
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_LIMIT NV_ERR_INVALID_LIMIT
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_METHOD NV_ERR_INVALID_METHOD
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_BUFFER NV_ERR_INVALID_OBJECT_BUFFER
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_PARENT NV_ERR_INVALID_OBJECT_PARENT
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OFFSET NV_ERR_INVALID_OFFSET
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_OWNER NV_ERR_INVALID_OWNER
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_PARAMETER NV_ERR_INVALID_PARAMETER
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_POINTER NV_ERR_INVALID_POINTER
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_STATE NV_ERR_INVALID_STATE
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH
|
||||
#define NV_VGPU_MSG_RESULT_INVALID_XLATE NV_ERR_INVALID_XLATE
|
||||
#define NV_VGPU_MSG_RESULT_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING
|
||||
#define NV_VGPU_MSG_RESULT_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES
|
||||
#define NV_VGPU_MSG_RESULT_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED
|
||||
#define NV_VGPU_MSG_RESULT_OPERATING_SYSTEM NV_ERR_OPERATING_SYSTEM
|
||||
#define NV_VGPU_MSG_RESULT_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT
|
||||
#define NV_VGPU_MSG_RESULT_TIMEOUT NV_ERR_TIMEOUT
|
||||
#define NV_VGPU_MSG_RESULT_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES
|
||||
#define NV_VGPU_MSG_RESULT_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED
|
||||
#define NV_VGPU_MSG_RESULT_GUEST_HOST_DRIVER_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH
|
||||
|
||||
/*
|
||||
* codes above 0xFF000000 and below 0xFF100000 must match one-for-one
|
||||
* the vmiop_error_t codes in vmioplugin.h, with 0xFF000000 added.
|
||||
*/
|
||||
#define NV_VGPU_MSG_RESULT__VMIOP 0xFF00000a:0xFF000000 /* RW--D */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_INVAL 0xFF000001 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_RESOURCE 0xFF000002 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_RANGE 0xFF000003 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_READ_ONLY 0xFF000004 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_NOT_FOUND 0xFF000005 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_NO_ADDRESS_SPACE 0xFF000006 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_TIMEOUT 0xFF000007 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_NOT_ALLOWED_IN_CALLBACK 0xFF000008 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_ECC_MISMATCH 0xFF000009 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_VMIOP_NOT_SUPPORTED 0xFF00000a /* RW--V */
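/*
 * Illustrative sketch (not part of the original header): per the comment above,
 * these values are the corresponding vmiop_error_t codes with 0xFF000000 added,
 * so a plugin error folds into an RPC result with a plain addition.  The helper
 * name below is an assumption for illustration only.
 */
#define NV_VGPU_MSG_RESULT_FROM_VMIOP_ERROR(vmiopErr) (0xFF000000 + (vmiopErr))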
|
||||
/* RPC-specific error codes */
|
||||
#define NV_VGPU_MSG_RESULT__RPC 0xFF100009:0xFF100000 /* RW--D */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION 0xFF100001 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_INVALID_MESSAGE_FORMAT 0xFF100002 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_HANDLE_NOT_FOUND 0xFF100003 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_HANDLE_EXISTS 0xFF100004 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR 0xFF100005 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR 0xFF100006 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE 0xFF100007 /* RW--V */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_CUDA_PROFILING_DISABLED 0xFF100008 /* RW--V */
|
||||
// This error code is used by the plugin to notify the guest that the API control
// is recognized but not supported. It is used by the guest to avoid printing an
// error message about a failed API control.
#define NV_VGPU_MSG_RESULT_RPC_API_CONTROL_NOT_SUPPORTED 0xFF100009 /* RW--V */
|
||||
/* RPC-specific code in result for incomplete request */
|
||||
#define NV_VGPU_MSG_RESULT_RPC_PENDING 0xFFFFFFFF /* RW--V */
|
||||
/* shared union field */
|
||||
#define NV_VGPU_MSG_UNION_INIT 0x00000000 /* RW--V */
|
||||
|
||||
/*
|
||||
* common PTEDESC message defines (used w/ ALLOC_MEMORY, ALLOC_VIDMEM, FILL_PTE_MEM)
|
||||
*/
|
||||
#define NV_VGPU_PTEDESC_INIT 0x00000000 /* RWI-V */
|
||||
#define NV_VGPU_PTEDESC__PROD 0x00000000 /* RW--V */
|
||||
#define NV_VGPU_PTEDESC_IDR_NONE 0x00000000 /* RW--V */
|
||||
#define NV_VGPU_PTEDESC_IDR_SINGLE 0x00000001 /* RW--V */
|
||||
#define NV_VGPU_PTEDESC_IDR_DOUBLE 0x00000002 /* RW--V */
|
||||
#define NV_VGPU_PTEDESC_IDR_TRIPLE 0x00000003 /* RW--V */
|
||||
|
||||
#define NV_VGPU_PTE_PAGE_SIZE 0x1000 /* R---V */
|
||||
#define NV_VGPU_PTE_SIZE 4 /* R---V */
|
||||
#define NV_VGPU_PTE_INDEX_SHIFT 10 /* R---V */
|
||||
#define NV_VGPU_PTE_INDEX_MASK 0x3FF /* R---V */
|
||||
|
||||
#define NV_VGPU_PTE_64_PAGE_SIZE 0x1000 /* R---V */
|
||||
#define NV_VGPU_PTE_64_SIZE 8 /* R---V */
|
||||
#define NV_VGPU_PTE_64_INDEX_SHIFT 9 /* R---V */
|
||||
#define NV_VGPU_PTE_64_INDEX_MASK 0x1FF /* R---V */
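/*
 * Illustrative sketch (not part of the original header): the PTE geometry
 * defines above describe 4K PTE pages holding 1024 4-byte entries or 512
 * 8-byte entries.  Splitting a linear page index into (PTE page, entry) for
 * the 64-bit format might look like this; the helper names are assumptions
 * and the exact meaning of the index is not spelled out by this header.
 */
static NV_INLINE NvU32 vgpuPte64EntryIndex(NvU64 pageIndex)
{
    // Low bits select the entry within one PTE page
    return (NvU32)(pageIndex & NV_VGPU_PTE_64_INDEX_MASK);
}

static NV_INLINE NvU64 vgpuPte64PageIndex(NvU64 pageIndex)
{
    // Remaining bits select which PTE page holds the entry
    return pageIndex >> NV_VGPU_PTE_64_INDEX_SHIFT;
}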
|
||||
|
||||
/*
|
||||
* LOG message
|
||||
*/
|
||||
#define NV_VGPU_LOG_LEVEL_FATAL 0x00000000 /* RW--V */
|
||||
#define NV_VGPU_LOG_LEVEL_ERROR 0x00000001 /* RW--V */
|
||||
#define NV_VGPU_LOG_LEVEL_NOTICE 0x00000002 /* RW--V */
|
||||
#define NV_VGPU_LOG_LEVEL_STATUS 0x00000003 /* RW--V */
|
||||
#define NV_VGPU_LOG_LEVEL_DEBUG 0x00000004 /* RW--V */
|
||||
|
||||
typedef enum
|
||||
{
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS = 0,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_ZCULL = 1,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_GRAPHICS_PM = 2,
|
||||
RPC_GR_BUFFER_TYPE_COMPUTE_PREEMPT = 3,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_PATCH = 4,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_BUNDLE_CB = 5,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_PAGEPOOL_GLOBAL = 6,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_ATTRIBUTE_CB = 7,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_RTV_CB_GLOBAL = 8,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_POOL = 9,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_GFXP_CTRL_BLK = 10,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_FECS_EVENT = 11,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_PRIV_ACCESS_MAP = 12,
|
||||
RPC_GR_BUFFER_TYPE_GRAPHICS_MAX = 13,
|
||||
} RPC_GR_BUFFER_TYPE;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
FECS_ERROR_EVENT_TYPE_NONE = 0,
|
||||
FECS_ERROR_EVENT_TYPE_BUFFER_RESET_REQUIRED = 1,
|
||||
FECS_ERROR_EVENT_TYPE_BUFFER_FULL = 2,
|
||||
FECS_ERROR_EVENT_TYPE_MAX = 3,
|
||||
} FECS_ERROR_EVENT_TYPE;
|
||||
|
||||
/*
|
||||
* Maximum entries that can be sent in a single pass of RPC.
|
||||
*/
|
||||
#define VGPU_RPC_GET_P2P_CAPS_V2_MAX_GPUS_SQUARED_PER_RPC 512
|
||||
|
||||
/* Fetching NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT buffers in a single RPC message
 * would overflow the RPC buffer; accommodating that would require converting
 * the RPC to multipass. Currently, RM allocates only
 * (3 + GR_GLOBALCTX_BUFFER_COUNT) < 32 buffers, which fit in a single RPC
 * message, so the RPC is not converted to multipass and the max buffer count
 * per RPC is limited to 32.
 */
|
||||
#define GR_MAX_RPC_CTX_BUFFER_COUNT 32
|
||||
|
||||
/*
|
||||
* Enums specifying the BAR number that we are going to update its PDE
|
||||
*/
|
||||
typedef enum
|
||||
{
|
||||
NV_RPC_UPDATE_PDE_BAR_1,
|
||||
NV_RPC_UPDATE_PDE_BAR_2,
|
||||
NV_RPC_UPDATE_PDE_BAR_INVALID,
|
||||
} NV_RPC_UPDATE_PDE_BAR_TYPE;
|
||||
|
||||
typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
|
||||
{
|
||||
NvU32 headIndex;
|
||||
NvU32 maxHResolution;
|
||||
NvU32 maxVResolution;
|
||||
} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
|
||||
|
||||
typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
|
||||
{
|
||||
NvU32 numHeads;
|
||||
NvU32 maxNumHeads;
|
||||
} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
|
||||
|
||||
|
||||
/*
|
||||
* Maximum number of SMs that can be read in one RPC call to get error states
|
||||
*/
|
||||
|
||||
#define VGPU_RPC_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PER_RPC_v21_06 80
|
||||
|
||||
#endif // __vgpu_rpc_nv_headers_h__

src/nvidia/inc/kernel/vgpu/rpc_vgpu.h (new file, 184 lines)
@@ -0,0 +1,184 @@
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2008-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __vgpu_dev_nv_rpc_vgpu_h__
|
||||
#define __vgpu_dev_nv_rpc_vgpu_h__
|
||||
|
||||
|
||||
//******************************************************************************
|
||||
//
|
||||
// Declarations for the RPC VGPU module.
|
||||
//
|
||||
// Description:
|
||||
// This module declares the RPC interface functions/macros for VGPU.
|
||||
//
|
||||
//******************************************************************************
|
||||
|
||||
static NV_INLINE void NV_RM_RPC_ALLOC_LOCAL_USER(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_ALLOC_VIDMEM(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_ALLOC_VIRTMEM(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_MAP_MEMORY(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_UNMAP_MEMORY(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_DMA_FILL_PTE_MEM(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_CREATE_FB_SEGMENT(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_DESTROY_FB_SEGMENT(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_DEFERRED_API_CONTROL(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_REMOVE_DEFERRED_API(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_FREE_VIDMEM_VIRT(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_MAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_UNMAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_STATIC_INFO(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_UPDATE_PDE_2(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_TRANSLATE_GUEST_GPU_PTES(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_SET_SEMA_MEM_VALIDATION_STATE(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_RESET_CURRENT_GR_CONTEXT(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_ALLOC_CONTEXT_DMA(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_GET_PLCABLE_ADDRESS_KIND(OBJGPU *pGpu, ...) { }
|
||||
static NV_INLINE void NV_RM_RPC_UPDATE_GPU_PDES(OBJGPU *pGpu, ...) { }
|
||||
|
||||
/*
|
||||
* LOG RPC. This message produces a log line in the display plugin's log
|
||||
* XXX: make this accept variable parameters like printf
|
||||
*/
|
||||
#define NV_RM_RPC_LOG(pgpu, logstr, loglevel) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pgpu); \
|
||||
if (pRpc != NULL) \
|
||||
rpcLog_HAL(pgpu, pRpc, logstr, loglevel); \
|
||||
} while(0)
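/*
 * Example usage (illustrative only): emit a notice-level line into the display
 * plugin's log. The log-level values are the NV_VGPU_LOG_LEVEL_* defines from
 * the vGPU RPC headers above.
 *
 *     NV_RM_RPC_LOG(pGpu, "guest RM RPC initialized", NV_VGPU_LOG_LEVEL_NOTICE);
 */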
|
||||
|
||||
#define NV_RM_RPC_SET_GUEST_SYSTEM_INFO_EXT(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
/* VGPU only */ \
|
||||
OBJRPC *pRpc = GPU_GET_VGPU_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcSetGuestSystemInfoExt_HAL(pGpu,pRpc); \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_SET_SURFACE_PROPERTIES(pGpu, hClient, pParams, bSkipCompare, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcSetSurfaceProperties_HAL(pGpu, pRpc, hClient, pParams, bSkipCompare); \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_CLEANUP_SURFACE(pGpu, pParams, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcCleanupSurface_HAL(pGpu, pRpc, pParams); \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_SWITCH_TO_VGA(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcSwitchToVga_HAL(pGpu, pRpc); \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_GET_STATIC_DATA(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcGetStaticData_HAL(pGpu, pRpc); \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_GET_CONSOLIDATED_GR_STATIC_INFO(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcGetConsolidatedGrStaticInfo_HAL(pGpu, pRpc); \
|
||||
} while (0)
|
||||
|
||||
#define NV_RM_RPC_GET_ENCODER_CAPACITY(pGpu, hClient, hObject, encoderCapacity, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcGetEncoderCapacity_HAL(pGpu, pRpc, hClient, hObject, encoderCapacity); \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_DISABLE_CHANNELS(pGpu, bDisable, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcDisableChannels_HAL(pGpu, pRpc, bDisable); \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_SAVE_HIBERNATION_DATA(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcSaveHibernationData_HAL(pGpu, pRpc); \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_RESTORE_HIBERNATION_DATA(pGpu, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcRestoreHibernationData_HAL(pGpu, pRpc); \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_PERF_GET_LEVEL_INFO(pGpu, hClient, hObject, pParams, pPerfClkInfos, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcPerfGetLevelInfo_HAL(pGpu, pRpc, hClient, hObject, pParams, pPerfClkInfos); \
|
||||
else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while(0)
|
||||
|
||||
#define NV_RM_RPC_GET_ENGINE_UTILIZATION(pGpu, hClient, hObject, cmd, pParamStructPtr, paramSize, status) \
|
||||
do { \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
status = rpcGetEngineUtilizationWrapper_HAL(pGpu, pRpc, hClient, hObject, cmd, \
|
||||
pParamStructPtr, paramSize); \
|
||||
} while(0) \
|
||||
|
||||
#define NV_RM_RPC_API_CONTROL(pGpu, hClient, hObject, cmd, pParams, paramSize, status) \
|
||||
do \
|
||||
{ \
|
||||
OBJRPC *pRpc = GPU_GET_RPC(pGpu); \
|
||||
NV_ASSERT(pRpc != NULL); \
|
||||
if ((status == NV_OK) && (pRpc != NULL)) \
|
||||
{ \
|
||||
status = rpcRmApiControl_HAL(pGpu, pRpc, hClient, hObject, \
|
||||
cmd, pParams, paramSize); \
|
||||
} else if (pRpc == NULL) \
|
||||
status = NV_ERR_INSUFFICIENT_RESOURCES; \
|
||||
} while (0)
|
||||
|
||||
#endif // __vgpu_dev_nv_rpc_vgpu_h__

src/nvidia/inc/kernel/vgpu/sdk-structures.h (new file, 471 lines)
@@ -0,0 +1,471 @@
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef _RPC_SDK_STRUCTURES_H_
|
||||
#define _RPC_SDK_STRUCTURES_H_
|
||||
|
||||
#include <ctrl/ctrl83de.h>
|
||||
#include <ctrl/ctrla080.h>
|
||||
#include <ctrl/ctrlc36f.h>
|
||||
#include <ctrl/ctrlc637.h>
|
||||
#include <ctrl/ctrl0000/ctrl0000system.h>
|
||||
#include <ctrl/ctrl0080/ctrl0080nvjpg.h>
|
||||
#include <ctrl/ctrl0080/ctrl0080bsp.h>
|
||||
#include <ctrl/ctrl0080/ctrl0080dma.h>
|
||||
#include <ctrl/ctrl0080/ctrl0080fb.h>
|
||||
#include <ctrl/ctrl0080/ctrl0080gr.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080ce.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080bus.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080fifo.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080gr.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080fb.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080internal.h>
|
||||
#include <ctrl/ctrl83de/ctrl83dedebug.h>
|
||||
#include <ctrl/ctrl0080/ctrl0080fifo.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080nvlink.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080fla.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080internal.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080mc.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080grmgr.h>
|
||||
#include <ctrl/ctrl2080/ctrl2080ecc.h>
|
||||
#include <ctrl/ctrl0090.h>
|
||||
#include <ctrl/ctrl9096.h>
|
||||
#include <ctrl/ctrlb0cc.h>
|
||||
#include <ctrl/ctrla06f.h>
|
||||
#include <ctrl/ctrl00f8.h>
|
||||
#include <ctrl/ctrl90e6.h>
|
||||
|
||||
#include <class/cl2080.h>
|
||||
#include <class/cl0073.h>
|
||||
#include <class/clc670.h>
|
||||
#include <class/clc673.h>
|
||||
#include <class/clc67b.h>
|
||||
#include <class/clc67d.h>
|
||||
#include <class/clc67e.h>
|
||||
#include "rpc_headers.h"
|
||||
#include "nvctassert.h"
|
||||
#include "nv_vgpu_types.h"
|
||||
|
||||
|
||||
|
||||
typedef struct vmiopd_SM_info {
|
||||
NvU32 version;
|
||||
NvU32 regBankCount;
|
||||
NvU32 regBankRegCount;
|
||||
NvU32 maxWarpsPerSM;
|
||||
NvU32 maxThreadsPerWarp;
|
||||
NvU32 geomGsObufEntries;
|
||||
NvU32 geomXbufEntries;
|
||||
NvU32 maxSPPerSM;
|
||||
NvU32 rtCoreCount;
|
||||
} VMIOPD_GRSMINFO;
|
||||
|
||||
// NV_SCAL_FAMILY_MAX_FBPS 16
|
||||
#define MAX_FBPS 16 //Maximum number of FBPs
|
||||
|
||||
#define OBJ_MAX_HEADS_v03_00 4
|
||||
#define OBJ_MAX_HEADS_v24_08 8
|
||||
|
||||
// NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES(256) / NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES(32)
|
||||
#define MAX_ITERATIONS_DEVICE_INFO_TABLE 8
|
||||
|
||||
// NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES(512) / NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES(64)
|
||||
#define MAX_ITERATIONS_DYNAMIC_BLACKLIST 8
|
||||
|
||||
#define NV0000_GPUACCT_RPC_PID_MAX_QUERY_COUNT 1000
|
||||
|
||||
#define NV2080_CTRL_CLK_ARCH_MAX_DOMAINS_v1E_0D 32
|
||||
|
||||
#define NV_RM_RPC_NO_MORE_DATA_TO_READ 0
|
||||
#define NV_RM_RPC_MORE_RPC_DATA_TO_READ 1
|
||||
|
||||
//Maximum EXEC_PARTITIONS
|
||||
#define NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05 8
|
||||
|
||||
//Maximum ECC Addresses
|
||||
#define NV2080_CTRL_ECC_GET_LATEST_ECC_ADDRESSES_MAX_COUNT_v18_04 32
|
||||
|
||||
#define NV2080_CTRL_NVLINK_MAX_LINKS_v15_02 6
|
||||
#define NV2080_CTRL_NVLINK_MAX_LINKS_v1A_18 12
|
||||
#define NV2080_CTRL_NVLINK_MAX_LINKS_v23_04 24
|
||||
|
||||
#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v15_02 8
|
||||
#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v1F_0D 9
|
||||
|
||||
#define NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_v21_02 32
|
||||
#define VM_UUID_SIZE_v21_02 16
|
||||
|
||||
#define NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v1A_1D 96
|
||||
#define NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00 120
|
||||
#define NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D 24
|
||||
#define NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D 96
|
||||
#define NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D 8
|
||||
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_1B_04 (0x0000002C)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_1C_01 (0x00000030)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_1E_02 (0x00000032)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_21_01 (0x00000033)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_22_02 (0x00000034)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_23_00 (0x00000035)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_24_02 (0x00000036)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_24_03 (0x00000037)
|
||||
#define NV0080_CTRL_GR_INFO_MAX_SIZE_24_07 (0x00000038)
|
||||
#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES_1B_04 8
|
||||
#define NV2080_CTRL_INTERNAL_GR_MAX_SM_v1B_05 256
|
||||
#define NV2080_CTRL_INTERNAL_GR_MAX_SM_v1E_03 240
|
||||
#define NV2080_CTRL_INTERNAL_GR_MAX_GPC_v1B_05 8
|
||||
#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT_v1B_05 0x19
|
||||
#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT_v25_07 0x1a
|
||||
#define NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT_v1C_03 10
|
||||
#define NV2080_CTRL_INTERNAL_GR_MAX_GPC_v1C_03 12
|
||||
#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX_v1E_09 32
|
||||
#define NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL_v1F_0E 72
|
||||
#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE__SIZE_v20_04 6
|
||||
#define NVB0CC_MAX_CREDIT_INFO_ENTRIES_v21_08 63
|
||||
#define NV2080_CTRL_MIGRATABLE_OPS_ARRAY_MAX_v21_07 50
|
||||
#define NV2080_CTRL_MAX_PCES_v21_0A 32
|
||||
#define NV2080_CTRL_CE_CAPS_TBL_SIZE_v21_0A 2
|
||||
#define NV2080_CTRL_NVLINK_INBAND_MAX_DATA_SIZE_v26_05 1024
|
||||
|
||||
// Host USM type
|
||||
#define NV_VGPU_CONFIG_USM_TYPE_DEFAULT 0x00000000 /* R-XVF */
|
||||
#define NV_VGPU_CONFIG_USM_TYPE_NVS 0x00000001 /* R-XVF */
|
||||
#define NV_VGPU_CONFIG_USM_TYPE_QUADRO 0x00000002 /* R-XVF */
|
||||
#define NV_VGPU_CONFIG_USM_TYPE_GEFORCE 0x00000003 /* R-XVF */
|
||||
#define NV_VGPU_CONFIG_USM_TYPE_COMPUTE 0x00000004 /* R-XVF */
|
||||
|
||||
#define NV_ALLOC_STRUCTURE_SIZE_v26_00 56
|
||||
|
||||
// This intermediate RM-RPC structure is defined for making the RPC call from
// the guest: at most 4 KB of data can be passed to the plugin per call, and
// NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS is far larger than that.
// The structure mirrors the NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS
// RM control structure, with an added passIndex member identifying the index
// (into the full RM pid list on the host) from which data should be read next.
// The caller should initialize passIndex to NV_RM_RPC_MORE_RPC_DATA_TO_READ and
// keep making RPC calls until the RPC returns passIndex as
// NV_RM_RPC_NO_MORE_DATA_TO_READ.
|
||||
typedef struct
|
||||
{
|
||||
NvU32 gpuId;
|
||||
NvU32 passIndex;
|
||||
NvU32 pidTbl[NV0000_GPUACCT_RPC_PID_MAX_QUERY_COUNT];
|
||||
NvU32 pidCount;
|
||||
} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_RPC_EX;
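/*
 * Illustrative sketch only of the multipass protocol described above;
 * issueGpuAcctPidsRpc() is a hypothetical placeholder for the real versioned
 * RPC call and is assumed to fill pidTbl/pidCount and update passIndex.
 */
static NV_STATUS
collectAccountingPids(NvU32 gpuId, NvU32 *pOutPids, NvU32 maxPids, NvU32 *pOutCount)
{
    NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_RPC_EX params = {0};
    NV_STATUS status = NV_OK;

    params.gpuId     = gpuId;
    params.passIndex = NV_RM_RPC_MORE_RPC_DATA_TO_READ;
    *pOutCount       = 0;

    // Keep issuing passes until the host reports there is no more data to read.
    while (params.passIndex != NV_RM_RPC_NO_MORE_DATA_TO_READ)
    {
        status = issueGpuAcctPidsRpc(&params);   // hypothetical RPC wrapper
        if (status != NV_OK)
            return status;

        // Append this pass's PIDs to the caller's buffer.
        for (NvU32 i = 0; (i < params.pidCount) && (*pOutCount < maxPids); i++)
            pOutPids[(*pOutCount)++] = params.pidTbl[i];
    }

    return status;
}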
|
||||
|
||||
typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG_v03_00[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
|
||||
|
||||
typedef NvV32 NvRmctrlCmd;
|
||||
|
||||
struct pte_desc
|
||||
{
|
||||
NvU32 idr:2;
|
||||
NvU32 reserved1:14;
|
||||
NvU32 length:16;
|
||||
union {
|
||||
NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
|
||||
NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
|
||||
} pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
|
||||
};
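/*
 * Illustrative only (an assumption, not RM code): a consumer reads PTEs when
 * idr == 0 and PDEs (with idr levels of indirection) otherwise, as the union
 * comments describe. The index i is assumed to stay below the number of
 * pte_pde[] elements carried by the descriptor.
 */
static NV_INLINE NvU64 pteDescEntry(const struct pte_desc *pDesc, NvU32 i)
{
    return (pDesc->idr == 0) ? pDesc->pte_pde[i].pte : pDesc->pte_pde[i].pde;
}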
|
||||
|
||||
typedef struct VGPU_BSP_CAPS
|
||||
{
|
||||
NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE];
|
||||
} VGPU_BSP_CAPS;
|
||||
|
||||
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v15_01 (0x00000014)
|
||||
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v1A_04 (0x00000014)
|
||||
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v1C_09 (0x00000016)
|
||||
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v20_03 (0x00000018)
|
||||
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v24_06 (0x00000019)
|
||||
#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v26_02 (0x0000001E)
|
||||
|
||||
#define NV2080_ENGINE_TYPE_LAST_v18_01 (0x0000002a)
|
||||
#define NV2080_ENGINE_TYPE_LAST_v1C_09 (0x00000034)
|
||||
|
||||
#define NV2080_ENGINE_TYPE_LAST_v1A_00 (0x2a)
|
||||
|
||||
#define NV2080_ENGINE_TYPE_COPY_SIZE_v1A_0D (10)
|
||||
#define NV2080_ENGINE_TYPE_COPY_SIZE_v22_00 (10)
|
||||
#define NV2080_ENGINE_TYPE_COPY_SIZE_v24_09 (64)
|
||||
|
||||
#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE_v1A_0F (0x00000033)
|
||||
#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE_v1C_09 (0x00000034)
|
||||
|
||||
//Maximum GMMU_FMT_LEVELS
|
||||
#define GMMU_FMT_MAX_LEVELS_v05_00 5
|
||||
#define GMMU_FMT_MAX_LEVELS_v1A_12 6
|
||||
|
||||
//Maximum MMU FMT sub levels
|
||||
#define MMU_FMT_MAX_SUB_LEVELS_v09_02 2
|
||||
|
||||
//Maximum number of supported TDP clients
|
||||
#define NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS_v1A_1F 5
|
||||
|
||||
//Maximum number of SMs whose error state can be read in single call
|
||||
#define NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL_v16_03 100
|
||||
|
||||
// Workaround for bug 200702083 (#15)
|
||||
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1A_15 0x2F
|
||||
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1A_24 0x33
|
||||
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1E_01 0x35
|
||||
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1F_0F 0x36
|
||||
#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_24_0A 0x37
|
||||
|
||||
#define NV2080_CTRL_PERF_MAX_LIMITS_v1C_0B 0x100
|
||||
|
||||
// Maximum number of guest addresses that can be queried in one RPC.
// The number below is the maximum count of guest addresses and their
// state that can be returned in a single 4K (RPC page size) iteration.
|
||||
#define GET_PLCABLE_MAX_GUEST_ADDRESS_v1D_05 60
|
||||
|
||||
//
|
||||
// Versioned define for
|
||||
// NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES
|
||||
//
|
||||
#define NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES_v1E_07 2
|
||||
|
||||
// Versioned define for
|
||||
// NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT
|
||||
#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT_v1F_08 13
|
||||
|
||||
#define MAX_NVDEC_ENGINES_V1A_07 5
|
||||
#define MAX_NVDEC_ENGINES_V25_00 8
|
||||
#define NV0080_CTRL_MSENC_CAPS_TBL_SIZE_V25_00 4
|
||||
#define NV0080_CTRL_NVJPG_CAPS_TBL_SIZE_V18_0C 9
|
||||
#define NV0080_CTRL_BSP_CAPS_TBL_SIZE_V09_10 8
|
||||
#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS_V25_01 0x40
|
||||
#define NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES_V25_05 256
|
||||
|
||||
#define NV0080_CTRL_GR_CAPS_TBL_SIZE_v25_0E 23
|
||||
#define NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_v25_0E 5
|
||||
#define RPC_GR_BUFFER_TYPE_GRAPHICS_MAX_v25_0E 13
|
||||
#define NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COUNT_v1A_07 4
|
||||
|
||||
#define NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v25_11 0x00000041
|
||||
|
||||
#define NV2080_CTRL_BOARDOBJGRP_E255_MAX_OBJECTS_v06_01 (255U)
|
||||
|
||||
typedef struct _GPU_PARTITION_INFO
|
||||
{
|
||||
NvU32 swizzId;
|
||||
NvU32 grEngCount;
|
||||
NvU32 veidCount;
|
||||
NvU32 ceCount;
|
||||
NvU32 gpcCount;
|
||||
NvU32 virtualGpcCount;
|
||||
NvU32 gfxGpcCount;
|
||||
NvU32 gpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS];
|
||||
NvU32 virtualGpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS];
|
||||
NvU32 gfxGpcPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS];
|
||||
NvU32 veidsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS];
|
||||
NvU32 nvDecCount;
|
||||
NvU32 nvEncCount;
|
||||
NvU32 nvJpgCount;
|
||||
NvU32 partitionFlag;
|
||||
NvU32 smCount;
|
||||
NvU32 nvOfaCount;
|
||||
NvU64 memSize;
|
||||
NvBool bValid;
|
||||
NV2080_CTRL_GPU_PARTITION_SPAN span;
|
||||
NvU64 validCTSIdMask;
|
||||
} GPU_PARTITION_INFO;
|
||||
|
||||
typedef struct _GPU_EXEC_PARTITION_INFO
|
||||
{
|
||||
NvU32 execPartCount;
|
||||
NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS];
|
||||
NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS];
|
||||
} GPU_EXEC_PARTITION_INFO;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
NvBool bGpuSupportsFabricProbe;
|
||||
} VGPU_P2P_CAPABILITY_PARAMS;
|
||||
|
||||
typedef struct _GPU_EXEC_SYSPIPE_INFO {
|
||||
NvU32 execPartCount;
|
||||
NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS];
|
||||
NvU32 syspipeId[NVC637_CTRL_MAX_EXEC_PARTITIONS];
|
||||
} GPU_EXEC_SYSPIPE_INFO;
|
||||
|
||||
typedef struct _VGPU_STATIC_PROPERTIES
|
||||
{
|
||||
NvU32 encSessionStatsReportingState;
|
||||
NvBool bProfilingTracingEnabled;
|
||||
NvBool bDebuggingEnabled;
|
||||
NvU32 channelCount;
|
||||
NvBool bPblObjNotPresent; //Valid only in case of GA100 SRIOV Heavy
|
||||
NvU64 vmmuSegmentSize;
|
||||
} VGPU_STATIC_PROPERTIES;
|
||||
|
||||
struct _vgpu_static_info
|
||||
{
|
||||
NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
|
||||
NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
|
||||
NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS fbLtcInfoForFbp[MAX_FBPS];
|
||||
NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS mcStaticIntrTable;
|
||||
NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS grZcullInfo;
|
||||
NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS fifoDeviceInfoTable[MAX_ITERATIONS_DEVICE_INFO_TABLE];
|
||||
NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS fbDynamicBlacklistedPages[MAX_ITERATIONS_DYNAMIC_BLACKLIST];
|
||||
NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS fifoLatencyBufferSize[NV2080_ENGINE_TYPE_LAST];
|
||||
NV2080_CTRL_CE_GET_CAPS_V2_PARAMS ceCaps[NV2080_ENGINE_TYPE_COPY_SIZE];
|
||||
NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS nvlinkCaps;
|
||||
NV2080_CTRL_BUS_GET_INFO_V2_PARAMS busGetInfoV2;
|
||||
NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS grSmIssueRateModifier;
|
||||
NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS_PARAMS pcieSupportedGpuAtomics;
|
||||
NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS ceGetAllCaps;
|
||||
NV2080_CTRL_CMD_BUS_GET_C2C_INFO_PARAMS c2cInfo;
|
||||
NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS vgxSystemInfo;
|
||||
NVA080_CTRL_VGPU_GET_CONFIG_PARAMS vgpuConfig;
|
||||
NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
|
||||
NvU64 engineList;
|
||||
NvU32 pcieGpuLinkCaps;
|
||||
NvBool bFlaSupported;
|
||||
NV2080_CTRL_FLA_GET_RANGE_PARAMS flaInfo;
|
||||
NvBool bPerRunlistChannelRamEnabled;
|
||||
NvU32 subProcessIsolation;
|
||||
VGPU_STATIC_PROPERTIES vgpuStaticProperties;
|
||||
NvU64 maxSupportedPageSize; // Only used pre-SRIOV/SRIOV-heavy
|
||||
GPU_PARTITION_INFO gpuPartitionInfo; // Default (Admin created) EXEC-I PARTITION INFO
|
||||
NvBool bC2CLinkUp;
|
||||
NvBool bSelfHostedMode;
|
||||
NvBool bLocalEgmEnabled;
|
||||
NvU32 localEgmPeerId;
|
||||
NvU32 ceFaultMethodBufferDepth;
|
||||
NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
|
||||
NvBool bPerSubCtxheaderSupported;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS grInfoParams;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS ctxBuffInfo;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS ppcMaskParams;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS globalSmOrder;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS smIssueRateModifier;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS floorsweepMaskParams;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS fecsRecordSize;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS fecsTraceDefines;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS pdbTableParams;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS ropInfoParams;
|
||||
NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS zcullInfoParams;
|
||||
NV2080_CTRL_GPU_GET_COMPUTE_PROFILES_PARAMS ciProfiles;
|
||||
NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS mcEngineNotificationIntrVectors;
|
||||
NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS eccStatus;
|
||||
NvBool guestManagedHwAlloc;
|
||||
NvU8 jpegCaps[NV0080_CTRL_NVJPG_CAPS_TBL_SIZE];
|
||||
NV0080_CTRL_MSENC_GET_CAPS_V2_PARAMS nvencCaps;
|
||||
VGPU_BSP_CAPS vgpuBspCaps[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS];
|
||||
NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS constructedFalconInfo;
|
||||
GPU_EXEC_PARTITION_INFO execPartitionInfo;
|
||||
NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
|
||||
NvU64 fbTaxLength;
|
||||
NvU64 fbLength;
|
||||
NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
|
||||
NvU32 grBufferSize[RPC_GR_BUFFER_TYPE_GRAPHICS_MAX];
|
||||
NvU32 fbioMask;
|
||||
NvBool bSplitVasBetweenServerClientRm;
|
||||
NvU8 adapterName[NV2080_GPU_MAX_NAME_STRING_LENGTH];
|
||||
NvU16 adapterName_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
|
||||
NvU8 shortGpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
|
||||
NvBool poisonFuseEnabled;
|
||||
NvBool bAtsSupported;
|
||||
NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS deviceInfoTable;
|
||||
NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS memsysStaticConfig;
|
||||
VGPU_P2P_CAPABILITY_PARAMS p2pCaps;
|
||||
NvU32 fbBusWidth;
|
||||
NvU32 fbpMask;
|
||||
NvU32 ltcMask;
|
||||
NvU32 ltsCount;
|
||||
NvU32 sizeL2Cache;
|
||||
NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS zbcTableSizes[NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COUNT];
|
||||
NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS busGetPcieReqAtomicsCaps;
|
||||
NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS masterGetVfErrCntIntMsk;
|
||||
GPU_EXEC_SYSPIPE_INFO execSyspipeInfo;
|
||||
};
|
||||
|
||||
typedef struct _vgpu_static_info VGPU_STATIC_INFO, VGPU_STATIC_INFO2;
|
||||
typedef struct _vgpu_static_info VGPU_STATIC_DATA;
|
||||
|
||||
typedef NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS VGPU_FB_GET_LTC_INFO_FOR_FBP[MAX_FBPS];
|
||||
typedef VGPU_BSP_CAPS VGPU_BSP_GET_CAPS[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS];
|
||||
typedef NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS VGPU_FIFO_GET_DEVICE_INFO_TABLE[MAX_ITERATIONS_DEVICE_INFO_TABLE];
|
||||
typedef NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS VGPU_FB_GET_DYNAMIC_BLACKLISTED_PAGES[MAX_ITERATIONS_DYNAMIC_BLACKLIST];
|
||||
typedef NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS VGPU_GET_LATENCY_BUFFER_SIZE[NV2080_ENGINE_TYPE_LAST];
|
||||
typedef NV2080_CTRL_CE_GET_CAPS_V2_PARAMS VGPU_CE_GET_CAPS_V2[NV2080_ENGINE_TYPE_COPY_SIZE];
|
||||
|
||||
typedef struct GSP_FIRMWARE GSP_FIRMWARE;
|
||||
|
||||
ct_assert(NV2080_CTRL_GPU_ECC_UNIT_COUNT == NV2080_CTRL_GPU_ECC_UNIT_COUNT_v26_02);
|
||||
ct_assert(NV2080_ENGINE_TYPE_LAST == 0x40);
|
||||
ct_assert(NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE == NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE_v1C_09);
|
||||
ct_assert(NV2080_CTRL_FB_FS_INFO_MAX_QUERIES == NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v24_00);
|
||||
ct_assert(NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE == NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D);
|
||||
ct_assert(NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES == NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D);
|
||||
ct_assert(NV2080_CTRL_GRMGR_MAX_SMC_IDS == NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D);
|
||||
ct_assert((NV0080_CTRL_GR_INFO_INDEX_MAX + 1) == NV0080_CTRL_GR_INFO_MAX_SIZE_24_07);
|
||||
ct_assert(NV2080_CTRL_INTERNAL_GR_MAX_ENGINES == NV2080_CTRL_INTERNAL_GR_MAX_ENGINES_1B_04);
|
||||
ct_assert(NV2080_CTRL_INTERNAL_GR_MAX_SM == NV2080_CTRL_INTERNAL_GR_MAX_SM_v1E_03);
|
||||
ct_assert(NV2080_CTRL_INTERNAL_GR_MAX_GPC == NV2080_CTRL_INTERNAL_GR_MAX_GPC_v1C_03);
|
||||
ct_assert(NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT ==
|
||||
NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT_v25_07);
|
||||
ct_assert(NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT == NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT_v1C_03);
|
||||
ct_assert(NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS == NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS_v1A_1F);
|
||||
ct_assert(NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL == NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL_v16_03);
|
||||
ct_assert(VGPU_RPC_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PER_RPC_v21_06 < NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL_v16_03);
|
||||
ct_assert(NV2080_CTRL_FB_INFO_MAX_LIST_SIZE == NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_24_0A);
|
||||
ct_assert(NV2080_CTRL_GPU_MAX_SMC_IDS == 8);
|
||||
ct_assert(NV2080_GPU_MAX_GID_LENGTH == 0x000000100);
|
||||
ct_assert(NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES == 16);
|
||||
ct_assert(NV2080_GPU_MAX_NAME_STRING_LENGTH == 0x0000040);
|
||||
ct_assert(NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_MAX_ENGINES == 256);
|
||||
ct_assert(NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX == NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX_v1E_09);
|
||||
ct_assert(NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES == 256);
|
||||
ct_assert(NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES == 32);
|
||||
ct_assert(NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES == 16);
|
||||
ct_assert(NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA == 2);
|
||||
ct_assert(NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN == 16);
|
||||
ct_assert(NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES == 64);
|
||||
ct_assert(NV2080_CTRL_CE_CAPS_TBL_SIZE == NV2080_CTRL_CE_CAPS_TBL_SIZE_v21_0A);
|
||||
ct_assert(NV2080_ENGINE_TYPE_COPY_SIZE == NV2080_ENGINE_TYPE_COPY_SIZE_v24_09);
|
||||
ct_assert(NV2080_ENGINE_TYPE_NVENC_SIZE <= 4);
|
||||
ct_assert(NV2080_ENGINE_TYPE_NVDEC_SIZE == 8);
|
||||
ct_assert(NV2080_ENGINE_TYPE_NVJPEG_SIZE == 8);
|
||||
ct_assert(NV2080_ENGINE_TYPE_GR_SIZE == 8);
|
||||
ct_assert(NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE == NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v1F_0D);
|
||||
ct_assert(NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS == NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_v21_02);
|
||||
ct_assert(VM_UUID_SIZE == VM_UUID_SIZE_v21_02);
|
||||
ct_assert(NV2080_CTRL_MAX_PCES == NV2080_CTRL_MAX_PCES_v21_0A);
|
||||
ct_assert(NV0080_CTRL_MSENC_CAPS_TBL_SIZE_V25_00 == NV0080_CTRL_MSENC_CAPS_TBL_SIZE);
|
||||
ct_assert(MAX_NVDEC_ENGINES_V1A_07 <= NV2080_CTRL_CMD_INTERNAL_MAX_BSPS);
|
||||
ct_assert(MAX_NVDEC_ENGINES_V25_00 == NV2080_CTRL_CMD_INTERNAL_MAX_BSPS);
|
||||
ct_assert(NV0080_CTRL_NVJPG_CAPS_TBL_SIZE_V18_0C == NV0080_CTRL_NVJPG_CAPS_TBL_SIZE);
|
||||
ct_assert(NV0080_CTRL_BSP_CAPS_TBL_SIZE_V09_10 == NV0080_CTRL_BSP_CAPS_TBL_SIZE);
|
||||
ct_assert(NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS_V25_01 == NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS);
|
||||
ct_assert(NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES_V25_05 == NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES);
|
||||
ct_assert(NV0080_CTRL_GR_CAPS_TBL_SIZE_v25_0E == NV0080_CTRL_GR_CAPS_TBL_SIZE);
|
||||
ct_assert(NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_v25_0E == NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL);
|
||||
ct_assert(RPC_GR_BUFFER_TYPE_GRAPHICS_MAX_v25_0E == RPC_GR_BUFFER_TYPE_GRAPHICS_MAX);
|
||||
ct_assert(NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COUNT_v1A_07 == NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COUNT);
|
||||
ct_assert(NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05 == NVC637_CTRL_MAX_EXEC_PARTITIONS);
|
||||
ct_assert(NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE_v25_11 == NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE);
|
||||
|
||||
#endif /*_RPC_SDK_STRUCTURES_H_*/

src/nvidia/inc/kernel/vgpu/vgpu_events.h (new file, 272 lines)
@@ -0,0 +1,272 @@
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2008-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
//******************************************************************************
|
||||
//
|
||||
// Declarations for the VGPU event module.
|
||||
//
|
||||
// Description:
|
||||
// This module declares the VGPU event interface functions/macros.
|
||||
//
|
||||
//******************************************************************************
|
||||
|
||||
#ifndef VGPU_EVENTS_H
|
||||
#define VGPU_EVENTS_H
|
||||
|
||||
#include "rmconfig.h"
|
||||
|
||||
#include "ctrl/ctrl2080/ctrl2080bios.h"
|
||||
#include "ctrl/ctrl2080/ctrl2080fb.h"
|
||||
#include "ctrl/ctrl2080/ctrl2080gpu.h"
|
||||
#include "ctrl/ctrl2080/ctrl2080gr.h"
|
||||
#include "ctrl/ctrl0080/ctrl0080nvjpg.h"
|
||||
#include "vgpu/rpc_headers.h"
|
||||
|
||||
#include "vgpu/sdk-structures.h"
|
||||
#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
|
||||
#include "vgpu/dev_vgpu.h"
|
||||
|
||||
typedef MC_ENGINE_BITVECTOR *PMC_ENGINE_BITVECTOR;
|
||||
typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE;
|
||||
typedef struct KERNEL_HOST_VGPU_DEVICE KERNEL_HOST_VGPU_DEVICE;
|
||||
typedef struct _object_vgpu OBJVGPU, *POBJVGPU;
|
||||
typedef struct Device Device;
|
||||
|
||||
// Create and destroy OBJVGPU *object
|
||||
NV_STATUS vgpuCreateObject(OBJGPU *pGpu);
|
||||
void vgpuDestructObject(OBJGPU *pGpu);
|
||||
|
||||
// Overwrite registry keys
|
||||
void vgpuInitRegistryOverWrite(OBJGPU *pGpu);
|
||||
|
||||
// Get the device pointer from the calling context
|
||||
Device *vgpuGetCallingContextDevice(OBJGPU *pGpu);
|
||||
|
||||
// Get the host VGPU device pointer from the calling context
|
||||
NV_STATUS vgpuGetCallingContextHostVgpuDevice(OBJGPU *pGpu, HOST_VGPU_DEVICE **ppHostVgpuDevice);
|
||||
NV_STATUS vgpuGetCallingContextKernelHostVgpuDevice(OBJGPU *pGpu, KERNEL_HOST_VGPU_DEVICE **ppKernelHostVgpuDevice);
|
||||
|
||||
// Get the GFID from the VGPU device of the calling context
|
||||
NV_STATUS vgpuGetCallingContextGfid(OBJGPU *pGpu, NvU32 *pGfid);
|
||||
|
||||
// Check if the calling context is the VGPU plugin
|
||||
NV_STATUS vgpuIsCallingContextPlugin(OBJGPU *pGpu, NvBool *pIsCallingContextPlugin);
|
||||
|
||||
// Get the GFID from DeviceInfo
|
||||
NV_STATUS vgpuGetGfidFromDeviceInfo(OBJGPU *pGpu, Device *pDevice, NvU32 *pGfid);
|
||||
|
||||
// Check if a VGPU event is pending
|
||||
NvBool vgpuGetPendingEvent(OBJGPU *pGpu, THREAD_STATE_NODE *pThreadState);
|
||||
|
||||
// Service VGPU events
|
||||
void vgpuService(OBJGPU *pGpu);
|
||||
|
||||
#define GPU_GET_VGPU(pGpu) (NvVGPU_Table[gpuGetInstance(pGpu)])
|
||||
|
||||
#define NV_VGPU_MAX_INSTANCES 16
|
||||
|
||||
extern OBJVGPU *NvVGPU_Table[NV_VGPU_MAX_INSTANCES];
|
||||
|
||||
#define NV_VGPU_RPC_TIMEOUT_USEC(pGpu) (10 * 1000000)
|
||||
|
||||
struct _vgpu_last_surface_info
|
||||
{
|
||||
/* Stores last primary surface information in displayless mode */
|
||||
NVA080_CTRL_VGPU_DISPLAY_SET_SURFACE_PROPERTIES last_surface;
|
||||
/* cached headClient */
|
||||
NvHandle hClient;
|
||||
};
|
||||
typedef struct _vgpu_last_surface_info VGPU_LAST_SURFACE_INFO;
|
||||
|
||||
typedef struct vgpu_sysmem_segment_node {
|
||||
NvU64 seg_start_address; // start page of the segment
|
||||
NvU64 next_free_address; // Next free page of the segment
|
||||
NvU32 page_count; // total number of pages in segment
|
||||
NvU64 *pfn_list; // list of sysmem pages to which the segment is mapped
|
||||
ListNode vgpuSysmemSegmentNode;
|
||||
} VGPU_SYSMEM_SEGMENT_NODE, *VGPU_SYSMEM_SEGMENT_NODE_P;
|
||||
|
||||
MAKE_INTRUSIVE_LIST(VGPU_SYSMEM_SEGMENT_NODE_LIST, VGPU_SYSMEM_SEGMENT_NODE, vgpuSysmemSegmentNode);
|
||||
|
||||
typedef struct
|
||||
{
|
||||
MEMORY_DESCRIPTOR *pMemDesc;
|
||||
NvU32 *pMemory;
|
||||
void *pPriv;
|
||||
NvU64 pfn;
|
||||
} VGPU_MEM_INFO;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
VGPU_MEM_INFO mem;
|
||||
NvU32 get;
|
||||
NvU32 put;
|
||||
} VGPU_MEM_RING_INFO;
|
||||
|
||||
//
|
||||
// Structure to hold hibernation state data
|
||||
//
|
||||
typedef struct vgpu_hibernation_data
|
||||
{
|
||||
NvU32 size;
|
||||
NvU8 *buffer;
|
||||
}VGPU_HIBERNATION_DATA;
|
||||
|
||||
//
|
||||
// per-vGPU device instance
|
||||
//
|
||||
struct _object_vgpu
|
||||
{
|
||||
VGPU_MEM_RING_INFO eventRing;
|
||||
VGPU_MEM_RING_INFO sendRing;
|
||||
VGPU_MEM_RING_INFO recvRing;
|
||||
VGPU_MEM_INFO sharedMemory;
|
||||
|
||||
VGPU_MEM_INFO gspCtrlBufInfo;
|
||||
VGPU_MEM_INFO gspResponseBufInfo;
|
||||
VGPU_MEM_INFO gspMessageBuf;
|
||||
|
||||
// CPU plugin shared memory buffer
|
||||
NvU32 *shared_memory;
|
||||
|
||||
// Shared memory format for Guest RM
|
||||
VGPU_GSP_CTRL_BUF_RM *gspCtrlBuf;
|
||||
VGPU_GSP_RESPONSE_BUF_RM *gspResponseBuf;
|
||||
|
||||
NvBool bGspPlugin;
|
||||
NvBool bIsBar2Physical;
|
||||
// Start offset of FB to use in Physical BAR2 mode
|
||||
NvU64 allocFbOffsetBar2Physical;
|
||||
|
||||
// Message sequence counter
|
||||
NvU32 sequence_base;
|
||||
NvU32 sequence_gsp_request;
|
||||
|
||||
/* Stores last primary surface information in displayless mode */
|
||||
VGPU_LAST_SURFACE_INFO last_surface_info;
|
||||
|
||||
    /* Flag indicates whether VNC support is enabled */
|
||||
NvBool bVncSupported;
|
||||
/* Flag indicates current VNC state */
|
||||
NvBool bVncConnected;
|
||||
    /* Size of the staging buffer */
|
||||
NvU32 staging_buffer_size;
|
||||
/* Stores the timestamp of the latest FB usage update to host */
|
||||
NvU64 last_fb_update_timestamp;
|
||||
/* Stores the value of the latest FB usage update to host */
|
||||
NvU64 last_fb_used_value;
|
||||
    /* GET/PUT pointers are inside the rings */
|
||||
NvBool gpInRing;
|
||||
/* Get static information from host RM or plugin */
|
||||
VGPU_STATIC_INFO _vgpuStaticInfo; // Always use GPU_GET_STATIC_INFO accessor.
|
||||
/* Stores whether interrupt using shared memory is active */
|
||||
NvBool shmInterruptActive;
|
||||
/* Cache NV_VGPU_CONFIG_USM_TYPE value in the Guest */
|
||||
NvU32 vgpuConfigUsmType;
|
||||
/* Cache ECC supported value in the Guest */
|
||||
NvBool bECCSupported;
|
||||
/* Cache ECC status value in the Guest */
|
||||
NvBool bECCEnabled;
|
||||
/* RPC fully initialized */
|
||||
NvBool bRpcInitialized;
|
||||
/* GSP buffers initialized */
|
||||
NvBool bGspBuffersInitialized;
|
||||
|
||||
/* RPC HAL objects */
|
||||
struct OBJRPC *pRpc;
|
||||
|
||||
/* Stores whether page retirement is enabled or not */
|
||||
NvBool page_retirement_enabled;
|
||||
/* GR engine index */
|
||||
NvU32 grIndex;
|
||||
|
||||
VGPU_SYSMEM_SEGMENT_NODE_LIST listVgpuSysmemSegments;
|
||||
|
||||
VGPU_HIBERNATION_DATA hibernationData;
|
||||
|
||||
};
|
||||
|
||||
//
|
||||
// (1) One bit represents one 4K sysmem page = 2^12.
// (2) One 4K page of bitmap holds 32K bits of (1), representing 128MB of sysmem = 2^27.
// (3) One pass transfers 512 PFNs of (2) from guest to plugin, representing 64GB of sysmem = 2^36.
// (4) Provision is made for 16384 passes (NV_VGPU_SYSMEM_BITMAP_PASS_ID),
//     which covers up to 1 PB of guest PA.
|
||||
//
|
||||
#define MAX_PFNS_PER_4K_PAGE 512
|
||||
#define MAX_PFNS_PER_SYSMEM_BITMAP_NODE (16 * 1024 * 1024) // Max PFNs that can be tracked per node (64 GB memory)
|
||||
#define MAX_SYSMEM_PFN_BITMAP_NODE 16384
|
||||
|
||||
#define _UINT64_SIZE 64
|
||||
#define _UINT64_SHIFT 6
|
||||
|
||||
#define PAGE_BITIDX64(n) ((n) & (_UINT64_SIZE - 1))
|
||||
#define PAGE_MAPIDX64(n) ((n) >> _UINT64_SHIFT)
|
||||
|
||||
#define SETBIT64(bits, offset) (bits | NVBIT64(offset))
|
||||
#define CLEARBIT64(bits, offset) (bits & ~NVBIT64(offset))
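/*
 * A minimal sketch (markSysmemPfn() is a hypothetical helper) of how the
 * macros above index the bitmap: bit (pfn % 64) of word (pfn / 64) tracks one
 * 4K sysmem page.
 */
static NV_INLINE void markSysmemPfn(NvU64 *sysmemPfnMap, NvU64 pfn, NvBool bAlloc)
{
    NvU64 mapIdx = PAGE_MAPIDX64(pfn);   // which 64-bit word of the bitmap
    NvU64 bitIdx = PAGE_BITIDX64(pfn);   // which bit within that word

    if (bAlloc)
        sysmemPfnMap[mapIdx] = SETBIT64(sysmemPfnMap[mapIdx], bitIdx);
    else
        sysmemPfnMap[mapIdx] = CLEARBIT64(sysmemPfnMap[mapIdx], bitIdx);
}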
|
||||
|
||||
typedef struct vgpu_sysmem_pfn_bitmap_node {
|
||||
NvU64 *sysmemPfnMap; // BitMap to track the sysmem PFN allocation, shared with vGPU plugin, Size = 64GB
|
||||
NvU64 *sysmemPfnMap_priv;
|
||||
MEMORY_DESCRIPTOR *pMemDesc_sysmemPfnMap; // Describe shared page
|
||||
NvU64 nodeStartPfn;
|
||||
NvU64 nodeEndPfn;
|
||||
NvU64 sizeInBytes;
|
||||
NvU32 index;
|
||||
ListNode listNode; // For intrusive lists
|
||||
} VGPU_SYSMEM_PFN_BITMAP_NODE, * VGPU_SYSMEM_PFN_BITMAP_NODE_P;
|
||||
|
||||
MAKE_INTRUSIVE_LIST(VGPU_SYSMEM_PFN_BITMAP_NODE_LIST, VGPU_SYSMEM_PFN_BITMAP_NODE, listNode);
|
||||
|
||||
typedef struct vgpu_sysmem_pfn_info {
|
||||
NvU64 guestMaxPfn; // Max guest PFN; Initialized for 64 GB RAM
|
||||
NvU64 sizeInBytes;
|
||||
NvU16 *sysmemPfnRefCount; // An array to store the Guest PFN ref count, Size = guestMaxPfn
|
||||
NvBool bSysmemPfnInfoInitialized;
|
||||
|
||||
NvU64 *sysmemPfnRing; // Ring to pass the PFNs of 4K chunks of shared memory
|
||||
NvU64 *sysmemPfnRing_priv;
|
||||
NvU64 sysmemPfnRing_pfn;
|
||||
MEMORY_DESCRIPTOR *pMemDesc_sysmemPfnRing; // Describe shared page
|
||||
|
||||
VGPU_SYSMEM_PFN_BITMAP_NODE_LIST listVgpuSysmemPfnBitmapHead;
|
||||
VGPU_SYSMEM_PFN_BITMAP_NODE_P bitmapNodes[MAX_SYSMEM_PFN_BITMAP_NODE];
|
||||
} VGPU_SYSMEM_PFN_INFO;
|
||||
|
||||
typedef VGPU_SYSMEM_PFN_INFO* VGPU_SYSMEM_PFN_INFO_P;
|
||||
|
||||
extern VGPU_SYSMEM_PFN_INFO vgpuSysmemPfnInfo;
|
||||
|
||||
NV_STATUS nv0000CtrlCmdDiagProfileRpc(RmCtrlParams *pRmCtrlParams);
|
||||
NV_STATUS nv0000CtrlCmdDiagDumpRpc(RmCtrlParams *pRmCtrlParams);
|
||||
|
||||
#define shm32(x) (*(pVGpu->shared_memory + ((x) / sizeof(NvU32))))
|
||||
|
||||
// Initialize and free event infrastructure
|
||||
NV_STATUS _setupEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
|
||||
NV_STATUS _teardownEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
|
||||
NV_STATUS _setupGspEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
|
||||
void _teardownGspEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu);
|
||||
|
||||
#endif // VGPU_EVENTS_H

src/nvidia/inc/kernel/vgpu/vgpu_guest_pma_scrubber.h (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
//******************************************************************************
|
||||
//
|
||||
// Declarations for VGPU PMA Guest Scrubber Shared memory structures.
|
||||
//
|
||||
// Description:
|
||||
// This module declares the shared memory structures for VGPU PMA Guest
|
||||
// Scrubber.
|
||||
//
|
||||
//******************************************************************************
|
||||
|
||||
#ifndef __vgpu_pma_guest_scrubber_h__
|
||||
#define __vgpu_pma_guest_scrubber_h__
|
||||
|
||||
#include "gpu/mem_mgr/rm_page_size.h"
|
||||
|
||||
#define VGPU_GUEST_PMA_MAX_SCRUB_ITEMS 4096
|
||||
#define VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_SIZE ((sizeof(VGPU_SCRUB_NODE) * VGPU_GUEST_PMA_MAX_SCRUB_ITEMS) + RM_PAGE_SIZE)
|
||||
#define VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_PFNS (NV_DIV_AND_CEIL(VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_SIZE, RM_PAGE_SIZE))
|
||||
|
||||
typedef struct VGPU_SCRUB_NODE {
|
||||
volatile NvU32 workId; // The 32 bit ID assigned to each work
|
||||
    volatile NvU64 base;   // The base address from which the scrub starts
|
||||
volatile NvU64 size; // The size of a scrub work
|
||||
} VGPU_SCRUB_NODE;
|
||||
|
||||
typedef struct VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER {
|
||||
volatile NvU32 lastSubmittedWorkId;
|
||||
volatile NvU32 lastSWSemaphoreDone;
|
||||
volatile NvU64 scrubberGetIdx;
|
||||
volatile NvU64 scrubberPutIdx;
|
||||
} VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER;
|
||||
|
||||
typedef struct VGPU_GUEST_PMA_SCRUB_BUFFER_RING {
|
||||
VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER *pScrubBuffRingHeader;
|
||||
VGPU_SCRUB_NODE *pScrubList;
|
||||
} VGPU_GUEST_PMA_SCRUB_BUFFER_RING;
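/*
 * Purely illustrative sketch, not the RM implementation: one conventional way
 * a guest-side producer could queue a scrub request into the shared ring,
 * assuming the guest owns scrubberPutIdx and the plugin owns scrubberGetIdx.
 */
static NV_INLINE NvBool submitScrubWork(VGPU_GUEST_PMA_SCRUB_BUFFER_RING *pRing,
                                        NvU32 workId, NvU64 base, NvU64 size)
{
    VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER *pHdr = pRing->pScrubBuffRingHeader;
    NvU64 putIdx = pHdr->scrubberPutIdx;
    VGPU_SCRUB_NODE *pNode;

    // Ring full: the producer would have to wait for the consumer to catch up.
    if ((putIdx - pHdr->scrubberGetIdx) >= VGPU_GUEST_PMA_MAX_SCRUB_ITEMS)
        return NV_FALSE;

    pNode         = &pRing->pScrubList[putIdx % VGPU_GUEST_PMA_MAX_SCRUB_ITEMS];
    pNode->workId = workId;
    pNode->base   = base;
    pNode->size   = size;

    pHdr->lastSubmittedWorkId = workId;
    pHdr->scrubberPutIdx      = putIdx + 1;   // publish only after the node is filled
    return NV_TRUE;
}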
|
||||
|
||||
#endif // __vgpu_pma_guest_scrubber_h__

src/nvidia/inc/kernel/vgpu/vgpu_util.h (new file, 52 lines)
@@ -0,0 +1,52 @@
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
//******************************************************************************
|
||||
//
|
||||
// Declarations for VGPU util functions.
|
||||
//
|
||||
//******************************************************************************
|
||||
|
||||
#ifndef __vgpu_util_h__
|
||||
#define __vgpu_util_h__
|
||||
|
||||
#include "gpu/gpu.h"
|
||||
#include "gpu/mem_mgr/mem_desc.h"
|
||||
|
||||
#include "vgpu/vgpu_events.h"
|
||||
|
||||
#include "vgpu/dev_vgpu.h"
|
||||
|
||||
NV_STATUS vgpuAllocSysmemPfnBitMapNode(OBJGPU *pGpu, VGPU_SYSMEM_PFN_BITMAP_NODE_P *node, NvU32 index);
|
||||
NvU64 vgpuGspSysmemPfnMakeBufferAddress(MEMORY_DESCRIPTOR *pMemDesc, NvU64 pfn);
|
||||
void vgpuFreeSysmemPfnBitMapNode(VGPU_SYSMEM_PFN_BITMAP_NODE_P node);
|
||||
NV_STATUS vgpuUpdateSysmemPfnBitMap(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvBool bAlloc);
|
||||
|
||||
static inline NvBool vgpuIsGuestManagedHwAlloc(OBJGPU *pGpu)
|
||||
{
|
||||
VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);
|
||||
|
||||
return pVSI && pVSI->guestManagedHwAlloc;
|
||||
}
|
||||
|
||||
#endif // __vgpu_util_h__

src/nvidia/inc/kernel/vgpu/vgpu_version.h (new file, 112 lines)
@@ -0,0 +1,112 @@
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2008-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __vgpu_vgpu_version_h__
|
||||
#define __vgpu_vgpu_version_h__
|
||||
|
||||
/* VGX interface version */
|
||||
#define NV_RPC_VERSION_NUMBER_MAJOR 31:24 /* R---D */
|
||||
#define NV_RPC_VERSION_NUMBER_MINOR 23:16 /* R---D */
|
||||
|
||||
#define RPC_VERSION_FROM_VGX_VERSION(major, minor) ( DRF_NUM(_RPC, _VERSION_NUMBER, _MAJOR, major) | \
|
||||
DRF_NUM(_RPC, _VERSION_NUMBER, _MINOR, minor))
|
||||
#define VGX_MAJOR_VERSION_NUMBER 0x26
|
||||
#define VGX_MINOR_VERSION_NUMBER 0x05
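/*
 * Illustration only: the guest's packed RPC version, with the major number in
 * bits 31:24 and the minor number in bits 23:16 per the field definitions
 * above.
 *
 *     NvU32 rpcVersion = RPC_VERSION_FROM_VGX_VERSION(VGX_MAJOR_VERSION_NUMBER,
 *                                                     VGX_MINOR_VERSION_NUMBER);
 */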
|
||||
|
||||
#define VGX_MAJOR_VERSION_NUMBER_VGPU_12_0 0x1A
|
||||
#define VGX_MINOR_VERSION_NUMBER_VGPU_12_0 0x18
|
||||
#define VGX_MAJOR_VERSION_NUMBER_VGPU_13_0 0x1C
|
||||
#define VGX_MINOR_VERSION_NUMBER_VGPU_13_0 0x0A
|
||||
#define VGX_MAJOR_VERSION_NUMBER_VGPU_16_0 0x23
|
||||
#define VGX_MAJOR_VERSION_NUMBER_VGPU_17_0 0x25
|
||||
|
||||
/**
|
||||
 * This macro holds the mapping between the internal (RPC) version and the
 * external version, and must be updated appropriately with every new internal
 * version.
 *
 * When a new external version is added, a new entry representing the mapping
 * for that external version should be appended. Please note that the external
 * version should be updated when both of the following are true:
 * 1. The new RPC version update causes a break in migration compatibility.
 * 2. It is the first break in migration compatibility after a release.
|
||||
*/
|
||||
#define NV_VGPU_GRIDSW_INTERNAL_TO_EXTERNAL_VERSION_MAPPING \
|
||||
{{0x26, 0x0}, {0x26, 0x05}, {0x15, 0x1}}, \
|
||||
{{0x25, 0x0}, {0x25, 0x1B}, {0x14, 0x1}}, \
|
||||
{{0x24, 0x0}, {0x24, 0x0A}, {0x13, 0x1}}, \
|
||||
{{0x23, 0x0}, {0x23, 0x05}, {0x12, 0x1}}, \
|
||||
{{0x22, 0x0}, {0x22, 0x02}, {0x11, 0x1}}, \
|
||||
{{0x21, 0x0}, {0x21, 0x0C}, {0x10, 0x1}}, \
|
||||
{{0x20, 0x0}, {0x20, 0x04}, {0xF, 0x1}}, \
|
||||
{{0x1F, 0x0}, {0x1F, 0xF}, {0xE, 0x1}}, \
|
||||
{{0x1E, 0x0}, {0x1E, 0xE}, {0xD, 0x1}}, \
|
||||
{{0x1D, 0x0}, {0x1D, 0x6}, {0xC, 0x1}}, \
|
||||
{{0x1C, 0x0}, {0x1C, 0xA}, {0xB, 0x1}}, \
|
||||
{{0x1C, 0xB}, {0x1C, 0xC}, {0xB, 0x2}}, \
|
||||
{{0x1B, 0x0}, {0x1B, 0x5}, {0xA, 0x1}}, \
|
||||
{{0x1A, 0x0}, {0x1A, 0x18}, {0x9, 0x1}}, \
|
||||
{{0x1A, 0x19}, {0x1A, 0x24}, {0x9, 0x2}}, \
|
||||
{{0x19, 0x0}, {0x19, 0x1}, {0x8, 0x1}}, \
|
||||
{{0x18, 0x0}, {0x18, 0x14},{0x7, 0x1}}, \
|
||||
{{0x18, 0x15}, {0x18, 0x16},{0x7, 0x2}}, \
|
||||
{{0x17, 0x0}, {0x17, 0x6}, {0x6, 0x1}}, \
|
||||
{{0x16, 0x0}, {0x16, 0x6}, {0x5, 0x1}}, \
|
||||
{{0x16, 0x7}, {0x16, 0x7}, {0x5, 0x2}}
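/*
 * Illustrative consumer of the mapping table above. The entry layout
 * ({internal-from}, {internal-to}, {external}) and every type/function name in
 * this sketch are assumptions for illustration, not RM's actual definitions.
 */
typedef struct
{
    NvU32 major;
    NvU32 minor;
} GRIDSW_VERSION_PAIR;

typedef struct
{
    GRIDSW_VERSION_PAIR internalFrom;   // first internal version of the range
    GRIDSW_VERSION_PAIR internalTo;     // last internal version of the range
    GRIDSW_VERSION_PAIR external;       // external version exposed for the range
} GRIDSW_VERSION_MAPPING;

static const GRIDSW_VERSION_MAPPING gridswVersionMap[] =
{
    NV_VGPU_GRIDSW_INTERNAL_TO_EXTERNAL_VERSION_MAPPING
};

// NV_TRUE when (aMaj, aMin) <= (bMaj, bMin) in version order.
static NV_INLINE NvBool gridswVersionLE(NvU32 aMaj, NvU32 aMin, NvU32 bMaj, NvU32 bMin)
{
    return (aMaj < bMaj) || ((aMaj == bMaj) && (aMin <= bMin));
}

// Looks up the external version for an internal (RPC) version; returns NV_FALSE
// if the internal version is not covered by any mapped range.
static NV_INLINE NvBool gridswInternalToExternal(NvU32 major, NvU32 minor,
                                                 NvU32 *pExtMajor, NvU32 *pExtMinor)
{
    NvU32 i;

    for (i = 0; i < (sizeof(gridswVersionMap) / sizeof(gridswVersionMap[0])); i++)
    {
        if (gridswVersionLE(gridswVersionMap[i].internalFrom.major,
                            gridswVersionMap[i].internalFrom.minor, major, minor) &&
            gridswVersionLE(major, minor,
                            gridswVersionMap[i].internalTo.major,
                            gridswVersionMap[i].internalTo.minor))
        {
            *pExtMajor = gridswVersionMap[i].external.major;
            *pExtMinor = gridswVersionMap[i].external.minor;
            return NV_TRUE;
        }
    }
    return NV_FALSE;
}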
|
||||
|
||||
/*
|
||||
* Internal Versioning
|
||||
*/
|
||||
|
||||
#define NV_VGPU_GRIDSW_NUMBER_INTERNAL_MAJOR 63:32
|
||||
#define NV_VGPU_GRIDSW_NUMBER_INTERNAL_MINOR 31:0
|
||||
|
||||
#define GRIDSW_VERSION_INTERNAL(major, minor) (DRF_NUM64(_VGPU, _GRIDSW_NUMBER_INTERNAL, _MAJOR, major) | \
|
||||
DRF_NUM64(_VGPU, _GRIDSW_NUMBER_INTERNAL, _MINOR, minor))
|
||||
|
||||
|
||||
// The NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL macros are auto-generated using the value from rpc-structures.def file.
|
||||
#define AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION
|
||||
#include "g_rpc-structures.h"
|
||||
#undef AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION
|
||||
|
||||
/*
|
||||
* Versioning exposed externally
|
||||
*/
|
||||
#define NV_VGPU_GRIDSW_NUMBER_EXTERNAL_MAJOR 31:16
|
||||
#define NV_VGPU_GRIDSW_NUMBER_EXTERNAL_MINOR 15:0
|
||||
|
||||
#define GRIDSW_VERSION_EXTERNAL(major, minor) (DRF_NUM(_VGPU, _GRIDSW_NUMBER_EXTERNAL, _MAJOR, major) | \
|
||||
DRF_NUM(_VGPU, _GRIDSW_NUMBER_EXTERNAL, _MINOR, minor))
|
||||
|
||||
/* WARNING: Should be updated with each vGPU release, if there is a break in
|
||||
* migration compatibility during the development of that release. */
|
||||
#define NV_VGPU_MAX_SUPPORTED_GRIDSW_VERSION_EXTERNAL_MAJOR 0x15
|
||||
#define NV_VGPU_MAX_SUPPORTED_GRIDSW_VERSION_EXTERNAL_MINOR 0x1
|
||||
|
||||
/* WARNING: Should be updated with each vGPU release, if the minimum supported
 * version changes on the host.
|
||||
*/
|
||||
#define NV_VGPU_MIN_SUPPORTED_GRIDSW_VERSION_EXTERNAL_MAJOR 0x7
|
||||
#define NV_VGPU_MIN_SUPPORTED_GRIDSW_VERSION_EXTERNAL_MINOR 0x1
|
||||
|
||||
#endif // __vgpu_vgpu_version_h__

src/nvidia/inc/kernel/vgpu/vgpuapi.h (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "g_vgpuapi_nvoc.h"
|
||||
|
||||
#ifndef _VGPUAPI_H_
|
||||
#define _VGPUAPI_H_
|
||||
|
||||
#include "core/core.h"
|
||||
#include "rmapi/client.h"
|
||||
#include "gpu/gpu_resource.h"
|
||||
#include "rmapi/control.h"
|
||||
#include "ctrl/ctrla080.h"
|
||||
|
||||
NVOC_PREFIX(vgpuapi) class VgpuApi : GpuResource
|
||||
{
|
||||
public:
|
||||
NV_STATUS vgpuapiConstruct(VgpuApi *pVgpuApi, CALL_CONTEXT *pCallContext,
|
||||
RS_RES_ALLOC_PARAMS_INTERNAL *pParams)
|
||||
: GpuResource(pCallContext, pParams);
|
||||
void vgpuapiDestruct(VgpuApi *pVgpuApi);
|
||||
|
||||
NODE node;
|
||||
NvHandle handle;
|
||||
NvHandle hDevice;
|
||||
|
||||
//
|
||||
// RMCTRL Exported methods -- Category: VGPU_DISPLAY
|
||||
//
|
||||
RMCTRL_EXPORT(NVA080_CTRL_CMD_VGPU_DISPLAY_SET_SURFACE_PROPERTIES,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED))
|
||||
NV_STATUS vgpuapiCtrlCmdVgpuDisplaySetSurfaceProperties(VgpuApi *pVgpuApi,
|
||||
NVA080_CTRL_VGPU_DISPLAY_SET_SURFACE_PROPERTIES *pParams);
|
||||
|
||||
RMCTRL_EXPORT(NVA080_CTRL_CMD_VGPU_DISPLAY_CLEANUP_SURFACE,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED))
|
||||
NV_STATUS vgpuapiCtrlCmdVgpuDisplayCleanupSurface(VgpuApi *pVgpuApi,
|
||||
NVA080_CTRL_VGPU_DISPLAY_CLEANUP_SURFACE_PARAMS *pParams);
|
||||
|
||||
//
|
||||
// RMCTRL Exported methods -- Category: VGPU_OTHERS
|
||||
//
|
||||
RMCTRL_EXPORT(NVA080_CTRL_CMD_VGPU_GET_CONFIG,
|
||||
RMCTRL_FLAGS(NON_PRIVILEGED))
|
||||
NV_STATUS vgpuapiCtrlCmdVGpuGetConfig(VgpuApi *pVgpuApi,
|
||||
NVA080_CTRL_VGPU_GET_CONFIG_PARAMS *pParams);
|
||||
|
||||
};
|
||||
|
||||
#endif // _VGPUAPI_H_