550.54.14

This commit is contained in:
Bernhard Stoeckner
2024-02-23 16:37:56 +01:00
parent 91676d6628
commit 476bd34534
186 changed files with 42509 additions and 37629 deletions

View File

@@ -156,6 +156,7 @@ struct RmClient {
PSECURITY_TOKEN pSecurityToken;
struct UserInfo *pUserInfo;
NvBool bIsClientVirtualMode;
NvS32 imexChannel;
PNODE pCliSyncGpuBoostTree;
};

View File

@@ -1136,28 +1136,6 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
pThis->__gpuUpdateErrorContainmentState__ = &gpuUpdateErrorContainmentState_c04480;
}
// Hal function -- gpuCheckEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__gpuCheckEccCounts__ = &gpuCheckEccCounts_TU102;
}
// default
else
{
pThis->__gpuCheckEccCounts__ = &gpuCheckEccCounts_d44104;
}
// Hal function -- gpuClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__gpuClearEccCounts__ = &gpuClearEccCounts_TU102;
}
// default
else
{
pThis->__gpuClearEccCounts__ = &gpuClearEccCounts_ac1694;
}
// Hal function -- gpuWaitForGfwBootComplete
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -973,8 +973,6 @@ struct OBJGPU {
NvBool (*__gpuIsDevModeEnabledInHw__)(struct OBJGPU *);
NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *);
NV_STATUS (*__gpuUpdateErrorContainmentState__)(struct OBJGPU *, NV_ERROR_CONT_ERR_ID, NV_ERROR_CONT_LOCATION, NvU32 *);
void (*__gpuCheckEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuClearEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuWaitForGfwBootComplete__)(struct OBJGPU *);
NvBool (*__gpuGetIsCmpSku__)(struct OBJGPU *);
NvBool PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED;
@@ -1236,6 +1234,7 @@ struct OBJGPU {
NvBool bStateUnloading;
NvBool bStateLoaded;
NvBool bFullyConstructed;
NvBool bRecoveryMarginPresent;
NvBool bBf3WarBug4040336Enabled;
NvBool bUnifiedMemorySpaceEnabled;
NvBool bSriovEnabled;
@@ -1633,10 +1632,6 @@ NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32,
#define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
#define gpuUpdateErrorContainmentState(pGpu, arg0, arg1, arg2) gpuUpdateErrorContainmentState_DISPATCH(pGpu, arg0, arg1, arg2)
#define gpuUpdateErrorContainmentState_HAL(pGpu, arg0, arg1, arg2) gpuUpdateErrorContainmentState_DISPATCH(pGpu, arg0, arg1, arg2)
#define gpuCheckEccCounts(pGpu) gpuCheckEccCounts_DISPATCH(pGpu)
#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts_HAL(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete_HAL(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
#define gpuGetIsCmpSku(pGpu) gpuGetIsCmpSku_DISPATCH(pGpu)
@@ -2557,6 +2552,34 @@ static inline NV_STATUS gpuSetPartitionErrorAttribution(struct OBJGPU *pGpu, NV_
#define gpuSetPartitionErrorAttribution_HAL(pGpu, arg0, arg1, arg2) gpuSetPartitionErrorAttribution(pGpu, arg0, arg1, arg2)
NV_STATUS gpuCreateRusdMemory_IMPL(struct OBJGPU *pGpu);
#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuCreateRusdMemory(struct OBJGPU *pGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuCreateRusdMemory(pGpu) gpuCreateRusdMemory_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled
#define gpuCreateRusdMemory_HAL(pGpu) gpuCreateRusdMemory(pGpu)
NvBool gpuCheckEccCounts_TU102(struct OBJGPU *pGpu);
#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckEccCounts(struct OBJGPU *pGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckEccCounts(pGpu) gpuCheckEccCounts_TU102(pGpu)
#endif //__nvoc_gpu_h_disabled
#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts(pGpu)
NV_STATUS gpuConstructDeviceInfoTable_FWCLIENT(struct OBJGPU *pGpu);
NV_STATUS gpuConstructDeviceInfoTable_VGPUSTUB(struct OBJGPU *pGpu);
@@ -3147,26 +3170,6 @@ static inline NV_STATUS gpuUpdateErrorContainmentState_DISPATCH(struct OBJGPU *p
return pGpu->__gpuUpdateErrorContainmentState__(pGpu, arg0, arg1, arg2);
}
static inline void gpuCheckEccCounts_d44104(struct OBJGPU *pGpu) {
return;
}
void gpuCheckEccCounts_TU102(struct OBJGPU *pGpu);
static inline void gpuCheckEccCounts_DISPATCH(struct OBJGPU *pGpu) {
pGpu->__gpuCheckEccCounts__(pGpu);
}
static inline NV_STATUS gpuClearEccCounts_ac1694(struct OBJGPU *pGpu) {
return NV_OK;
}
NV_STATUS gpuClearEccCounts_TU102(struct OBJGPU *pGpu);
static inline NV_STATUS gpuClearEccCounts_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuClearEccCounts__(pGpu);
}
NV_STATUS gpuWaitForGfwBootComplete_TU102(struct OBJGPU *pGpu);
static inline NV_STATUS gpuWaitForGfwBootComplete_5baef9(struct OBJGPU *pGpu) {
@@ -4458,16 +4461,6 @@ static inline NV_STATUS gpuSanityCheckRegisterAccess(struct OBJGPU *pGpu, NvU32
#define gpuSanityCheckRegisterAccess(pGpu, addr, pRetVal) gpuSanityCheckRegisterAccess_IMPL(pGpu, addr, pRetVal)
#endif //__nvoc_gpu_h_disabled
void gpuUpdateUserSharedData_IMPL(struct OBJGPU *pGpu);
#ifdef __nvoc_gpu_h_disabled
static inline void gpuUpdateUserSharedData(struct OBJGPU *pGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuUpdateUserSharedData(pGpu) gpuUpdateUserSharedData_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled
NV_STATUS gpuValidateRegOffset_IMPL(struct OBJGPU *pGpu, NvU32 arg0);
#ifdef __nvoc_gpu_h_disabled
@@ -4523,6 +4516,38 @@ static inline NV_STATUS gpuGc6Exit(struct OBJGPU *pGpu, NV2080_CTRL_GC6_EXIT_PAR
#define gpuGc6Exit(pGpu, arg0) gpuGc6Exit_IMPL(pGpu, arg0)
#endif //__nvoc_gpu_h_disabled
void gpuDestroyRusdMemory_IMPL(struct OBJGPU *pGpu);
#ifdef __nvoc_gpu_h_disabled
static inline void gpuDestroyRusdMemory(struct OBJGPU *pGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
}
#else //__nvoc_gpu_h_disabled
#define gpuDestroyRusdMemory(pGpu) gpuDestroyRusdMemory_IMPL(pGpu)
#endif //__nvoc_gpu_h_disabled
NV_STATUS gpuEnableAccounting_IMPL(struct OBJGPU *arg0);
#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuEnableAccounting(struct OBJGPU *arg0) {
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuEnableAccounting(arg0) gpuEnableAccounting_IMPL(arg0)
#endif //__nvoc_gpu_h_disabled
NV_STATUS gpuDisableAccounting_IMPL(struct OBJGPU *arg0, NvBool bForce);
#ifdef __nvoc_gpu_h_disabled
static inline NV_STATUS gpuDisableAccounting(struct OBJGPU *arg0, NvBool bForce) {
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_gpu_h_disabled
#define gpuDisableAccounting(arg0, bForce) gpuDisableAccounting_IMPL(arg0, bForce)
#endif //__nvoc_gpu_h_disabled
#undef PRIVATE_FIELD

View File

@@ -2159,6 +2159,16 @@ NV_STATUS rpcMapMemoryDma_STUB(
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:CTRL_SET_VGPU_FB_USAGE - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X
NV_STATUS rpcCtrlSetVgpuFbUsage_STUB(
POBJGPU pGpu,
POBJRPC pRpc,
void *pArg3
)
{
return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION;
}
// RPC:hal:UNMAP_MEMORY_DMA - TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X
NV_STATUS rpcUnmapMemoryDma_STUB(
POBJGPU pGpu,

View File

@@ -1303,17 +1303,6 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusGetEccCounts__ = &kbusGetEccCounts_4a4dee;
}
// Hal function -- kbusClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kbusClearEccCounts__ = &kbusClearEccCounts_GH100;
}
// default
else
{
pThis->__kbusClearEccCounts__ = &kbusClearEccCounts_b3696a;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelBus_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelBus_engstateStatePreInitLocked;

View File

@@ -428,7 +428,6 @@ struct KernelBus {
void (*__kbusTeardownCoherentCpuMapping__)(struct OBJGPU *, struct KernelBus *, NvBool);
NV_STATUS (*__kbusBar1InstBlkBind__)(struct OBJGPU *, struct KernelBus *);
NvU32 (*__kbusGetEccCounts__)(struct OBJGPU *, struct KernelBus *);
void (*__kbusClearEccCounts__)(struct OBJGPU *, struct KernelBus *);
NV_STATUS (*__kbusStateInitUnlocked__)(POBJGPU, struct KernelBus *);
void (*__kbusInitMissing__)(POBJGPU, struct KernelBus *);
NV_STATUS (*__kbusStatePreInitUnlocked__)(POBJGPU, struct KernelBus *);
@@ -730,8 +729,6 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusBar1InstBlkBind_HAL(pGpu, pKernelBus) kbusBar1InstBlkBind_DISPATCH(pGpu, pKernelBus)
#define kbusGetEccCounts(pGpu, pKernelBus) kbusGetEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusGetEccCounts_HAL(pGpu, pKernelBus) kbusGetEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusClearEccCounts(pGpu, pKernelBus) kbusClearEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusClearEccCounts_HAL(pGpu, pKernelBus) kbusClearEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusStateInitUnlocked(pGpu, pEngstate) kbusStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kbusInitMissing(pGpu, pEngstate) kbusInitMissing_DISPATCH(pGpu, pEngstate)
#define kbusStatePreInitUnlocked(pGpu, pEngstate) kbusStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
@@ -2531,16 +2528,6 @@ static inline NvU32 kbusGetEccCounts_DISPATCH(struct OBJGPU *pGpu, struct Kernel
return pKernelBus->__kbusGetEccCounts__(pGpu, pKernelBus);
}
void kbusClearEccCounts_GH100(struct OBJGPU *pGpu, struct KernelBus *pKernelBus);
static inline void kbusClearEccCounts_b3696a(struct OBJGPU *pGpu, struct KernelBus *pKernelBus) {
return;
}
static inline void kbusClearEccCounts_DISPATCH(struct OBJGPU *pGpu, struct KernelBus *pKernelBus) {
pKernelBus->__kbusClearEccCounts__(pGpu, pKernelBus);
}
static inline NV_STATUS kbusStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate) {
return pEngstate->__kbusStateInitUnlocked__(pGpu, pEngstate);
}
@@ -2625,6 +2612,10 @@ static inline NvBool kbusIsBarAccessBlocked(struct KernelBus *pKernelBus) {
return pKernelBus->bBarAccessBlocked;
}
static inline void kbusSetFlaSupported(struct KernelBus *pKernelBus, NvBool bSupported) {
pKernelBus->bFlaSupported = bSupported;
}
void kbusDestruct_IMPL(struct KernelBus *pKernelBus);
#define __nvoc_kbusDestruct(pKernelBus) kbusDestruct_IMPL(pKernelBus)
@@ -2719,6 +2710,9 @@ static inline NV_STATUS kbusIsGpuP2pAlive(struct OBJGPU *pGpu, struct KernelBus
#define kbusIsGpuP2pAlive(pGpu, pKernelBus) kbusIsGpuP2pAlive_IMPL(pGpu, pKernelBus)
#endif //__nvoc_kern_bus_h_disabled
NV_STATUS kbusUpdateRusdStatistics_IMPL(struct OBJGPU *pGpu);
#define kbusUpdateRusdStatistics(pGpu) kbusUpdateRusdStatistics_IMPL(pGpu)
void kbusDetermineBar1Force64KBMapping_IMPL(struct KernelBus *pKernelBus);
#ifdef __nvoc_kern_bus_h_disabled

View File

@@ -1007,17 +1007,6 @@ static void __nvoc_init_funcTable_KernelGmmu_1(KernelGmmu *pThis, RmHalspecOwner
pThis->__kgmmuGetEccCounts__ = &kgmmuGetEccCounts_4a4dee;
}
// Hal function -- kgmmuClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__kgmmuClearEccCounts__ = &kgmmuClearEccCounts_TU102;
}
// default
else
{
pThis->__kgmmuClearEccCounts__ = &kgmmuClearEccCounts_b3696a;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelGmmu_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelGmmu_engstateStateInitLocked;

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -405,7 +405,7 @@ struct KernelGmmu {
NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *);
NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *, NvBool);
NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu *, NvBool);
NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
@@ -415,7 +415,7 @@ struct KernelGmmu {
NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu *, NvU32);
NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu *, NvU32);
NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE);
NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE, NvBool);
NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu *, NvP64, NvP64);
void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32, NvU32);
GMMU_FAULT_PACKET *(*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32);
@@ -456,7 +456,6 @@ struct KernelGmmu {
NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
void (*__kgmmuClearEccCounts__)(OBJGPU *, struct KernelGmmu *);
NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
@@ -534,7 +533,7 @@ struct KernelGmmu_PRIVATE {
NV_STATUS (*__kgmmuTranslatePdePcfFromSw__)(struct KernelGmmu *, NvU32, NvU32 *);
NV_STATUS (*__kgmmuTranslatePdePcfFromHw__)(struct KernelGmmu *, NvU32, GMMU_APERTURE, NvU32 *);
NV_STATUS (*__kgmmuGetFaultRegisterMappings__)(OBJGPU *, struct KernelGmmu *, NvU32, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvP64 *, NvU32 *, NvP64 *);
NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *);
NV_STATUS (*__kgmmuIssueReplayableFaultBufferFlush__)(OBJGPU *, struct KernelGmmu *, NvBool);
NV_STATUS (*__kgmmuToggleFaultOnPrefetch__)(OBJGPU *, struct KernelGmmu *, NvBool);
NV_STATUS (*__kgmmuFaultBufferAllocSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
void (*__kgmmuFaultBufferFreeSharedMemory__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
@@ -544,7 +543,7 @@ struct KernelGmmu_PRIVATE {
NvBool (*__kgmmuIsFaultEngineBar1__)(struct KernelGmmu *, NvU32);
NvBool (*__kgmmuIsFaultEngineBar2__)(struct KernelGmmu *, NvU32);
NvBool (*__kgmmuIsFaultEnginePhysical__)(struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE);
NV_STATUS (*__kgmmuCopyMmuFaults__)(OBJGPU *, struct KernelGmmu *, struct THREAD_STATE_NODE *, NvU32 *, FAULT_BUFFER_TYPE, NvBool);
NV_STATUS (*__kgmmuParseFaultPacket__)(OBJGPU *, struct KernelGmmu *, NvP64, NvP64);
void (*__kgmmuFaultBufferClearPackets__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32, NvU32);
GMMU_FAULT_PACKET *(*__kgmmuFaultBufferGetFault__)(OBJGPU *, struct KernelGmmu *, struct HW_FAULT_BUFFER *, NvU32);
@@ -585,7 +584,6 @@ struct KernelGmmu_PRIVATE {
NV_STATUS (*__kgmmuServiceMmuFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
NV_STATUS (*__kgmmuServiceUnboundInstBlockFault__)(OBJGPU *, struct KernelGmmu *, NvP64, FIFO_MMU_EXCEPTION_DATA *);
NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
void (*__kgmmuClearEccCounts__)(OBJGPU *, struct KernelGmmu *);
NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *);
@@ -712,8 +710,8 @@ NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
#define kgmmuTranslatePdePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_DISPATCH(pKernelGmmu, arg0, arg1, arg2)
#define kgmmuGetFaultRegisterMappings(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
#define kgmmuGetFaultRegisterMappings_HAL(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_DISPATCH(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl)
#define kgmmuIssueReplayableFaultBufferFlush(pGpu, pKernelGmmu) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuIssueReplayableFaultBufferFlush_HAL(pGpu, pKernelGmmu) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuIssueReplayableFaultBufferFlush(pGpu, pKernelGmmu, bCopyAndFlush) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu, bCopyAndFlush)
#define kgmmuIssueReplayableFaultBufferFlush_HAL(pGpu, pKernelGmmu, bCopyAndFlush) kgmmuIssueReplayableFaultBufferFlush_DISPATCH(pGpu, pKernelGmmu, bCopyAndFlush)
#define kgmmuToggleFaultOnPrefetch(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
#define kgmmuToggleFaultOnPrefetch_HAL(pGpu, pKernelGmmu, bEnable) kgmmuToggleFaultOnPrefetch_DISPATCH(pGpu, pKernelGmmu, bEnable)
#define kgmmuFaultBufferAllocSharedMemory(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferAllocSharedMemory_DISPATCH(pGpu, pKernelGmmu, arg0)
@@ -732,8 +730,8 @@ NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
#define kgmmuIsFaultEngineBar2_HAL(pKernelGmmu, arg0) kgmmuIsFaultEngineBar2_DISPATCH(pKernelGmmu, arg0)
#define kgmmuIsFaultEnginePhysical(pKernelGmmu, arg0) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg0)
#define kgmmuIsFaultEnginePhysical_HAL(pKernelGmmu, arg0) kgmmuIsFaultEnginePhysical_DISPATCH(pKernelGmmu, arg0)
#define kgmmuCopyMmuFaults(pGpu, pKernelGmmu, pThreadState, entriesCopied, type) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type)
#define kgmmuCopyMmuFaults_HAL(pGpu, pKernelGmmu, pThreadState, entriesCopied, type) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type)
#define kgmmuCopyMmuFaults(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit)
#define kgmmuCopyMmuFaults_HAL(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit) kgmmuCopyMmuFaults_DISPATCH(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit)
#define kgmmuParseFaultPacket(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
#define kgmmuParseFaultPacket_HAL(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry) kgmmuParseFaultPacket_DISPATCH(pGpu, pKernelGmmu, pFaultPacket, pParsedFaultEntry)
#define kgmmuFaultBufferClearPackets(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets) kgmmuFaultBufferClearPackets_DISPATCH(pGpu, pKernelGmmu, pFaultBuffer, beginIdx, numFaultPackets)
@@ -814,8 +812,6 @@ NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
#define kgmmuServiceUnboundInstBlockFault_HAL(pGpu, pKernelGmmu, arg0, arg1) kgmmuServiceUnboundInstBlockFault_DISPATCH(pGpu, pKernelGmmu, arg0, arg1)
#define kgmmuGetEccCounts(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuGetEccCounts_HAL(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearEccCounts(pGpu, pKernelGmmu) kgmmuClearEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearEccCounts_HAL(pGpu, pKernelGmmu) kgmmuClearEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuStatePreLoad(pGpu, pEngstate, arg0) kgmmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgmmuStatePostUnload(pGpu, pEngstate, arg0) kgmmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgmmuStateInitUnlocked(pGpu, pEngstate) kgmmuStateInitUnlocked_DISPATCH(pGpu, pEngstate)
@@ -1672,14 +1668,14 @@ static inline NV_STATUS kgmmuGetFaultRegisterMappings_DISPATCH(OBJGPU *pGpu, str
return pKernelGmmu->__kgmmuGetFaultRegisterMappings__(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl);
}
NV_STATUS kgmmuIssueReplayableFaultBufferFlush_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
NV_STATUS kgmmuIssueReplayableFaultBufferFlush_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush);
static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
return pKernelGmmu->__kgmmuIssueReplayableFaultBufferFlush__(pGpu, pKernelGmmu);
static inline NV_STATUS kgmmuIssueReplayableFaultBufferFlush_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bCopyAndFlush) {
return pKernelGmmu->__kgmmuIssueReplayableFaultBufferFlush__(pGpu, pKernelGmmu, bCopyAndFlush);
}
NV_STATUS kgmmuToggleFaultOnPrefetch_GH100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool bEnable);
@@ -1764,15 +1760,15 @@ static inline NvBool kgmmuIsFaultEnginePhysical_DISPATCH(struct KernelGmmu *pKer
return pKernelGmmu->__kgmmuIsFaultEnginePhysical__(pKernelGmmu, arg0);
}
static inline NV_STATUS kgmmuCopyMmuFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type) {
static inline NV_STATUS kgmmuCopyMmuFaults_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit) {
NV_ASSERT_PRECOMP(0);
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS kgmmuCopyMmuFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type);
NV_STATUS kgmmuCopyMmuFaults_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit);
static inline NV_STATUS kgmmuCopyMmuFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type) {
return pKernelGmmu->__kgmmuCopyMmuFaults__(pGpu, pKernelGmmu, pThreadState, entriesCopied, type);
static inline NV_STATUS kgmmuCopyMmuFaults_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *pThreadState, NvU32 *entriesCopied, FAULT_BUFFER_TYPE type, NvBool bPollForValidBit) {
return pKernelGmmu->__kgmmuCopyMmuFaults__(pGpu, pKernelGmmu, pThreadState, entriesCopied, type, bPollForValidBit);
}
static inline NV_STATUS kgmmuParseFaultPacket_92bfc3(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvP64 pFaultPacket, NvP64 pParsedFaultEntry) {
@@ -2237,16 +2233,6 @@ static inline NvU32 kgmmuGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *
return pKernelGmmu->__kgmmuGetEccCounts__(pGpu, pKernelGmmu);
}
void kgmmuClearEccCounts_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
static inline void kgmmuClearEccCounts_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
return;
}
static inline void kgmmuClearEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
pKernelGmmu->__kgmmuClearEccCounts__(pGpu, pKernelGmmu);
}
static inline NV_STATUS kgmmuStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
return pEngstate->__kgmmuStatePreLoad__(pGpu, pEngstate, arg0);
}

View File

@@ -544,14 +544,14 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
}
// Hal function -- kmemsysGetMaxFbpas
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000020UL) )) /* ChipHal: TU102 */
{
pThis->__kmemsysGetMaxFbpas__ = &kmemsysGetMaxFbpas_TU102;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{
pThis->__kmemsysGetMaxFbpas__ = &kmemsysGetMaxFbpas_GA100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */
{
pThis->__kmemsysGetMaxFbpas__ = &kmemsysGetMaxFbpas_TU102;
}
// default
else
{
@@ -563,14 +563,9 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
{
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_GH100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000420UL) )) /* ChipHal: TU102 | GA100 */
{
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_TU102;
}
// default
else
{
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_4a4dee;
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_TU102;
}
// Hal function -- kmemsysGetEccDedCountRegAddr
@@ -578,37 +573,10 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
{
pThis->__kmemsysGetEccDedCountRegAddr__ = &kmemsysGetEccDedCountRegAddr_GH100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000420UL) )) /* ChipHal: TU102 | GA100 */
else
{
pThis->__kmemsysGetEccDedCountRegAddr__ = &kmemsysGetEccDedCountRegAddr_TU102;
}
// default
else
{
pThis->__kmemsysGetEccDedCountRegAddr__ = &kmemsysGetEccDedCountRegAddr_4a4dee;
}
// Hal function -- kmemsysGetEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__kmemsysGetEccCounts__ = &kmemsysGetEccCounts_TU102;
}
// default
else
{
pThis->__kmemsysGetEccCounts__ = &kmemsysGetEccCounts_b3696a;
}
// Hal function -- kmemsysClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__kmemsysClearEccCounts__ = &kmemsysClearEccCounts_TU102;
}
// default
else
{
pThis->__kmemsysClearEccCounts__ = &kmemsysClearEccCounts_b3696a;
}
// Hal function -- kmemsysGetMaximumBlacklistPages
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -238,8 +238,6 @@ struct KernelMemorySystem {
NvU32 (*__kmemsysGetMaxFbpas__)(OBJGPU *, struct KernelMemorySystem *);
NvU32 (*__kmemsysGetEccDedCountSize__)(OBJGPU *, struct KernelMemorySystem *);
NvU32 (*__kmemsysGetEccDedCountRegAddr__)(OBJGPU *, struct KernelMemorySystem *, NvU32, NvU32);
void (*__kmemsysGetEccCounts__)(OBJGPU *, struct KernelMemorySystem *, NvU32 *, NvU32 *);
void (*__kmemsysClearEccCounts__)(OBJGPU *, struct KernelMemorySystem *);
NvU16 (*__kmemsysGetMaximumBlacklistPages__)(OBJGPU *, struct KernelMemorySystem *);
NV_STATUS (*__kmemsysGetFbInfos__)(OBJGPU *, struct KernelMemorySystem *, struct RsClient *, Device *, NvHandle, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *, NvU64 *);
NV_STATUS (*__kmemsysStatePostUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32);
@@ -366,10 +364,6 @@ NV_STATUS __nvoc_objCreate_KernelMemorySystem(KernelMemorySystem**, Dynamic*, Nv
#define kmemsysGetEccDedCountSize_HAL(pGpu, pKernelMemorySystem) kmemsysGetEccDedCountSize_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysGetEccDedCountRegAddr(pGpu, pKernelMemorySystem, fbpa, subp) kmemsysGetEccDedCountRegAddr_DISPATCH(pGpu, pKernelMemorySystem, fbpa, subp)
#define kmemsysGetEccDedCountRegAddr_HAL(pGpu, pKernelMemorySystem, fbpa, subp) kmemsysGetEccDedCountRegAddr_DISPATCH(pGpu, pKernelMemorySystem, fbpa, subp)
#define kmemsysGetEccCounts(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1)
#define kmemsysGetEccCounts_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1)
#define kmemsysClearEccCounts(pGpu, pKernelMemorySystem) kmemsysClearEccCounts_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysClearEccCounts_HAL(pGpu, pKernelMemorySystem) kmemsysClearEccCounts_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysGetMaximumBlacklistPages(pGpu, pKernelMemorySystem) kmemsysGetMaximumBlacklistPages_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysGetMaximumBlacklistPages_HAL(pGpu, pKernelMemorySystem) kmemsysGetMaximumBlacklistPages_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysGetFbInfos(arg0, arg1, arg2, arg3, hSubdevice, pParams, pFbInfoListIndicesUnset) kmemsysGetFbInfos_DISPATCH(arg0, arg1, arg2, arg3, hSubdevice, pParams, pFbInfoListIndicesUnset)
@@ -538,6 +532,19 @@ static inline NvBool kmemsysCbcIsSafe(OBJGPU *pGpu, struct KernelMemorySystem *p
#define kmemsysCbcIsSafe_HAL(pGpu, pKernelMemorySystem) kmemsysCbcIsSafe(pGpu, pKernelMemorySystem)
// TU102 HAL implementation of the ECC-count query (defined out of line).
void kmemsysGetEccCounts_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1);
#ifdef __nvoc_kern_mem_sys_h_disabled
// When the KernelMemorySystem module is compiled out, calling this entry
// point is a programming error: assert and do nothing.
static inline void kmemsysGetEccCounts(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
}
#else //__nvoc_kern_mem_sys_h_disabled
// Module enabled: bind the public name straight to the TU102 implementation.
#define kmemsysGetEccCounts(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts_TU102(pGpu, pKernelMemorySystem, arg0, arg1)
#endif //__nvoc_kern_mem_sys_h_disabled
#define kmemsysGetEccCounts_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts(pGpu, pKernelMemorySystem, arg0, arg1)
// NVOC hash-suffixed default body ("_56cd7a" appears to be the generated
// no-op variant): nothing to do before an XVE reset on this configuration.
static inline NV_STATUS kmemsysPrepareForXVEReset_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
    return NV_OK;
}
@@ -860,10 +867,6 @@ NvU32 kmemsysGetEccDedCountSize_TU102(OBJGPU *pGpu, struct KernelMemorySystem *p
// GH100 HAL implementation (defined out of line).
NvU32 kmemsysGetEccDedCountSize_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
// Generated default ("_4a4dee" variant): chips without this ECC DED counter
// report a size of 0.
static inline NvU32 kmemsysGetEccDedCountSize_4a4dee(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
return 0;
}
// Virtual dispatch: forward to whichever implementation the HAL installed
// in the object's __kmemsysGetEccDedCountSize__ slot.
static inline NvU32 kmemsysGetEccDedCountSize_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
return pKernelMemorySystem->__kmemsysGetEccDedCountSize__(pGpu, pKernelMemorySystem);
}
@@ -872,34 +875,10 @@ NvU32 kmemsysGetEccDedCountRegAddr_TU102(OBJGPU *pGpu, struct KernelMemorySystem
// GH100 HAL implementation (defined out of line); fbpa/subp select the
// FB partition and sub-partition whose DED count register is wanted.
NvU32 kmemsysGetEccDedCountRegAddr_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 fbpa, NvU32 subp);
// Generated default: no such register on this configuration — address 0.
static inline NvU32 kmemsysGetEccDedCountRegAddr_4a4dee(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 fbpa, NvU32 subp) {
return 0;
}
// Virtual dispatch through the per-object HAL function pointer.
static inline NvU32 kmemsysGetEccDedCountRegAddr_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 fbpa, NvU32 subp) {
return pKernelMemorySystem->__kmemsysGetEccDedCountRegAddr__(pGpu, pKernelMemorySystem, fbpa, subp);
}
// TU102 HAL implementation of the ECC-count query (defined out of line).
// arg0/arg1 are out-parameters — presumably corrected vs. uncorrected
// counts; TODO confirm against the TU102 implementation.
void kmemsysGetEccCounts_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1);
// Generated no-op default ("_b3696a"): leaves the out-parameters untouched.
static inline void kmemsysGetEccCounts_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1) {
return;
}
// Virtual dispatch through the per-object HAL function pointer.
static inline void kmemsysGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1) {
pKernelMemorySystem->__kmemsysGetEccCounts__(pGpu, pKernelMemorySystem, arg0, arg1);
}
// TU102 HAL implementation of the ECC-count reset (defined out of line).
void kmemsysClearEccCounts_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
// Generated no-op default: nothing to clear on this configuration.
static inline void kmemsysClearEccCounts_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
return;
}
// Virtual dispatch through the per-object HAL function pointer.
static inline void kmemsysClearEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
pKernelMemorySystem->__kmemsysClearEccCounts__(pGpu, pKernelMemorySystem);
}
NvU16 kmemsysGetMaximumBlacklistPages_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
NvU16 kmemsysGetMaximumBlacklistPages_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
@@ -1072,9 +1051,14 @@ static inline NV_STATUS kmemsysInitMIGGPUInstanceMemConfigForSwizzId(OBJGPU *arg
#undef PRIVATE_FIELD
/*
 * IS_COHERENT_CPU_ATS_OFFSET(kmemsys, offset, length):
 *   Nonzero when kmemsys is non-NULL and the byte range
 *   [offset, offset + length) lies entirely inside the NUMA-onlined window
 *   of the coherent CPU-visible FB aperture, i.e.
 *   [coherentCpuFbBase + numaOnlineBase,
 *    coherentCpuFbBase + numaOnlineBase + numaOnlineSize).
 *
 * NOTE(review): the flattened header carried two conflicting definitions of
 * this macro; the stale first copy compared against coherentCpuFbEnd and
 * referenced an undeclared `size` identifier instead of its `length`
 * parameter. Only the corrected definition is kept, with every argument
 * fully parenthesized to avoid expansion surprises.
 */
#define IS_COHERENT_CPU_ATS_OFFSET(kmemsys, offset, length) \
    ((kmemsys) && ((offset) >= ((kmemsys)->coherentCpuFbBase + (kmemsys)->numaOnlineBase)) && \
     (((NvU64)(offset) + (length)) <= ((kmemsys)->coherentCpuFbBase + (kmemsys)->numaOnlineBase + (kmemsys)->numaOnlineSize)))
/*
 * IS_COHERENT_FB_OFFSET(kmemsys, offset, length):
 *   Nonzero when kmemsys is non-NULL, no NUMA window is onlined
 *   (numaOnlineSize == 0), and [offset, offset + length) lies within the
 *   raw coherent CPU FB aperture [coherentCpuFbBase, coherentCpuFbEnd]
 *   (inclusive upper bound, matching the original <= comparison).
 */
#define IS_COHERENT_FB_OFFSET(kmemsys, offset, length) \
    ((kmemsys) && ((kmemsys)->numaOnlineSize == 0) && \
     ((offset) >= ((kmemsys)->coherentCpuFbBase)) && \
     (((NvU64)(offset) + (length)) <= ((kmemsys)->coherentCpuFbEnd)))
#endif // KERN_MEM_SYS_H

View File

@@ -779,17 +779,6 @@ static void __nvoc_init_funcTable_KernelBif_1(KernelBif *pThis, RmHalspecOwner *
pThis->__kbifGetEccCounts__ = &kbifGetEccCounts_4a4dee;
}
// Hal function -- kbifClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kbifClearEccCounts__ = &kbifClearEccCounts_GH100;
}
// default
else
{
pThis->__kbifClearEccCounts__ = &kbifClearEccCounts_56cd7a;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelBif_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelBif_engstateStateInitLocked;

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -187,7 +187,6 @@ struct KernelBif {
NvU32 (*__kbifGetValidEnginesToReset__)(struct OBJGPU *, struct KernelBif *);
NvU32 (*__kbifGetValidDeviceEnginesToReset__)(struct OBJGPU *, struct KernelBif *);
NvU32 (*__kbifGetEccCounts__)(struct OBJGPU *, struct KernelBif *);
NV_STATUS (*__kbifClearEccCounts__)(struct OBJGPU *, struct KernelBif *);
NV_STATUS (*__kbifStatePreLoad__)(POBJGPU, struct KernelBif *, NvU32);
NV_STATUS (*__kbifStatePostUnload__)(POBJGPU, struct KernelBif *, NvU32);
void (*__kbifStateDestroy__)(POBJGPU, struct KernelBif *);
@@ -418,8 +417,6 @@ NV_STATUS __nvoc_objCreate_KernelBif(KernelBif**, Dynamic*, NvU32);
#define kbifGetValidDeviceEnginesToReset_HAL(pGpu, pKernelBif) kbifGetValidDeviceEnginesToReset_DISPATCH(pGpu, pKernelBif)
#define kbifGetEccCounts(pGpu, pKernelBif) kbifGetEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifGetEccCounts_HAL(pGpu, pKernelBif) kbifGetEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifClearEccCounts(pGpu, pKernelBif) kbifClearEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifClearEccCounts_HAL(pGpu, pKernelBif) kbifClearEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifStatePreLoad(pGpu, pEngstate, arg0) kbifStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kbifStatePostUnload(pGpu, pEngstate, arg0) kbifStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kbifStateDestroy(pGpu, pEngstate) kbifStateDestroy_DISPATCH(pGpu, pEngstate)
@@ -988,16 +985,6 @@ static inline NvU32 kbifGetEccCounts_DISPATCH(struct OBJGPU *pGpu, struct Kernel
return pKernelBif->__kbifGetEccCounts__(pGpu, pKernelBif);
}
// GH100 HAL implementation of the BIF ECC-count reset (defined out of line).
NV_STATUS kbifClearEccCounts_GH100(struct OBJGPU *pGpu, struct KernelBif *pKernelBif);
// Generated default ("_56cd7a" appears to be the NVOC return-NV_OK stub):
// nothing to clear on chips without BIF ECC counters.
static inline NV_STATUS kbifClearEccCounts_56cd7a(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) {
return NV_OK;
}
// Virtual dispatch through the per-object HAL function pointer.
static inline NV_STATUS kbifClearEccCounts_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) {
return pKernelBif->__kbifClearEccCounts__(pGpu, pKernelBif);
}
// Virtual dispatch for the engine-state pre-load hook; arg0 carries the
// state-transition flags passed down by the engine-state framework.
static inline NV_STATUS kbifStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) {
return pEngstate->__kbifStatePreLoad__(pGpu, pEngstate, arg0);
}

View File

@@ -180,6 +180,9 @@ void __nvoc_init_dataField_KernelGraphics(KernelGraphics *pThis, RmHalspecOwner
pThis->bCtxswLoggingSupported = ((NvBool)(0 == 0));
}
// Hal field -- bUsePriFecsMailbox
pThis->bUsePriFecsMailbox = ((NvBool)(0 != 0));
// Hal field -- bDeferContextInit
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000003UL) )) /* RmVariantHal: VF | PF_KERNEL_ONLY */
{
@@ -283,7 +286,7 @@ static void __nvoc_init_funcTable_KernelGraphics_1(KernelGraphics *pThis, RmHals
// Hal function -- kgraphicsLoadStaticInfo
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__kgraphicsLoadStaticInfo__ = &kgraphicsLoadStaticInfo_VGPUSTUB;
pThis->__kgraphicsLoadStaticInfo__ = &kgraphicsLoadStaticInfo_VF;
}
else
{
@@ -307,6 +310,46 @@ static void __nvoc_init_funcTable_KernelGraphics_1(KernelGraphics *pThis, RmHals
pThis->__kgraphicsIsUnrestrictedAccessMapSupported__ = &kgraphicsIsUnrestrictedAccessMapSupported_PF;
}
// Hal function -- kgraphicsGetFecsTraceRdOffset
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */
{
pThis->__kgraphicsGetFecsTraceRdOffset__ = &kgraphicsGetFecsTraceRdOffset_474d46;
}
else
{
pThis->__kgraphicsGetFecsTraceRdOffset__ = &kgraphicsGetFecsTraceRdOffset_GA100;
}
// Hal function -- kgraphicsSetFecsTraceRdOffset
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */
{
pThis->__kgraphicsSetFecsTraceRdOffset__ = &kgraphicsSetFecsTraceRdOffset_d44104;
}
else
{
pThis->__kgraphicsSetFecsTraceRdOffset__ = &kgraphicsSetFecsTraceRdOffset_GA100;
}
// Hal function -- kgraphicsSetFecsTraceWrOffset
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */
{
pThis->__kgraphicsSetFecsTraceWrOffset__ = &kgraphicsSetFecsTraceWrOffset_d44104;
}
else
{
pThis->__kgraphicsSetFecsTraceWrOffset__ = &kgraphicsSetFecsTraceWrOffset_GA100;
}
// Hal function -- kgraphicsSetFecsTraceHwEnable
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */
{
pThis->__kgraphicsSetFecsTraceHwEnable__ = &kgraphicsSetFecsTraceHwEnable_d44104;
}
else
{
pThis->__kgraphicsSetFecsTraceHwEnable__ = &kgraphicsSetFecsTraceHwEnable_GA100;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelGraphics_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelGraphics_engstateStateInitLocked;

View File

@@ -177,6 +177,10 @@ struct KernelGraphics {
NvBool (*__kgraphicsClearInterrupt__)(OBJGPU *, struct KernelGraphics *, IntrServiceClearInterruptArguments *);
NvU32 (*__kgraphicsServiceInterrupt__)(OBJGPU *, struct KernelGraphics *, IntrServiceServiceInterruptArguments *);
NvBool (*__kgraphicsIsUnrestrictedAccessMapSupported__)(OBJGPU *, struct KernelGraphics *);
NvU32 (*__kgraphicsGetFecsTraceRdOffset__)(OBJGPU *, struct KernelGraphics *);
void (*__kgraphicsSetFecsTraceRdOffset__)(OBJGPU *, struct KernelGraphics *, NvU32);
void (*__kgraphicsSetFecsTraceWrOffset__)(OBJGPU *, struct KernelGraphics *, NvU32);
void (*__kgraphicsSetFecsTraceHwEnable__)(OBJGPU *, struct KernelGraphics *, NvBool);
NV_STATUS (*__kgraphicsStatePreLoad__)(POBJGPU, struct KernelGraphics *, NvU32);
NV_STATUS (*__kgraphicsStatePostUnload__)(POBJGPU, struct KernelGraphics *, NvU32);
NV_STATUS (*__kgraphicsStateInitUnlocked__)(POBJGPU, struct KernelGraphics *);
@@ -184,6 +188,8 @@ struct KernelGraphics {
NV_STATUS (*__kgraphicsStatePreInitLocked__)(POBJGPU, struct KernelGraphics *);
NV_STATUS (*__kgraphicsStatePreInitUnlocked__)(POBJGPU, struct KernelGraphics *);
NvBool PRIVATE_FIELD(bCtxswLoggingSupported);
NvBool PRIVATE_FIELD(bCtxswLoggingEnabled);
NvBool PRIVATE_FIELD(bUsePriFecsMailbox);
NvBool PRIVATE_FIELD(bIntrDrivenCtxswLoggingEnabled);
NvBool PRIVATE_FIELD(bBottomHalfCtxswLoggingEnabled);
NvBool PRIVATE_FIELD(bDeferContextInit);
@@ -224,6 +230,10 @@ struct KernelGraphics_PRIVATE {
NvBool (*__kgraphicsClearInterrupt__)(OBJGPU *, struct KernelGraphics *, IntrServiceClearInterruptArguments *);
NvU32 (*__kgraphicsServiceInterrupt__)(OBJGPU *, struct KernelGraphics *, IntrServiceServiceInterruptArguments *);
NvBool (*__kgraphicsIsUnrestrictedAccessMapSupported__)(OBJGPU *, struct KernelGraphics *);
NvU32 (*__kgraphicsGetFecsTraceRdOffset__)(OBJGPU *, struct KernelGraphics *);
void (*__kgraphicsSetFecsTraceRdOffset__)(OBJGPU *, struct KernelGraphics *, NvU32);
void (*__kgraphicsSetFecsTraceWrOffset__)(OBJGPU *, struct KernelGraphics *, NvU32);
void (*__kgraphicsSetFecsTraceHwEnable__)(OBJGPU *, struct KernelGraphics *, NvBool);
NV_STATUS (*__kgraphicsStatePreLoad__)(POBJGPU, struct KernelGraphics *, NvU32);
NV_STATUS (*__kgraphicsStatePostUnload__)(POBJGPU, struct KernelGraphics *, NvU32);
NV_STATUS (*__kgraphicsStateInitUnlocked__)(POBJGPU, struct KernelGraphics *);
@@ -231,6 +241,8 @@ struct KernelGraphics_PRIVATE {
NV_STATUS (*__kgraphicsStatePreInitLocked__)(POBJGPU, struct KernelGraphics *);
NV_STATUS (*__kgraphicsStatePreInitUnlocked__)(POBJGPU, struct KernelGraphics *);
NvBool bCtxswLoggingSupported;
NvBool bCtxswLoggingEnabled;
NvBool bUsePriFecsMailbox;
NvBool bIntrDrivenCtxswLoggingEnabled;
NvBool bBottomHalfCtxswLoggingEnabled;
NvBool bDeferContextInit;
@@ -297,6 +309,14 @@ NV_STATUS __nvoc_objCreate_KernelGraphics(KernelGraphics**, Dynamic*, NvU32);
#define kgraphicsServiceInterrupt_HAL(arg0, arg1, arg2) kgraphicsServiceInterrupt_DISPATCH(arg0, arg1, arg2)
#define kgraphicsIsUnrestrictedAccessMapSupported(arg0, arg1) kgraphicsIsUnrestrictedAccessMapSupported_DISPATCH(arg0, arg1)
#define kgraphicsIsUnrestrictedAccessMapSupported_HAL(arg0, arg1) kgraphicsIsUnrestrictedAccessMapSupported_DISPATCH(arg0, arg1)
#define kgraphicsGetFecsTraceRdOffset(arg0, arg1) kgraphicsGetFecsTraceRdOffset_DISPATCH(arg0, arg1)
#define kgraphicsGetFecsTraceRdOffset_HAL(arg0, arg1) kgraphicsGetFecsTraceRdOffset_DISPATCH(arg0, arg1)
#define kgraphicsSetFecsTraceRdOffset(arg0, arg1, rdOffset) kgraphicsSetFecsTraceRdOffset_DISPATCH(arg0, arg1, rdOffset)
#define kgraphicsSetFecsTraceRdOffset_HAL(arg0, arg1, rdOffset) kgraphicsSetFecsTraceRdOffset_DISPATCH(arg0, arg1, rdOffset)
#define kgraphicsSetFecsTraceWrOffset(arg0, arg1, wrOffset) kgraphicsSetFecsTraceWrOffset_DISPATCH(arg0, arg1, wrOffset)
#define kgraphicsSetFecsTraceWrOffset_HAL(arg0, arg1, wrOffset) kgraphicsSetFecsTraceWrOffset_DISPATCH(arg0, arg1, wrOffset)
#define kgraphicsSetFecsTraceHwEnable(arg0, arg1, bEnable) kgraphicsSetFecsTraceHwEnable_DISPATCH(arg0, arg1, bEnable)
#define kgraphicsSetFecsTraceHwEnable_HAL(arg0, arg1, bEnable) kgraphicsSetFecsTraceHwEnable_DISPATCH(arg0, arg1, bEnable)
#define kgraphicsStatePreLoad(pGpu, pEngstate, arg0) kgraphicsStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgraphicsStatePostUnload(pGpu, pEngstate, arg0) kgraphicsStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgraphicsStateInitUnlocked(pGpu, pEngstate) kgraphicsStateInitUnlocked_DISPATCH(pGpu, pEngstate)
@@ -449,7 +469,7 @@ static inline NV_STATUS kgraphicsServiceNotificationInterrupt_DISPATCH(OBJGPU *a
return arg1->__kgraphicsServiceNotificationInterrupt__(arg0, arg1, arg2);
}
NV_STATUS kgraphicsLoadStaticInfo_VGPUSTUB(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 swizzId);
NV_STATUS kgraphicsLoadStaticInfo_VF(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 swizzId);
NV_STATUS kgraphicsLoadStaticInfo_KERNEL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 swizzId);
@@ -479,6 +499,46 @@ static inline NvBool kgraphicsIsUnrestrictedAccessMapSupported_DISPATCH(OBJGPU *
return arg1->__kgraphicsIsUnrestrictedAccessMapSupported__(arg0, arg1);
}
// Generated assert-stub ("_474d46"): chips without this HAL support assert
// and return 0 (the macro's second argument is the failure return value).
static inline NvU32 kgraphicsGetFecsTraceRdOffset_474d46(OBJGPU *arg0, struct KernelGraphics *arg1) {
NV_ASSERT_OR_RETURN_PRECOMP(0, 0);
}
// GA100 HAL implementation (defined out of line).
NvU32 kgraphicsGetFecsTraceRdOffset_GA100(OBJGPU *arg0, struct KernelGraphics *arg1);
// Virtual dispatch through the per-object HAL function pointer.
static inline NvU32 kgraphicsGetFecsTraceRdOffset_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1) {
return arg1->__kgraphicsGetFecsTraceRdOffset__(arg0, arg1);
}
// Generated no-op default ("_d44104"): silently ignore the write on chips
// without this FECS-trace register.
static inline void kgraphicsSetFecsTraceRdOffset_d44104(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 rdOffset) {
return;
}
// GA100 HAL implementation (defined out of line).
void kgraphicsSetFecsTraceRdOffset_GA100(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 rdOffset);
// Virtual dispatch through the per-object HAL function pointer.
static inline void kgraphicsSetFecsTraceRdOffset_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 rdOffset) {
arg1->__kgraphicsSetFecsTraceRdOffset__(arg0, arg1, rdOffset);
}
// Generated no-op default: silently ignore the write on unsupported chips.
static inline void kgraphicsSetFecsTraceWrOffset_d44104(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 wrOffset) {
return;
}
// GA100 HAL implementation (defined out of line).
void kgraphicsSetFecsTraceWrOffset_GA100(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 wrOffset);
// Virtual dispatch through the per-object HAL function pointer.
static inline void kgraphicsSetFecsTraceWrOffset_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 wrOffset) {
arg1->__kgraphicsSetFecsTraceWrOffset__(arg0, arg1, wrOffset);
}
// Generated no-op default: enable/disable request is ignored on chips
// without HW-driven FECS tracing.
static inline void kgraphicsSetFecsTraceHwEnable_d44104(OBJGPU *arg0, struct KernelGraphics *arg1, NvBool bEnable) {
return;
}
// GA100 HAL implementation (defined out of line).
void kgraphicsSetFecsTraceHwEnable_GA100(OBJGPU *arg0, struct KernelGraphics *arg1, NvBool bEnable);
// Virtual dispatch through the per-object HAL function pointer.
static inline void kgraphicsSetFecsTraceHwEnable_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, NvBool bEnable) {
arg1->__kgraphicsSetFecsTraceHwEnable__(arg0, arg1, bEnable);
}
// Virtual dispatch for the engine-state pre-load hook; arg0 carries the
// state-transition flags passed down by the engine-state framework.
static inline NV_STATUS kgraphicsStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, NvU32 arg0) {
return pEngstate->__kgraphicsStatePreLoad__(pGpu, pEngstate, arg0);
}
@@ -523,6 +583,16 @@ static inline void kgraphicsSetCtxswLoggingSupported(OBJGPU *pGpu, struct Kernel
pKernelGraphics_PRIVATE->bCtxswLoggingSupported = bProp;
}
// Accessor for the private bCtxswLoggingEnabled flag: casts the public
// object to its _PRIVATE layout (the NVOC pattern for PRIVATE_FIELD access).
// pGpu is unused here but kept for signature uniformity with other accessors.
static inline NvBool kgraphicsIsCtxswLoggingEnabled(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) {
struct KernelGraphics_PRIVATE *pKernelGraphics_PRIVATE = (struct KernelGraphics_PRIVATE *)pKernelGraphics;
return pKernelGraphics_PRIVATE->bCtxswLoggingEnabled;
}
// Mutator for the private bCtxswLoggingEnabled flag, via the same
// public-to-_PRIVATE cast used by the matching accessor above it in the
// original header.
static inline void kgraphicsSetCtxswLoggingEnabled(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics, NvBool bProp) {
struct KernelGraphics_PRIVATE *pKernelGraphics_PRIVATE = (struct KernelGraphics_PRIVATE *)pKernelGraphics;
pKernelGraphics_PRIVATE->bCtxswLoggingEnabled = bProp;
}
static inline NvBool kgraphicsIsIntrDrivenCtxswLoggingEnabled(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) {
struct KernelGraphics_PRIVATE *pKernelGraphics_PRIVATE = (struct KernelGraphics_PRIVATE *)pKernelGraphics;
return pKernelGraphics_PRIVATE->bIntrDrivenCtxswLoggingEnabled;

View File

@@ -760,7 +760,7 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
}
else
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */
{
pThis->__kgspGetFrtsSize__ = &kgspGetFrtsSize_4a4dee;
}

View File

@@ -234,17 +234,19 @@ typedef struct GSP_FIRMWARE
/*!
* GSP Notify op infra. Used by UVM in HCC mode.
*/
#define GSP_NOTIFY_OP_RESERVED_OPCODE 0
#define GSP_NOTIFY_OP_RESERVED_OPCODE 0
// Request fault buffer flush.
#define GSP_NOTIFY_OP_FLUSH_REPLAYABLE_FAULT_BUFFER_OPCODE 1
#define GSP_NOTIFY_OP_FLUSH_REPLAYABLE_FAULT_BUFFER_OPCODE 1
#define GSP_NOTIFY_OP_FLUSH_REPLAYABLE_FAULT_BUFFER_VALID_ARGC 1
#define GSP_NOTIFY_OP_FLUSH_REPLAYABLE_FAULT_BUFFER_FLUSH_MODE_ARGIDX 0
// Fault on prefetch toggle.
#define GSP_NOTIFY_OP_TOGGLE_FAULT_ON_PREFETCH_OPCODE 2
#define GSP_NOTIFY_OP_TOGGLE_FAULT_ON_PREFETCH_VALID_ARGC 1
#define GSP_NOTIFY_OP_TOGGLE_FAULT_ON_PREFETCH_EN_ARGIDX 0
#define GSP_NOTIFY_OP_TOGGLE_FAULT_ON_PREFETCH_OPCODE 2
#define GSP_NOTIFY_OP_TOGGLE_FAULT_ON_PREFETCH_VALID_ARGC 1
#define GSP_NOTIFY_OP_TOGGLE_FAULT_ON_PREFETCH_EN_ARGIDX 0
// Always keep this as the last defined value
#define GSP_NOTIFY_OP_OPCODE_MAX 3
#define GSP_NOTIFY_OP_NO_ARGUMENTS 0
#define GSP_NOTIFY_OP_MAX_ARGUMENT_COUNT 1
#define GSP_NOTIFY_OP_OPCODE_MAX 3
#define GSP_NOTIFY_OP_NO_ARGUMENTS 0
#define GSP_NOTIFY_OP_MAX_ARGUMENT_COUNT 1
typedef struct NotifyOpSharedSurface
{
NvU32 inUse; // 0 - signals free, 1 - signals busy
@@ -434,6 +436,7 @@ struct KernelGsp {
NvU64 logElfDataSize;
PORT_MUTEX *pNvlogFlushMtx;
NvBool bLibosLogsPollingEnabled;
NvU8 bootAttempts;
NvBool bInInit;
NvBool bInLockdown;
NvBool bPollingForRpcResponse;
@@ -1422,6 +1425,17 @@ static inline NvU64 kgspGetFwHeapSize(struct OBJGPU *pGpu, struct KernelGsp *pKe
#define kgspGetFwHeapSize(pGpu, pKernelGsp, posteriorFbSize) kgspGetFwHeapSize_IMPL(pGpu, pKernelGsp, posteriorFbSize)
#endif //__nvoc_kernel_gsp_h_disabled
// Out-of-line implementation of the WPR end-margin query.
NvU64 kgspGetWprEndMargin_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
#ifdef __nvoc_kernel_gsp_h_disabled
// When the KernelGsp module is compiled out, calling this entry point is a
// programming error: assert and return a 0 margin.
static inline NvU64 kgspGetWprEndMargin(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!");
return 0;
}
#else //__nvoc_kernel_gsp_h_disabled
// Module enabled: bind the public name straight to the _IMPL function.
#define kgspGetWprEndMargin(pGpu, pKernelGsp) kgspGetWprEndMargin_IMPL(pGpu, pKernelGsp)
#endif //__nvoc_kernel_gsp_h_disabled
void kgspSetupLibosInitArgs_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
#ifdef __nvoc_kernel_gsp_h_disabled

View File

@@ -184,7 +184,8 @@ typedef struct
NvU32 numCreatedVgpu; // Used only on KVM
vgpu_vf_pci_info vfPciInfo[MAX_VF_COUNT_PER_GPU]; // Used only on KVM
NvU64 createdVfMask; // Used only on KVM
NvBool miniQuarterEnabled; // Used only on ESXi
NvBool miniQuarterEnabled; // Used only on ESXi (vGPU profile)
NvBool computeMediaEngineEnabled; // Used only on ESXi (vGPU profile)
/*!
* SwizzId Map. HW currently uses only 14 swizzIds. Every bit position

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -248,7 +248,7 @@ typedef enum
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_32 = 65U,
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_33 = 66U,
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_34 = 67U,
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_35 = 68U,
NV_FB_ALLOC_RM_INTERNAL_OWNER_RUSD_BUFFER = 68U,
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_36 = 69U,
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_37 = 70U,
NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_38 = 71U,

View File

@@ -799,7 +799,9 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x1FF2, 0x1613, 0x103c, "NVIDIA T400 4GB" },
{ 0x1FF2, 0x8a80, 0x103c, "NVIDIA T400 4GB" },
{ 0x1FF2, 0x1613, 0x10de, "NVIDIA T400 4GB" },
{ 0x1FF2, 0x18ff, 0x10de, "NVIDIA T400E" },
{ 0x1FF2, 0x1613, 0x17aa, "NVIDIA T400 4GB" },
{ 0x1FF2, 0x18ff, 0x17aa, "NVIDIA T400E" },
{ 0x1FF9, 0x0000, 0x0000, "Quadro T1000" },
{ 0x20B0, 0x0000, 0x0000, "NVIDIA A100-SXM4-40GB" },
{ 0x20B0, 0x1450, 0x10de, "NVIDIA A100-PG509-200" },
@@ -896,12 +898,15 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" },
{ 0x2324, 0x17a6, 0x10de, "NVIDIA H800" },
{ 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
{ 0x2329, 0x198b, 0x10de, "NVIDIA H20" },
{ 0x2329, 0x198c, 0x10de, "NVIDIA H20" },
{ 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
{ 0x233A, 0x183a, 0x10de, "NVIDIA H800 NVL" },
{ 0x2342, 0x16eb, 0x10de, "NVIDIA GH200 120GB" },
{ 0x2342, 0x1805, 0x10de, "NVIDIA GH200 120GB" },
{ 0x2342, 0x1809, 0x10de, "NVIDIA GH200 480GB" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
@@ -955,6 +960,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2571, 0x1611, 0x10de, "NVIDIA RTX A2000 12GB" },
{ 0x2571, 0x1611, 0x17aa, "NVIDIA RTX A2000 12GB" },
{ 0x2582, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" },
{ 0x2584, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" },
{ 0x25A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
{ 0x25A0, 0x8928, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" },
{ 0x25A0, 0x89f9, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" },
@@ -996,6 +1002,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B2, 0x17fa, 0x103c, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x10de, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x17aa, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B3, 0x1934, 0x1028, "NVIDIA RTX 5880 Ada Generation" },
{ 0x26B3, 0x1934, 0x103c, "NVIDIA RTX 5880 Ada Generation" },
{ 0x26B3, 0x1934, 0x10de, "NVIDIA RTX 5880 Ada Generation" },
{ 0x26B3, 0x1934, 0x17aa, "NVIDIA RTX 5880 Ada Generation" },
@@ -1004,6 +1011,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B9, 0x1851, 0x10de, "NVIDIA L40S" },
{ 0x26B9, 0x18cf, 0x10de, "NVIDIA L40S" },
{ 0x26BA, 0x1957, 0x10de, "NVIDIA L20" },
{ 0x2702, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 SUPER" },
{ 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" },
{ 0x2705, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti SUPER" },
{ 0x2717, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
@@ -1041,6 +1049,10 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2882, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060" },
{ 0x28A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" },
{ 0x28A1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
{ 0x28B0, 0x1870, 0x1028, "NVIDIA RTX 2000 Ada Generation" },
{ 0x28B0, 0x1870, 0x103c, "NVIDIA RTX 2000 Ada Generation" },
{ 0x28B0, 0x1870, 0x10de, "NVIDIA RTX 2000 Ada Generation" },
{ 0x28B0, 0x1870, 0x17aa, "NVIDIA RTX 2000 Ada Generation" },
{ 0x28B8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Laptop GPU" },
{ 0x28E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" },
{ 0x28E1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
@@ -1059,151 +1071,6 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x13BD, 0x1286, 0x10DE, "GRID M10-2B" },
{ 0x13BD, 0x12ee, 0x10DE, "GRID M10-2B4" },
{ 0x13BD, 0x1339, 0x10DE, "GRID M10-1B4" },
{ 0x13F2, 0x114c, 0x10DE, "GRID M60-0Q" },
{ 0x13F2, 0x114d, 0x10DE, "GRID M60-1Q" },
{ 0x13F2, 0x114e, 0x10DE, "GRID M60-2Q" },
{ 0x13F2, 0x114f, 0x10DE, "GRID M60-4Q" },
{ 0x13F2, 0x1150, 0x10DE, "GRID M60-8Q" },
{ 0x13F2, 0x1176, 0x10DE, "GRID M60-0B" },
{ 0x13F2, 0x1177, 0x10DE, "GRID M60-1B" },
{ 0x13F2, 0x117d, 0x10DE, "GRID M60-2B" },
{ 0x13F2, 0x11ae, 0x10DE, "GRID M60-1A" },
{ 0x13F2, 0x11af, 0x10DE, "GRID M60-2A" },
{ 0x13F2, 0x11b0, 0x10DE, "GRID M60-4A" },
{ 0x13F2, 0x11b1, 0x10DE, "GRID M60-8A" },
{ 0x13F2, 0x12ec, 0x10DE, "GRID M60-2B4" },
{ 0x13F2, 0x1337, 0x10DE, "GRID M60-1B4" },
{ 0x13F3, 0x117c, 0x10DE, "GRID M6-2B" },
{ 0x13F3, 0x117e, 0x10DE, "GRID M6-0B" },
{ 0x13F3, 0x117f, 0x10DE, "GRID M6-1B" },
{ 0x13F3, 0x1180, 0x10DE, "GRID M6-0Q" },
{ 0x13F3, 0x1181, 0x10DE, "GRID M6-1Q" },
{ 0x13F3, 0x1182, 0x10DE, "GRID M6-2Q" },
{ 0x13F3, 0x1183, 0x10DE, "GRID M6-4Q" },
{ 0x13F3, 0x1184, 0x10DE, "GRID M6-8Q" },
{ 0x13F3, 0x11aa, 0x10DE, "GRID M6-1A" },
{ 0x13F3, 0x11ab, 0x10DE, "GRID M6-2A" },
{ 0x13F3, 0x11ac, 0x10DE, "GRID M6-4A" },
{ 0x13F3, 0x11ad, 0x10DE, "GRID M6-8A" },
{ 0x13F3, 0x12ed, 0x10DE, "GRID M6-2B4" },
{ 0x13F3, 0x1338, 0x10DE, "GRID M6-1B4" },
{ 0x15F7, 0x1265, 0x10DE, "GRID P100C-1B" },
{ 0x15F7, 0x1266, 0x10DE, "GRID P100C-1Q" },
{ 0x15F7, 0x1267, 0x10DE, "GRID P100C-2Q" },
{ 0x15F7, 0x1268, 0x10DE, "GRID P100C-4Q" },
{ 0x15F7, 0x1269, 0x10DE, "GRID P100C-6Q" },
{ 0x15F7, 0x126a, 0x10DE, "GRID P100C-12Q" },
{ 0x15F7, 0x126b, 0x10DE, "GRID P100C-1A" },
{ 0x15F7, 0x126c, 0x10DE, "GRID P100C-2A" },
{ 0x15F7, 0x126d, 0x10DE, "GRID P100C-4A" },
{ 0x15F7, 0x126e, 0x10DE, "GRID P100C-6A" },
{ 0x15F7, 0x126f, 0x10DE, "GRID P100C-12A" },
{ 0x15F7, 0x128d, 0x10DE, "GRID P100C-2B" },
{ 0x15F7, 0x12f4, 0x10DE, "GRID P100C-2B4" },
{ 0x15F7, 0x133f, 0x10DE, "GRID P100C-1B4" },
{ 0x15F7, 0x137d, 0x10DE, "GRID P100C-12C" },
{ 0x15F7, 0x138c, 0x10DE, "GRID P100C-4C" },
{ 0x15F7, 0x138d, 0x10DE, "GRID P100C-6C" },
{ 0x15F8, 0x1221, 0x10DE, "GRID P100-1B" },
{ 0x15F8, 0x1222, 0x10DE, "GRID P100-1Q" },
{ 0x15F8, 0x1223, 0x10DE, "GRID P100-2Q" },
{ 0x15F8, 0x1224, 0x10DE, "GRID P100-4Q" },
{ 0x15F8, 0x1225, 0x10DE, "GRID P100-8Q" },
{ 0x15F8, 0x1226, 0x10DE, "GRID P100-16Q" },
{ 0x15F8, 0x1227, 0x10DE, "GRID P100-1A" },
{ 0x15F8, 0x1228, 0x10DE, "GRID P100-2A" },
{ 0x15F8, 0x1229, 0x10DE, "GRID P100-4A" },
{ 0x15F8, 0x122a, 0x10DE, "GRID P100-8A" },
{ 0x15F8, 0x122b, 0x10DE, "GRID P100-16A" },
{ 0x15F8, 0x128c, 0x10DE, "GRID P100-2B" },
{ 0x15F8, 0x12f2, 0x10DE, "GRID P100-2B4" },
{ 0x15F8, 0x133d, 0x10DE, "GRID P100-1B4" },
{ 0x15F8, 0x137c, 0x10DE, "GRID P100-16C" },
{ 0x15F8, 0x138a, 0x10DE, "GRID P100-4C" },
{ 0x15F8, 0x138b, 0x10DE, "GRID P100-8C" },
{ 0x15F9, 0x122c, 0x10DE, "GRID P100X-1B" },
{ 0x15F9, 0x122d, 0x10DE, "GRID P100X-1Q" },
{ 0x15F9, 0x122e, 0x10DE, "GRID P100X-2Q" },
{ 0x15F9, 0x122f, 0x10DE, "GRID P100X-4Q" },
{ 0x15F9, 0x1230, 0x10DE, "GRID P100X-8Q" },
{ 0x15F9, 0x1231, 0x10DE, "GRID P100X-16Q" },
{ 0x15F9, 0x1232, 0x10DE, "GRID P100X-1A" },
{ 0x15F9, 0x1233, 0x10DE, "GRID P100X-2A" },
{ 0x15F9, 0x1234, 0x10DE, "GRID P100X-4A" },
{ 0x15F9, 0x1235, 0x10DE, "GRID P100X-8A" },
{ 0x15F9, 0x1236, 0x10DE, "GRID P100X-16A" },
{ 0x15F9, 0x128b, 0x10DE, "GRID P100X-2B" },
{ 0x15F9, 0x12f3, 0x10DE, "GRID P100X-2B4" },
{ 0x15F9, 0x133e, 0x10DE, "GRID P100X-1B4" },
{ 0x15F9, 0x137b, 0x10DE, "GRID P100X-16C" },
{ 0x15F9, 0x1388, 0x10DE, "GRID P100X-4C" },
{ 0x15F9, 0x1389, 0x10DE, "GRID P100X-8C" },
{ 0x1B38, 0x11e7, 0x10DE, "GRID P40-1B" },
{ 0x1B38, 0x11e8, 0x10DE, "GRID P40-1Q" },
{ 0x1B38, 0x11e9, 0x10DE, "GRID P40-2Q" },
{ 0x1B38, 0x11ea, 0x10DE, "GRID P40-3Q" },
{ 0x1B38, 0x11eb, 0x10DE, "GRID P40-4Q" },
{ 0x1B38, 0x11ec, 0x10DE, "GRID P40-6Q" },
{ 0x1B38, 0x11ed, 0x10DE, "GRID P40-8Q" },
{ 0x1B38, 0x11ee, 0x10DE, "GRID P40-12Q" },
{ 0x1B38, 0x11ef, 0x10DE, "GRID P40-24Q" },
{ 0x1B38, 0x11f0, 0x10DE, "GRID P40-1A" },
{ 0x1B38, 0x11f1, 0x10DE, "GRID P40-2A" },
{ 0x1B38, 0x11f2, 0x10DE, "GRID P40-3A" },
{ 0x1B38, 0x11f3, 0x10DE, "GRID P40-4A" },
{ 0x1B38, 0x11f4, 0x10DE, "GRID P40-6A" },
{ 0x1B38, 0x11f5, 0x10DE, "GRID P40-8A" },
{ 0x1B38, 0x11f6, 0x10DE, "GRID P40-12A" },
{ 0x1B38, 0x11f7, 0x10DE, "GRID P40-24A" },
{ 0x1B38, 0x1287, 0x10DE, "GRID P40-2B" },
{ 0x1B38, 0x12b1, 0x10DE, "GeForce GTX P40-24" },
{ 0x1B38, 0x12b2, 0x10DE, "GeForce GTX P40-12" },
{ 0x1B38, 0x12b3, 0x10DE, "GeForce GTX P40-6" },
{ 0x1B38, 0x12ef, 0x10DE, "GRID P40-2B4" },
{ 0x1B38, 0x133a, 0x10DE, "GRID P40-1B4" },
{ 0x1B38, 0x137e, 0x10DE, "GRID P40-24C" },
{ 0x1B38, 0x1381, 0x10DE, "GRID P40-4C" },
{ 0x1B38, 0x1382, 0x10DE, "GRID P40-6C" },
{ 0x1B38, 0x1383, 0x10DE, "GRID P40-8C" },
{ 0x1B38, 0x1384, 0x10DE, "GRID P40-12C" },
{ 0x1B38, 0x13b0, 0x10DE, "GRID GTX P40-6" },
{ 0x1B38, 0x13b1, 0x10DE, "GRID GTX P40-12" },
{ 0x1B38, 0x13b2, 0x10DE, "GRID GTX P40-24" },
{ 0x1B38, 0x13d0, 0x10DE, "GRID GTX P40-8" },
{ 0x1BB3, 0x1203, 0x10DE, "GRID P4-1B" },
{ 0x1BB3, 0x1204, 0x10DE, "GRID P4-1Q" },
{ 0x1BB3, 0x1205, 0x10DE, "GRID P4-2Q" },
{ 0x1BB3, 0x1206, 0x10DE, "GRID P4-4Q" },
{ 0x1BB3, 0x1207, 0x10DE, "GRID P4-8Q" },
{ 0x1BB3, 0x1208, 0x10DE, "GRID P4-1A" },
{ 0x1BB3, 0x1209, 0x10DE, "GRID P4-2A" },
{ 0x1BB3, 0x120a, 0x10DE, "GRID P4-4A" },
{ 0x1BB3, 0x120b, 0x10DE, "GRID P4-8A" },
{ 0x1BB3, 0x1288, 0x10DE, "GRID P4-2B" },
{ 0x1BB3, 0x12f1, 0x10DE, "GRID P4-2B4" },
{ 0x1BB3, 0x133c, 0x10DE, "GRID P4-1B4" },
{ 0x1BB3, 0x136d, 0x10DE, "GRID GTX P4-2" },
{ 0x1BB3, 0x136e, 0x10DE, "GRID GTX P4-4" },
{ 0x1BB3, 0x136f, 0x10DE, "GRID GTX P4-8" },
{ 0x1BB3, 0x1380, 0x10DE, "GRID P4-8C" },
{ 0x1BB3, 0x1385, 0x10DE, "GRID P4-4C" },
{ 0x1BB4, 0x11f8, 0x10DE, "GRID P6-1B" },
{ 0x1BB4, 0x11f9, 0x10DE, "GRID P6-1Q" },
{ 0x1BB4, 0x11fa, 0x10DE, "GRID P6-2Q" },
{ 0x1BB4, 0x11fb, 0x10DE, "GRID P6-4Q" },
{ 0x1BB4, 0x11fc, 0x10DE, "GRID P6-8Q" },
{ 0x1BB4, 0x11fd, 0x10DE, "GRID P6-16Q" },
{ 0x1BB4, 0x11fe, 0x10DE, "GRID P6-1A" },
{ 0x1BB4, 0x11ff, 0x10DE, "GRID P6-2A" },
{ 0x1BB4, 0x1200, 0x10DE, "GRID P6-4A" },
{ 0x1BB4, 0x1201, 0x10DE, "GRID P6-8A" },
{ 0x1BB4, 0x1202, 0x10DE, "GRID P6-16A" },
{ 0x1BB4, 0x1289, 0x10DE, "GRID P6-2B" },
{ 0x1BB4, 0x12f0, 0x10DE, "GRID P6-2B4" },
{ 0x1BB4, 0x133b, 0x10DE, "GRID P6-1B4" },
{ 0x1BB4, 0x137f, 0x10DE, "GRID P6-16C" },
{ 0x1BB4, 0x1386, 0x10DE, "GRID P6-4C" },
{ 0x1BB4, 0x1387, 0x10DE, "GRID P6-8C" },
{ 0x1DB1, 0x1259, 0x10DE, "GRID V100X-1B" },
{ 0x1DB1, 0x125a, 0x10DE, "GRID V100X-1Q" },
{ 0x1DB1, 0x125b, 0x10DE, "GRID V100X-2Q" },
@@ -1828,6 +1695,20 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2324, 0x18e1, 0x10DE, "NVIDIA H800XM-20C" },
{ 0x2324, 0x18e2, 0x10DE, "NVIDIA H800XM-40C" },
{ 0x2324, 0x18e3, 0x10DE, "NVIDIA H800XM-80C" },
{ 0x2329, 0x2028, 0x10DE, "NVIDIA H20-1-12CME" },
{ 0x2329, 0x2029, 0x10DE, "NVIDIA H20-1-12C" },
{ 0x2329, 0x202a, 0x10DE, "NVIDIA H20-1-24C" },
{ 0x2329, 0x202b, 0x10DE, "NVIDIA H20-2-24C" },
{ 0x2329, 0x202c, 0x10DE, "NVIDIA H20-3-48C" },
{ 0x2329, 0x202d, 0x10DE, "NVIDIA H20-4-48C" },
{ 0x2329, 0x202e, 0x10DE, "NVIDIA H20-7-96C" },
{ 0x2329, 0x202f, 0x10DE, "NVIDIA H20-4C" },
{ 0x2329, 0x2030, 0x10DE, "NVIDIA H20-6C" },
{ 0x2329, 0x2031, 0x10DE, "NVIDIA H20-12C" },
{ 0x2329, 0x2032, 0x10DE, "NVIDIA H20-16C" },
{ 0x2329, 0x2033, 0x10DE, "NVIDIA H20-24C" },
{ 0x2329, 0x2034, 0x10DE, "NVIDIA H20-48C" },
{ 0x2329, 0x2035, 0x10DE, "NVIDIA H20-96C" },
{ 0x2330, 0x187a, 0x10DE, "NVIDIA H100XM-1-10CME" },
{ 0x2330, 0x187b, 0x10DE, "NVIDIA H100XM-1-10C" },
{ 0x2330, 0x187c, 0x10DE, "NVIDIA H100XM-1-20C" },
@@ -1898,14 +1779,14 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x233A, 0x186c, 0x10DE, "NVIDIA H800L-23C" },
{ 0x233A, 0x186d, 0x10DE, "NVIDIA H800L-47C" },
{ 0x233A, 0x186e, 0x10DE, "NVIDIA H800L-94C" },
{ 0x2342, 0x18c2, 0x10DE, "NVIDIA H100GL-1-12CME" },
{ 0x2342, 0x18c3, 0x10DE, "NVIDIA H100GL-1-12C" },
{ 0x2342, 0x18c4, 0x10DE, "NVIDIA H100GL-1-24C" },
{ 0x2342, 0x18c5, 0x10DE, "NVIDIA H100GL-2-24C" },
{ 0x2342, 0x18c6, 0x10DE, "NVIDIA H100GL-3-48C" },
{ 0x2342, 0x18c7, 0x10DE, "NVIDIA H100GL-4-48C" },
{ 0x2342, 0x18c8, 0x10DE, "NVIDIA H100GL-7-96C" },
{ 0x2342, 0x18c9, 0x10DE, "NVIDIA H100GL-96C" },
{ 0x2342, 0x18c2, 0x10DE, "NVIDIA GH200-1-12CME" },
{ 0x2342, 0x18c3, 0x10DE, "NVIDIA GH200-1-12C" },
{ 0x2342, 0x18c4, 0x10DE, "NVIDIA GH200-1-24C" },
{ 0x2342, 0x18c5, 0x10DE, "NVIDIA GH200-2-24C" },
{ 0x2342, 0x18c6, 0x10DE, "NVIDIA GH200-3-48C" },
{ 0x2342, 0x18c7, 0x10DE, "NVIDIA GH200-4-48C" },
{ 0x2342, 0x18c8, 0x10DE, "NVIDIA GH200-7-96C" },
{ 0x2342, 0x18c9, 0x10DE, "NVIDIA GH200-96C" },
{ 0x25B6, 0x159d, 0x10DE, "NVIDIA A16-1B" },
{ 0x25B6, 0x159e, 0x10DE, "NVIDIA A16-2B" },
{ 0x25B6, 0x159f, 0x10DE, "NVIDIA A16-1Q" },
@@ -2002,6 +1883,45 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B2, 0x1836, 0x10DE, "NVIDIA RTX5000-Ada-8C" },
{ 0x26B2, 0x1837, 0x10DE, "NVIDIA RTX5000-Ada-16C" },
{ 0x26B2, 0x1838, 0x10DE, "NVIDIA RTX5000-Ada-32C" },
{ 0x26B3, 0x1958, 0x10DE, "NVIDIA RTX 5880-Ada-1B" },
{ 0x26B3, 0x1959, 0x10DE, "NVIDIA RTX 5880-Ada-2B" },
{ 0x26B3, 0x195a, 0x10DE, "NVIDIA RTX 5880-Ada-1Q" },
{ 0x26B3, 0x195b, 0x10DE, "NVIDIA RTX 5880-Ada-2Q" },
{ 0x26B3, 0x195c, 0x10DE, "NVIDIA RTX 5880-Ada-3Q" },
{ 0x26B3, 0x195d, 0x10DE, "NVIDIA RTX 5880-Ada-4Q" },
{ 0x26B3, 0x195e, 0x10DE, "NVIDIA RTX 5880-Ada-6Q" },
{ 0x26B3, 0x195f, 0x10DE, "NVIDIA RTX 5880-Ada-8Q" },
{ 0x26B3, 0x1960, 0x10DE, "NVIDIA RTX 5880-Ada-12Q" },
{ 0x26B3, 0x1961, 0x10DE, "NVIDIA RTX 5880-Ada-16Q" },
{ 0x26B3, 0x1962, 0x10DE, "NVIDIA RTX 5880-Ada-24Q" },
{ 0x26B3, 0x1963, 0x10DE, "NVIDIA RTX 5880-Ada-48Q" },
{ 0x26B3, 0x1964, 0x10DE, "NVIDIA RTX 5880-Ada-1A" },
{ 0x26B3, 0x1965, 0x10DE, "NVIDIA RTX 5880-Ada-2A" },
{ 0x26B3, 0x1966, 0x10DE, "NVIDIA RTX 5880-Ada-3A" },
{ 0x26B3, 0x1967, 0x10DE, "NVIDIA RTX 5880-Ada-4A" },
{ 0x26B3, 0x1968, 0x10DE, "NVIDIA RTX 5880-Ada-6A" },
{ 0x26B3, 0x1969, 0x10DE, "NVIDIA RTX 5880-Ada-8A" },
{ 0x26B3, 0x196a, 0x10DE, "NVIDIA RTX 5880-Ada-12A" },
{ 0x26B3, 0x196b, 0x10DE, "NVIDIA RTX 5880-Ada-16A" },
{ 0x26B3, 0x196c, 0x10DE, "NVIDIA RTX 5880-Ada-24A" },
{ 0x26B3, 0x196d, 0x10DE, "NVIDIA RTX 5880-Ada-48A" },
{ 0x26B3, 0x196e, 0x10DE, "NVIDIA RTX 5880-Ada-1" },
{ 0x26B3, 0x196f, 0x10DE, "NVIDIA RTX 5880-Ada-2" },
{ 0x26B3, 0x1970, 0x10DE, "NVIDIA RTX 5880-Ada-3" },
{ 0x26B3, 0x1971, 0x10DE, "NVIDIA RTX 5880-Ada-4" },
{ 0x26B3, 0x1972, 0x10DE, "NVIDIA RTX 5880-Ada-6" },
{ 0x26B3, 0x1973, 0x10DE, "NVIDIA RTX 5880-Ada-8" },
{ 0x26B3, 0x1974, 0x10DE, "NVIDIA RTX 5880-Ada-12" },
{ 0x26B3, 0x1975, 0x10DE, "NVIDIA RTX 5880-Ada-16" },
{ 0x26B3, 0x1976, 0x10DE, "NVIDIA RTX 5880-Ada-24" },
{ 0x26B3, 0x1977, 0x10DE, "NVIDIA RTX 5880-Ada-48" },
{ 0x26B3, 0x1978, 0x10DE, "NVIDIA RTX 5880-Ada-4C" },
{ 0x26B3, 0x1979, 0x10DE, "NVIDIA RTX 5880-Ada-6C" },
{ 0x26B3, 0x197a, 0x10DE, "NVIDIA RTX 5880-Ada-8C" },
{ 0x26B3, 0x197b, 0x10DE, "NVIDIA RTX 5880-Ada-12C" },
{ 0x26B3, 0x197c, 0x10DE, "NVIDIA RTX 5880-Ada-16C" },
{ 0x26B3, 0x197d, 0x10DE, "NVIDIA RTX 5880-Ada-24C" },
{ 0x26B3, 0x197e, 0x10DE, "NVIDIA RTX 5880-Ada-48C" },
{ 0x26B5, 0x176d, 0x10DE, "NVIDIA L40-1B" },
{ 0x26B5, 0x176e, 0x10DE, "NVIDIA L40-2B" },
{ 0x26B5, 0x176f, 0x10DE, "NVIDIA L40-1Q" },
@@ -2117,6 +2037,78 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B9, 0x18af, 0x10DE, "NVIDIA L40S-16C" },
{ 0x26B9, 0x18b0, 0x10DE, "NVIDIA L40S-24C" },
{ 0x26B9, 0x18b1, 0x10DE, "NVIDIA L40S-48C" },
{ 0x26BA, 0x1909, 0x10DE, "NVIDIA L20-1B" },
{ 0x26BA, 0x190a, 0x10DE, "NVIDIA L20-2B" },
{ 0x26BA, 0x190b, 0x10DE, "NVIDIA L20-1Q" },
{ 0x26BA, 0x190c, 0x10DE, "NVIDIA L20-2Q" },
{ 0x26BA, 0x190d, 0x10DE, "NVIDIA L20-3Q" },
{ 0x26BA, 0x190e, 0x10DE, "NVIDIA L20-4Q" },
{ 0x26BA, 0x190f, 0x10DE, "NVIDIA L20-6Q" },
{ 0x26BA, 0x1910, 0x10DE, "NVIDIA L20-8Q" },
{ 0x26BA, 0x1911, 0x10DE, "NVIDIA L20-12Q" },
{ 0x26BA, 0x1912, 0x10DE, "NVIDIA L20-16Q" },
{ 0x26BA, 0x1913, 0x10DE, "NVIDIA L20-24Q" },
{ 0x26BA, 0x1914, 0x10DE, "NVIDIA L20-48Q" },
{ 0x26BA, 0x1915, 0x10DE, "NVIDIA L20-1A" },
{ 0x26BA, 0x1916, 0x10DE, "NVIDIA L20-2A" },
{ 0x26BA, 0x1917, 0x10DE, "NVIDIA L20-3A" },
{ 0x26BA, 0x1918, 0x10DE, "NVIDIA L20-4A" },
{ 0x26BA, 0x1919, 0x10DE, "NVIDIA L20-6A" },
{ 0x26BA, 0x191a, 0x10DE, "NVIDIA L20-8A" },
{ 0x26BA, 0x191b, 0x10DE, "NVIDIA L20-12A" },
{ 0x26BA, 0x191c, 0x10DE, "NVIDIA L20-16A" },
{ 0x26BA, 0x191d, 0x10DE, "NVIDIA L20-24A" },
{ 0x26BA, 0x191e, 0x10DE, "NVIDIA L20-48A" },
{ 0x26BA, 0x191f, 0x10DE, "NVIDIA GeForce RTX 3050" },
{ 0x26BA, 0x1920, 0x10DE, "NVIDIA GeForce RTX 3060" },
{ 0x26BA, 0x1921, 0x10DE, "NVIDIA L20-1" },
{ 0x26BA, 0x1922, 0x10DE, "NVIDIA L20-2" },
{ 0x26BA, 0x1923, 0x10DE, "NVIDIA L20-3" },
{ 0x26BA, 0x1924, 0x10DE, "NVIDIA L20-4" },
{ 0x26BA, 0x1925, 0x10DE, "NVIDIA L20-6" },
{ 0x26BA, 0x1926, 0x10DE, "NVIDIA L20-8" },
{ 0x26BA, 0x1927, 0x10DE, "NVIDIA L20-12" },
{ 0x26BA, 0x1928, 0x10DE, "NVIDIA L20-16" },
{ 0x26BA, 0x1929, 0x10DE, "NVIDIA L20-24" },
{ 0x26BA, 0x192a, 0x10DE, "NVIDIA L20-48" },
{ 0x26BA, 0x192b, 0x10DE, "NVIDIA L20-4C" },
{ 0x26BA, 0x192c, 0x10DE, "NVIDIA L20-6C" },
{ 0x26BA, 0x192d, 0x10DE, "NVIDIA L20-8C" },
{ 0x26BA, 0x192e, 0x10DE, "NVIDIA L20-12C" },
{ 0x26BA, 0x192f, 0x10DE, "NVIDIA L20-16C" },
{ 0x26BA, 0x1930, 0x10DE, "NVIDIA L20-24C" },
{ 0x26BA, 0x1931, 0x10DE, "NVIDIA L20-48C" },
{ 0x27B6, 0x1938, 0x10DE, "NVIDIA L2-1B" },
{ 0x27B6, 0x1939, 0x10DE, "NVIDIA L2-2B" },
{ 0x27B6, 0x193a, 0x10DE, "NVIDIA L2-1Q" },
{ 0x27B6, 0x193b, 0x10DE, "NVIDIA L2-2Q" },
{ 0x27B6, 0x193c, 0x10DE, "NVIDIA L2-3Q" },
{ 0x27B6, 0x193d, 0x10DE, "NVIDIA L2-4Q" },
{ 0x27B6, 0x193e, 0x10DE, "NVIDIA L2-6Q" },
{ 0x27B6, 0x193f, 0x10DE, "NVIDIA L2-8Q" },
{ 0x27B6, 0x1940, 0x10DE, "NVIDIA L2-12Q" },
{ 0x27B6, 0x1941, 0x10DE, "NVIDIA L2-24Q" },
{ 0x27B6, 0x1942, 0x10DE, "NVIDIA L2-1A" },
{ 0x27B6, 0x1943, 0x10DE, "NVIDIA L2-2A" },
{ 0x27B6, 0x1944, 0x10DE, "NVIDIA L2-3A" },
{ 0x27B6, 0x1945, 0x10DE, "NVIDIA L2-4A" },
{ 0x27B6, 0x1946, 0x10DE, "NVIDIA L2-6A" },
{ 0x27B6, 0x1947, 0x10DE, "NVIDIA L2-8A" },
{ 0x27B6, 0x1948, 0x10DE, "NVIDIA L2-12A" },
{ 0x27B6, 0x1949, 0x10DE, "NVIDIA L2-24A" },
{ 0x27B6, 0x194a, 0x10DE, "NVIDIA L2-1" },
{ 0x27B6, 0x194b, 0x10DE, "NVIDIA L2-2" },
{ 0x27B6, 0x194c, 0x10DE, "NVIDIA L2-3" },
{ 0x27B6, 0x194d, 0x10DE, "NVIDIA L2-4" },
{ 0x27B6, 0x194e, 0x10DE, "NVIDIA L2-6" },
{ 0x27B6, 0x194f, 0x10DE, "NVIDIA L2-8" },
{ 0x27B6, 0x1950, 0x10DE, "NVIDIA L2-12" },
{ 0x27B6, 0x1951, 0x10DE, "NVIDIA L2-24" },
{ 0x27B6, 0x1952, 0x10DE, "NVIDIA L2-4C" },
{ 0x27B6, 0x1953, 0x10DE, "NVIDIA L2-6C" },
{ 0x27B6, 0x1954, 0x10DE, "NVIDIA L2-8C" },
{ 0x27B6, 0x1955, 0x10DE, "NVIDIA L2-12C" },
{ 0x27B6, 0x1956, 0x10DE, "NVIDIA L2-24C" },
{ 0x27B8, 0x172f, 0x10DE, "NVIDIA L4-1B" },
{ 0x27B8, 0x1730, 0x10DE, "NVIDIA L4-2B" },
{ 0x27B8, 0x1731, 0x10DE, "NVIDIA L4-1Q" },

View File

@@ -800,6 +800,10 @@ NV_STATUS osRmCapRegisterSmcExecutionPartition(
NvU32 execPartitionId);
NV_STATUS osRmCapRegisterSys(OS_RM_CAPS **ppOsRmCaps);
NvBool osImexChannelIsSupported(void);
NvS32 osImexChannelGet(NvU64 descriptor);
NvS32 osImexChannelCount(void);
NV_STATUS osGetRandomBytes(NvU8 *pBytes, NvU16 numBytes);
NV_STATUS osAllocWaitQueue(OS_WAIT_QUEUE **ppWq);
@@ -813,6 +817,8 @@ NvU32 osGetDynamicPowerSupportMask(void);
void osUnrefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo);
NV_STATUS osRefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo);
NvU32 osGetGridCspSupport(void);
NV_STATUS osIovaMap(PIOVAMAPPING pIovaMapping);
void osIovaUnmap(PIOVAMAPPING pIovaMapping);
NV_STATUS osGetAtsTargetAddressRange(OBJGPU *pGpu,

View File

@@ -56,7 +56,16 @@ typedef struct rpc_set_guest_system_info_ext_v15_02
NvU16 device;
} rpc_set_guest_system_info_ext_v15_02;
typedef rpc_set_guest_system_info_ext_v15_02 rpc_set_guest_system_info_ext_v;
typedef struct rpc_set_guest_system_info_ext_v25_1B
{
char guestDriverBranch[0x100];
NvU32 domain;
NvU16 bus;
NvU16 device;
NvU32 gridBuildCsp;
} rpc_set_guest_system_info_ext_v25_1B;
typedef rpc_set_guest_system_info_ext_v25_1B rpc_set_guest_system_info_ext_v;
typedef struct rpc_alloc_root_v07_00
{
@@ -224,7 +233,19 @@ typedef struct rpc_rm_api_control_v25_18
NvP64 rm_api_params NV_ALIGN_BYTES(8);
} rpc_rm_api_control_v25_18;
typedef rpc_rm_api_control_v25_18 rpc_rm_api_control_v;
typedef struct rpc_rm_api_control_v25_19
{
NVOS54_PARAMETERS_v03_00 params;
NvP64 rm_api_params NV_ALIGN_BYTES(8);
} rpc_rm_api_control_v25_19;
typedef struct rpc_rm_api_control_v25_1A
{
NVOS54_PARAMETERS_v03_00 params;
NvP64 rm_api_params NV_ALIGN_BYTES(8);
} rpc_rm_api_control_v25_1A;
typedef rpc_rm_api_control_v25_1A rpc_rm_api_control_v;
typedef struct rpc_alloc_share_device_v03_00
{
@@ -361,6 +382,13 @@ typedef struct rpc_vgpu_pf_reg_read32_v15_00
typedef rpc_vgpu_pf_reg_read32_v15_00 rpc_vgpu_pf_reg_read32_v;
typedef struct rpc_ctrl_set_vgpu_fb_usage_v1A_08
{
NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02 setFbUsage;
} rpc_ctrl_set_vgpu_fb_usage_v1A_08;
typedef rpc_ctrl_set_vgpu_fb_usage_v1A_08 rpc_ctrl_set_vgpu_fb_usage_v;
typedef struct rpc_ctrl_nvenc_sw_session_update_info_v1A_09
{
NvHandle hClient;
@@ -1604,6 +1632,58 @@ static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_set_guest_system_info_v03_00 = {
};
#endif
#ifndef SKIP_PRINT_rpc_set_guest_system_info_ext_v25_1B
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_set_guest_system_info_ext_v25_1B[] = {
{
.vtype = vtype_char_array,
.offset = NV_OFFSETOF(rpc_set_guest_system_info_ext_v25_1B, guestDriverBranch),
.array_length = 0x100,
#if (defined(DEBUG) || defined(DEVELOP))
.name = "guestDriverBranch"
#endif
},
{
.vtype = vtype_NvU32,
.offset = NV_OFFSETOF(rpc_set_guest_system_info_ext_v25_1B, domain),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "domain"
#endif
},
{
.vtype = vtype_NvU16,
.offset = NV_OFFSETOF(rpc_set_guest_system_info_ext_v25_1B, bus),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "bus"
#endif
},
{
.vtype = vtype_NvU16,
.offset = NV_OFFSETOF(rpc_set_guest_system_info_ext_v25_1B, device),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "device"
#endif
},
{
.vtype = vtype_NvU32,
.offset = NV_OFFSETOF(rpc_set_guest_system_info_ext_v25_1B, gridBuildCsp),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "gridBuildCsp"
#endif
},
{
.vtype = vt_end
}
};
static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_set_guest_system_info_ext_v25_1B = {
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rpc_set_guest_system_info_ext",
#endif
.header_length = sizeof(rpc_set_guest_system_info_ext_v25_1B),
.fdesc = vmiopd_fdesc_t_rpc_set_guest_system_info_ext_v25_1B
};
#endif
#ifndef SKIP_PRINT_rpc_set_guest_system_info_ext_v15_02
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_set_guest_system_info_ext_v15_02[] = {
{
@@ -2168,6 +2248,36 @@ static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_alloc_event_v03_00 = {
};
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_19
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rm_api_control_v25_19[] = {
{
.vtype = vtype_NVOS54_PARAMETERS_v03_00,
.offset = NV_OFFSETOF(rpc_rm_api_control_v25_19, params),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "params"
#endif
},
{
.vtype = vtype_NvP64,
.offset = NV_OFFSETOF(rpc_rm_api_control_v25_19, rm_api_params),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rm_api_params"
#endif
},
{
.vtype = vt_end
}
};
static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rm_api_control_v25_19 = {
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rpc_rm_api_control",
#endif
.header_length = sizeof(rpc_rm_api_control_v25_19),
.fdesc = vmiopd_fdesc_t_rpc_rm_api_control_v25_19
};
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_0F
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rm_api_control_v25_0F[] = {
{
@@ -2378,6 +2488,36 @@ static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rm_api_control_v25_18 = {
};
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_1A
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rm_api_control_v25_1A[] = {
{
.vtype = vtype_NVOS54_PARAMETERS_v03_00,
.offset = NV_OFFSETOF(rpc_rm_api_control_v25_1A, params),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "params"
#endif
},
{
.vtype = vtype_NvP64,
.offset = NV_OFFSETOF(rpc_rm_api_control_v25_1A, rm_api_params),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rm_api_params"
#endif
},
{
.vtype = vt_end
}
};
static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rm_api_control_v25_1A = {
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rpc_rm_api_control",
#endif
.header_length = sizeof(rpc_rm_api_control_v25_1A),
.fdesc = vmiopd_fdesc_t_rpc_rm_api_control_v25_1A
};
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_14
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rm_api_control_v25_14[] = {
{
@@ -2982,6 +3122,29 @@ static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_vgpu_pf_reg_read32_v15_00 = {
};
#endif
#ifndef SKIP_PRINT_rpc_ctrl_set_vgpu_fb_usage_v1A_08
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ctrl_set_vgpu_fb_usage_v1A_08[] = {
{
.vtype = vtype_NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02,
.offset = NV_OFFSETOF(rpc_ctrl_set_vgpu_fb_usage_v1A_08, setFbUsage),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "setFbUsage"
#endif
},
{
.vtype = vt_end
}
};
static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_ctrl_set_vgpu_fb_usage_v1A_08 = {
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rpc_ctrl_set_vgpu_fb_usage",
#endif
.header_length = sizeof(rpc_ctrl_set_vgpu_fb_usage_v1A_08),
.fdesc = vmiopd_fdesc_t_rpc_ctrl_set_vgpu_fb_usage_v1A_08
};
#endif
#ifndef SKIP_PRINT_rpc_ctrl_nvenc_sw_session_update_info_v1A_09
static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ctrl_nvenc_sw_session_update_info_v1A_09[] = {
{
@@ -7708,6 +7871,13 @@ vmiopd_mdesc_t *rpcdebugSetGuestSystemInfo_v03_00(void)
}
#endif
#ifndef SKIP_PRINT_rpc_set_guest_system_info_ext_v25_1B
vmiopd_mdesc_t *rpcdebugSetGuestSystemInfoExt_v25_1B(void)
{
return &vmiopd_mdesc_t_rpc_set_guest_system_info_ext_v25_1B;
}
#endif
#ifndef SKIP_PRINT_rpc_set_guest_system_info_ext_v15_02
vmiopd_mdesc_t *rpcdebugSetGuestSystemInfoExt_v15_02(void)
{
@@ -7799,6 +7969,13 @@ vmiopd_mdesc_t *rpcdebugAllocEvent_v03_00(void)
}
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_19
vmiopd_mdesc_t *rpcdebugRmApiControl_v25_19(void)
{
return &vmiopd_mdesc_t_rpc_rm_api_control_v25_19;
}
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_0F
vmiopd_mdesc_t *rpcdebugRmApiControl_v25_0F(void)
{
@@ -7848,6 +8025,13 @@ vmiopd_mdesc_t *rpcdebugRmApiControl_v25_18(void)
}
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_1A
vmiopd_mdesc_t *rpcdebugRmApiControl_v25_1A(void)
{
return &vmiopd_mdesc_t_rpc_rm_api_control_v25_1A;
}
#endif
#ifndef SKIP_PRINT_rpc_rm_api_control_v25_14
vmiopd_mdesc_t *rpcdebugRmApiControl_v25_14(void)
{
@@ -7967,6 +8151,13 @@ vmiopd_mdesc_t *rpcdebugVgpuPfRegRead32_v15_00(void)
}
#endif
#ifndef SKIP_PRINT_rpc_ctrl_set_vgpu_fb_usage_v1A_08
vmiopd_mdesc_t *rpcdebugCtrlSetVgpuFbUsage_v1A_08(void)
{
return &vmiopd_mdesc_t_rpc_ctrl_set_vgpu_fb_usage_v1A_08;
}
#endif
#ifndef SKIP_PRINT_rpc_ctrl_nvenc_sw_session_update_info_v1A_09
vmiopd_mdesc_t *rpcdebugCtrlNvencSwSessionUpdateInfo_v1A_09(void)
{
@@ -8878,6 +9069,7 @@ vmiopd_mdesc_t *rpcdebugCtrlNvlinkGetInbandReceivedData_v25_0C(void)
typedef union rpc_generic_union {
rpc_set_guest_system_info_v03_00 set_guest_system_info_v03_00;
rpc_set_guest_system_info_v set_guest_system_info_v;
rpc_set_guest_system_info_ext_v25_1B set_guest_system_info_ext_v25_1B;
rpc_set_guest_system_info_ext_v15_02 set_guest_system_info_ext_v15_02;
rpc_set_guest_system_info_ext_v set_guest_system_info_ext_v;
rpc_alloc_root_v07_00 alloc_root_v07_00;
@@ -8904,6 +9096,7 @@ typedef union rpc_generic_union {
rpc_idle_channels_v idle_channels_v;
rpc_alloc_event_v03_00 alloc_event_v03_00;
rpc_alloc_event_v alloc_event_v;
rpc_rm_api_control_v25_19 rm_api_control_v25_19;
rpc_rm_api_control_v25_0F rm_api_control_v25_0F;
rpc_rm_api_control_v25_16 rm_api_control_v25_16;
rpc_rm_api_control_v25_10 rm_api_control_v25_10;
@@ -8911,6 +9104,7 @@ typedef union rpc_generic_union {
rpc_rm_api_control_v25_0D rm_api_control_v25_0D;
rpc_rm_api_control_v25_17 rm_api_control_v25_17;
rpc_rm_api_control_v25_18 rm_api_control_v25_18;
rpc_rm_api_control_v25_1A rm_api_control_v25_1A;
rpc_rm_api_control_v25_14 rm_api_control_v25_14;
rpc_rm_api_control_v rm_api_control_v;
rpc_alloc_share_device_v03_00 alloc_share_device_v03_00;
@@ -8943,6 +9137,8 @@ typedef union rpc_generic_union {
rpc_get_encoder_capacity_v get_encoder_capacity_v;
rpc_vgpu_pf_reg_read32_v15_00 vgpu_pf_reg_read32_v15_00;
rpc_vgpu_pf_reg_read32_v vgpu_pf_reg_read32_v;
rpc_ctrl_set_vgpu_fb_usage_v1A_08 ctrl_set_vgpu_fb_usage_v1A_08;
rpc_ctrl_set_vgpu_fb_usage_v ctrl_set_vgpu_fb_usage_v;
rpc_ctrl_nvenc_sw_session_update_info_v1A_09 ctrl_nvenc_sw_session_update_info_v1A_09;
rpc_ctrl_nvenc_sw_session_update_info_v ctrl_nvenc_sw_session_update_info_v;
rpc_ctrl_reset_channel_v1A_09 ctrl_reset_channel_v1A_09;

View File

@@ -209,6 +209,7 @@ typedef NV_STATUS RpcUpdateBarPde(POBJGPU, POBJRPC, NV_RPC_UPDATE_PDE_BAR_T
typedef NV_STATUS RpcCtrlBindPmResources(POBJGPU, POBJRPC, NvHandle, NvHandle);
typedef NV_STATUS RpcMapMemoryDma(POBJGPU, POBJRPC, NvHandle, NvHandle, NvHandle,
NvHandle, NvU64, NvU64, NvU32, NvU64*);
typedef NV_STATUS RpcCtrlSetVgpuFbUsage(POBJGPU, POBJRPC, void*);
typedef NV_STATUS RpcUnmapMemoryDma(POBJGPU, POBJRPC, NvHandle, NvHandle, NvHandle, NvHandle, NvU32, NvU64);
typedef NV_STATUS RpcSetGuestSystemInfoExt(POBJGPU, POBJRPC);
typedef NV_STATUS Rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *);
@@ -347,6 +348,7 @@ typedef struct RPC_HAL_IFACES {
RpcUpdateBarPde *rpcUpdateBarPde; /* Update the value of BAR1/BAR2 PDE */
RpcCtrlBindPmResources *rpcCtrlBindPmResources; /* CTRL_BIND_PM_RESOURCES */
RpcMapMemoryDma *rpcMapMemoryDma; /* MAP_MEMORY_DMA */
RpcCtrlSetVgpuFbUsage *rpcCtrlSetVgpuFbUsage; /* CTRL_SET_VGPU_FB_USAGE */
RpcUnmapMemoryDma *rpcUnmapMemoryDma; /* UNMAP_MEMORY_DMA */
RpcSetGuestSystemInfoExt *rpcSetGuestSystemInfoExt; /* SET_GUEST_SYSTEM_INFO_EXT */
Rpc_iGrp_ipVersions_getInfo *rpc_iGrp_ipVersions_getInfo; /* Return lookup table of hal interface ptrs based on IP_VERSION */
@@ -614,6 +616,8 @@ typedef struct RPC_HAL_IFACES {
(_pRpc)->_hal.rpcCtrlBindPmResources(_pGpu, _pRpc, _arg0, _arg1)
#define rpcMapMemoryDma_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5, _arg6, _pArg7) \
(_pRpc)->_hal.rpcMapMemoryDma(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5, _arg6, _pArg7)
#define rpcCtrlSetVgpuFbUsage_HAL(_pGpu, _pRpc, _pArg0) \
(_pRpc)->_hal.rpcCtrlSetVgpuFbUsage(_pGpu, _pRpc, _pArg0)
#define rpcUnmapMemoryDma_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5) \
(_pRpc)->_hal.rpcUnmapMemoryDma(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5)
#define rpcSetGuestSystemInfoExt_HAL(_pGpu, _pRpc) \

View File

@@ -385,6 +385,8 @@ RpcRmApiControl rpcRmApiControl_v25_15;
RpcRmApiControl rpcRmApiControl_v25_16;
RpcRmApiControl rpcRmApiControl_v25_17;
RpcRmApiControl rpcRmApiControl_v25_18;
RpcRmApiControl rpcRmApiControl_v25_19;
RpcRmApiControl rpcRmApiControl_v25_1A;
RpcRmApiControl rpcRmApiControl_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X
// RPC:CTRL_FABRIC_MEM_STATS
@@ -531,12 +533,17 @@ RpcCtrlBindPmResources rpcCtrlBindPmResources_STUB; // TU10X, GA100
RpcMapMemoryDma rpcMapMemoryDma_v03_00;
RpcMapMemoryDma rpcMapMemoryDma_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X
// RPC:CTRL_SET_VGPU_FB_USAGE
RpcCtrlSetVgpuFbUsage rpcCtrlSetVgpuFbUsage_v1A_08;
RpcCtrlSetVgpuFbUsage rpcCtrlSetVgpuFbUsage_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X
// RPC:UNMAP_MEMORY_DMA
RpcUnmapMemoryDma rpcUnmapMemoryDma_v03_00;
RpcUnmapMemoryDma rpcUnmapMemoryDma_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X
// RPC:SET_GUEST_SYSTEM_INFO_EXT
RpcSetGuestSystemInfoExt rpcSetGuestSystemInfoExt_v15_02;
RpcSetGuestSystemInfoExt rpcSetGuestSystemInfoExt_v25_1B;
RpcSetGuestSystemInfoExt rpcSetGuestSystemInfoExt_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107, AD102, AD103, AD104, AD106, AD107, GH10X
@@ -2107,6 +2114,57 @@ static void rpc_iGrp_ipVersions_Install_v25_18(IGRP_IP_VERSIONS_TABLE_INFO *pInf
#endif //
}
// No enabled chips use this variant provider
static void rpc_iGrp_ipVersions_Install_v25_19(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
// No enabled chips use this variant provider
static void rpc_iGrp_ipVersions_Install_v25_1A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
// No enabled chips use this variant provider
static void rpc_iGrp_ipVersions_Install_v25_1B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
@@ -2313,8 +2371,12 @@ static NV_STATUS rpc_iGrp_ipVersions_Wrapup(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
pRpcHal->rpcRmApiControl = rpcRmApiControl_v25_16;
if (IsIPVersionInRange(pRpc, 0x25170000, 0x2517FFFF))
pRpcHal->rpcRmApiControl = rpcRmApiControl_v25_17;
if (IsIPVersionInRange(pRpc, 0x25180000, 0xFFFFFFFF))
if (IsIPVersionInRange(pRpc, 0x25180000, 0x2518FFFF))
pRpcHal->rpcRmApiControl = rpcRmApiControl_v25_18;
if (IsIPVersionInRange(pRpc, 0x25190000, 0x2519FFFF))
pRpcHal->rpcRmApiControl = rpcRmApiControl_v25_19;
if (IsIPVersionInRange(pRpc, 0x251A0000, 0xFFFFFFFF))
pRpcHal->rpcRmApiControl = rpcRmApiControl_v25_1A;
if (IsIPVersionInRange(pRpc, 0x1E0C0000, 0xFFFFFFFF))
pRpcHal->rpcCtrlFabricMemStats = rpcCtrlFabricMemStats_v1E_0C;
if (IsIPVersionInRange(pRpc, 0x1A0E0000, 0xFFFFFFFF))
@@ -2387,10 +2449,14 @@ static NV_STATUS rpc_iGrp_ipVersions_Wrapup(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
pRpcHal->rpcCtrlBindPmResources = rpcCtrlBindPmResources_v1A_0F;
if (IsIPVersionInRange(pRpc, 0x03000000, 0xFFFFFFFF))
pRpcHal->rpcMapMemoryDma = rpcMapMemoryDma_v03_00;
if (IsIPVersionInRange(pRpc, 0x1A080000, 0xFFFFFFFF))
pRpcHal->rpcCtrlSetVgpuFbUsage = rpcCtrlSetVgpuFbUsage_v1A_08;
if (IsIPVersionInRange(pRpc, 0x03000000, 0xFFFFFFFF))
pRpcHal->rpcUnmapMemoryDma = rpcUnmapMemoryDma_v03_00;
if (IsIPVersionInRange(pRpc, 0x15020000, 0xFFFFFFFF))
if (IsIPVersionInRange(pRpc, 0x15020000, 0x251AFFFF))
pRpcHal->rpcSetGuestSystemInfoExt = rpcSetGuestSystemInfoExt_v15_02;
if (IsIPVersionInRange(pRpc, 0x251B0000, 0xFFFFFFFF))
pRpcHal->rpcSetGuestSystemInfoExt = rpcSetGuestSystemInfoExt_v25_1B;
// Verify each 'dynamically set' interface was actually set
@@ -2522,6 +2588,7 @@ static NV_STATUS rpc_iGrp_ipVersions_Wrapup(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcUpdateBarPde);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlBindPmResources);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcMapMemoryDma);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcCtrlSetVgpuFbUsage);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcUnmapMemoryDma);
_RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcSetGuestSystemInfoExt);
@@ -2806,6 +2873,15 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v25_18[] = {
{ 0x25180000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v25_19[] = {
{ 0x25190000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v25_1A[] = {
{ 0x251A0000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v25_1B[] = {
{ 0x251B0000, 0xFFFFFFFF, }, //
};
#define _RPC_HAL_IGRP_ENTRY_INIT(v) \
{ RPC_IGRP_IP_VERSIONS_RANGES_##v, NV_ARRAY_ELEMENTS(RPC_IGRP_IP_VERSIONS_RANGES_##v), rpc_iGrp_ipVersions_Install_##v, }
@@ -2902,6 +2978,9 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
_RPC_HAL_IGRP_ENTRY_INIT(v25_16), //
_RPC_HAL_IGRP_ENTRY_INIT(v25_17), //
_RPC_HAL_IGRP_ENTRY_INIT(v25_18), //
_RPC_HAL_IGRP_ENTRY_INIT(v25_19), //
_RPC_HAL_IGRP_ENTRY_INIT(v25_1A), //
_RPC_HAL_IGRP_ENTRY_INIT(v25_1B), //
};
#undef _RPC_HAL_IGRP_ENTRY_INIT
@@ -3054,6 +3133,7 @@ static void rpcHalIfacesSetup_TU102(RPC_HAL_IFACES *pRpcHal)
rpcUpdateBarPde_STUB, // rpcUpdateBarPde
rpcCtrlBindPmResources_STUB, // rpcCtrlBindPmResources
rpcMapMemoryDma_STUB, // rpcMapMemoryDma
rpcCtrlSetVgpuFbUsage_STUB, // rpcCtrlSetVgpuFbUsage
rpcUnmapMemoryDma_STUB, // rpcUnmapMemoryDma
rpcSetGuestSystemInfoExt_STUB, // rpcSetGuestSystemInfoExt
rpc_iGrp_ipVersions_getInfo, // rpc_iGrp_ipVersions_getInfo
@@ -3238,6 +3318,7 @@ static void rpcHalIfacesSetup_GA100(RPC_HAL_IFACES *pRpcHal)
rpcUpdateBarPde_STUB, // rpcUpdateBarPde
rpcCtrlBindPmResources_STUB, // rpcCtrlBindPmResources
rpcMapMemoryDma_STUB, // rpcMapMemoryDma
rpcCtrlSetVgpuFbUsage_STUB, // rpcCtrlSetVgpuFbUsage
rpcUnmapMemoryDma_STUB, // rpcUnmapMemoryDma
rpcSetGuestSystemInfoExt_STUB, // rpcSetGuestSystemInfoExt
rpc_iGrp_ipVersions_getInfo, // rpc_iGrp_ipVersions_getInfo
@@ -3434,6 +3515,7 @@ static void rpcHalIfacesSetup_AD102(RPC_HAL_IFACES *pRpcHal)
rpcUpdateBarPde_STUB, // rpcUpdateBarPde
rpcCtrlBindPmResources_STUB, // rpcCtrlBindPmResources
rpcMapMemoryDma_STUB, // rpcMapMemoryDma
rpcCtrlSetVgpuFbUsage_STUB, // rpcCtrlSetVgpuFbUsage
rpcUnmapMemoryDma_STUB, // rpcUnmapMemoryDma
rpcSetGuestSystemInfoExt_STUB, // rpcSetGuestSystemInfoExt
rpc_iGrp_ipVersions_getInfo, // rpc_iGrp_ipVersions_getInfo
@@ -3618,6 +3700,7 @@ static void rpcHalIfacesSetup_GH100(RPC_HAL_IFACES *pRpcHal)
rpcUpdateBarPde_STUB, // rpcUpdateBarPde
rpcCtrlBindPmResources_STUB, // rpcCtrlBindPmResources
rpcMapMemoryDma_STUB, // rpcMapMemoryDma
rpcCtrlSetVgpuFbUsage_STUB, // rpcCtrlSetVgpuFbUsage
rpcUnmapMemoryDma_STUB, // rpcUnmapMemoryDma
rpcSetGuestSystemInfoExt_STUB, // rpcSetGuestSystemInfoExt
rpc_iGrp_ipVersions_getInfo, // rpc_iGrp_ipVersions_getInfo

View File

@@ -1561,12 +1561,12 @@ typedef struct NVA0BD_CTRL_NVFBC_TIMESTAMP_v12_04
typedef NVA0BD_CTRL_NVFBC_TIMESTAMP_v12_04 NVA0BD_CTRL_NVFBC_TIMESTAMP_v;
typedef struct NVA082_CTRL_HOST_VGPU_DEVICE_SET_VGPU_FB_USAGE_PARAMS_v07_02
typedef struct NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02
{
NvU64 fbUsed NV_ALIGN_BYTES(8);
} NVA082_CTRL_HOST_VGPU_DEVICE_SET_VGPU_FB_USAGE_PARAMS_v07_02;
} NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02;
typedef NVA082_CTRL_HOST_VGPU_DEVICE_SET_VGPU_FB_USAGE_PARAMS_v07_02 NVA082_CTRL_HOST_VGPU_DEVICE_SET_VGPU_FB_USAGE_PARAMS_v;
typedef NVA080_CTRL_SET_FB_USAGE_PARAMS_v07_02 NVA080_CTRL_SET_FB_USAGE_PARAMS_v;
typedef struct NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_v08_00
{

File diff suppressed because it is too large Load Diff

View File

@@ -363,6 +363,12 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetPdbProperties__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGrInternalSetFecsTraceHwEnable__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGrInternalGetFecsTraceHwEnable__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGrInternalSetFecsTraceRdOffset__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGrInternalGetFecsTraceRdOffset__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGrInternalSetFecsTraceWrOffset__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGrStaticGetFecsTraceDefines__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetCachedInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetInfoV2__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuGetIpVersion__)(struct Subdevice *, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *);
@@ -1002,6 +1008,12 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdKGrInternalStaticGetPdbProperties(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetPdbProperties_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGrInternalSetFecsTraceHwEnable(pSubdevice, pParams) subdeviceCtrlCmdGrInternalSetFecsTraceHwEnable_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGrInternalGetFecsTraceHwEnable(pSubdevice, pParams) subdeviceCtrlCmdGrInternalGetFecsTraceHwEnable_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGrInternalSetFecsTraceRdOffset(pSubdevice, pParams) subdeviceCtrlCmdGrInternalSetFecsTraceRdOffset_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGrInternalGetFecsTraceRdOffset(pSubdevice, pParams) subdeviceCtrlCmdGrInternalGetFecsTraceRdOffset_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGrInternalSetFecsTraceWrOffset(pSubdevice, pParams) subdeviceCtrlCmdGrInternalSetFecsTraceWrOffset_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGrStaticGetFecsTraceDefines(pSubdevice, pParams) subdeviceCtrlCmdGrStaticGetFecsTraceDefines_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGpuGetCachedInfo(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(pSubdevice, pGpuInfoParams)
#define subdeviceCtrlCmdGpuGetInfoV2(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetInfoV2_DISPATCH(pSubdevice, pGpuInfoParams)
#define subdeviceCtrlCmdGpuGetIpVersion(pSubdevice, pGpuIpVersionParams) subdeviceCtrlCmdGpuGetIpVersion_DISPATCH(pSubdevice, pGpuIpVersionParams)
@@ -2864,6 +2876,42 @@ static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetPdbProperties_DISPAT
return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetPdbProperties__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGrInternalSetFecsTraceHwEnable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdGrInternalSetFecsTraceHwEnable_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdGrInternalSetFecsTraceHwEnable__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGrInternalGetFecsTraceHwEnable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdGrInternalGetFecsTraceHwEnable_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdGrInternalGetFecsTraceHwEnable__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGrInternalSetFecsTraceRdOffset_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS *pParams);

// Dispatch thunk: forwards the call through the object's vtable slot.
static inline NV_STATUS subdeviceCtrlCmdGrInternalSetFecsTraceRdOffset_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS *pParams) {
    NV_STATUS (*pfnCtrl)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS *);
    pfnCtrl = pSubdevice->__subdeviceCtrlCmdGrInternalSetFecsTraceRdOffset__;
    return pfnCtrl(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGrInternalGetFecsTraceRdOffset_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS *pParams);

// Dispatch thunk: forwards the call through the object's vtable slot.
static inline NV_STATUS subdeviceCtrlCmdGrInternalGetFecsTraceRdOffset_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS *pParams) {
    NV_STATUS (*pfnCtrl)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS *);
    pfnCtrl = pSubdevice->__subdeviceCtrlCmdGrInternalGetFecsTraceRdOffset__;
    return pfnCtrl(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGrInternalSetFecsTraceWrOffset_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS *pParams);

// Dispatch thunk: forwards the call through the object's vtable slot.
static inline NV_STATUS subdeviceCtrlCmdGrInternalSetFecsTraceWrOffset_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS *pParams) {
    NV_STATUS (*pfnCtrl)(struct Subdevice *, NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS *);
    pfnCtrl = pSubdevice->__subdeviceCtrlCmdGrInternalSetFecsTraceWrOffset__;
    return pfnCtrl(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGrStaticGetFecsTraceDefines_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *pParams);

// Dispatch thunk: forwards the call through the object's vtable slot.
static inline NV_STATUS subdeviceCtrlCmdGrStaticGetFecsTraceDefines_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *pParams) {
    NV_STATUS (*pfnCtrl)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *);
    pfnCtrl = pSubdevice->__subdeviceCtrlCmdGrStaticGetFecsTraceDefines__;
    return pfnCtrl(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams);
static inline NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams) {

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -415,6 +415,7 @@ struct OBJSYS {
PNODE pMemFilterList;
NvBool PDB_PROP_SYS_IS_QSYNC_FW_REVISION_CHECK_DISABLED;
NvBool PDB_PROP_SYS_GPU_LOCK_MIDPATH_ENABLED;
NvBool PDB_PROP_SYS_DESTRUCTING;
NvU64 rmInstanceId;
NvU32 currentCid;
NvBool bUseDeferredClientListFree;
@@ -424,7 +425,6 @@ struct OBJSYS {
PORT_RWLOCK *pSysMemExportModuleLock;
volatile NvU64 sysExportObjectCounter;
NvHandle hSysMemExportClient;
NvBool bSysUuidBasedMemExportSupport;
struct OBJGPUMGR *pGpuMgr;
struct OBJGSYNCMGR *pGsyncMgr;
struct OBJVGPUMGR *pVgpuMgr;
@@ -484,6 +484,8 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS;
#define PDB_PROP_SYS_NVIF_INIT_DONE_BASE_NAME PDB_PROP_SYS_NVIF_INIT_DONE
#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_CAST
#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_NAME PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT
#define PDB_PROP_SYS_DESTRUCTING_BASE_CAST
#define PDB_PROP_SYS_DESTRUCTING_BASE_NAME PDB_PROP_SYS_DESTRUCTING
#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_CAST
#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_NAME PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS
#define PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED_BASE_CAST

View File

@@ -580,6 +580,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*pClassInfo=*/ &(__nvoc_class_def_VgpuConfigApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigSetCapability"
#endif
},
{ /* [23] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) vgpuconfigapiCtrlCmdVgpuConfigGetCapability_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xa081011fu,
/*paramSize=*/ sizeof(NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_VgpuConfigApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigGetCapability"
#endif
},
@@ -587,7 +602,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
const struct NVOC_EXPORT_INFO __nvoc_export_info_VgpuConfigApi =
{
/*numEntries=*/ 23,
/*numEntries=*/ 24,
/*pExportEntries=*/ __nvoc_exported_method_def_VgpuConfigApi
};
@@ -659,6 +674,10 @@ static void __nvoc_init_funcTable_VgpuConfigApi_1(VgpuConfigApi *pThis) {
pThis->__vgpuconfigapiCtrlCmdVgpuConfigSetCapability__ = &vgpuconfigapiCtrlCmdVgpuConfigSetCapability_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__vgpuconfigapiCtrlCmdVgpuConfigGetCapability__ = &vgpuconfigapiCtrlCmdVgpuConfigGetCapability_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__vgpuconfigapiCtrlCmdVgpuConfigNotifyStart__ = &vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_IMPL;
#endif

View File

@@ -74,6 +74,7 @@ struct VgpuConfigApi {
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigGetCreatableVgpuTypes__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigSetCapability__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_SET_CAPABILITY_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigGetCapability__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigNotifyStart__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigGetCreatablePlacements__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_GET_CREATABLE_PLACEMENTS_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigUpdatePgpuInfo__)(struct VgpuConfigApi *);
@@ -158,6 +159,7 @@ NV_STATUS __nvoc_objCreate_VgpuConfigApi(VgpuConfigApi**, Dynamic*, NvU32, struc
// NVOC convenience macros: each public vGPU-config control-call name expands
// to its _DISPATCH thunk, so every invocation is routed through the object's
// function-pointer table rather than calling the _IMPL directly.
#define vgpuconfigapiCtrlCmdVgpuConfigGetCreatableVgpuTypes(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigGetCreatableVgpuTypes_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification(pVgpuConfigApi, pSetEventParams) vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification_DISPATCH(pVgpuConfigApi, pSetEventParams)
#define vgpuconfigapiCtrlCmdVgpuConfigSetCapability(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigSetCapability_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigGetCapability(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigGetCapability_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigNotifyStart(pVgpuConfigApi, pNotifyParams) vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_DISPATCH(pVgpuConfigApi, pNotifyParams)
#define vgpuconfigapiCtrlCmdVgpuConfigGetCreatablePlacements(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigGetCreatablePlacements_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigUpdatePgpuInfo(pVgpuConfigApi) vgpuconfigapiCtrlCmdVgpuConfigUpdatePgpuInfo_DISPATCH(pVgpuConfigApi)
@@ -246,6 +248,12 @@ static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigSetCapability_DISPATCH(str
return pVgpuConfigApi->__vgpuconfigapiCtrlCmdVgpuConfigSetCapability__(pVgpuConfigApi, pParams);
}
NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigGetCapability_IMPL(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS *pParams);

// Dispatch thunk: forwards the call through the object's vtable slot.
static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigGetCapability_DISPATCH(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS *pParams) {
    NV_STATUS (*pfnCtrl)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS *);
    pfnCtrl = pVgpuConfigApi->__vgpuconfigapiCtrlCmdVgpuConfigGetCapability__;
    return pfnCtrl(pVgpuConfigApi, pParams);
}
NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_IMPL(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *pNotifyParams);
static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_DISPATCH(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *pNotifyParams) {