535.146.02

This commit is contained in:
Bernhard Stoeckner
2023-12-07 15:09:52 +01:00
parent e573018659
commit 7165299dee
77 changed files with 965 additions and 362 deletions

View File

@@ -358,6 +358,7 @@ typedef struct
*/
NvU64 s0ix_gcoff_max_fb_size;
NvU32 pmc_boot_1;
NvU32 pmc_boot_42;
} nv_priv_t;

View File

@@ -924,6 +924,7 @@ NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *
NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *);
void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);

View File

@@ -1160,6 +1160,7 @@ NvBool RmInitPrivateState(
nv_priv_t *nvp;
NvU32 gpuId;
NvU32 pmc_boot_0 = 0;
NvU32 pmc_boot_1 = 0;
NvU32 pmc_boot_42 = 0;
NV_SET_NV_PRIV(pNv, NULL);
@@ -1177,6 +1178,7 @@ NvBool RmInitPrivateState(
}
pmc_boot_0 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_0);
pmc_boot_1 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_1);
pmc_boot_42 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_42);
os_unmap_kernel_space(pNv->regs->map_u, os_page_size);
@@ -1216,6 +1218,7 @@ NvBool RmInitPrivateState(
os_mem_set(nvp, 0, sizeof(*nvp));
nvp->status = NV_ERR_INVALID_STATE;
nvp->pmc_boot_0 = pmc_boot_0;
nvp->pmc_boot_1 = pmc_boot_1;
nvp->pmc_boot_42 = pmc_boot_42;
NV_SET_NV_PRIV(pNv, nvp);
@@ -1234,7 +1237,7 @@ void RmClearPrivateState(
nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
nv_dynamic_power_t dynamicPowerCopy;
NvU32 x = 0;
NvU32 pmc_boot_0, pmc_boot_42;
NvU32 pmc_boot_0, pmc_boot_1, pmc_boot_42;
//
// Do not clear private state after GPU resets, it is used while
@@ -1252,6 +1255,7 @@ void RmClearPrivateState(
pRegistryCopy = nvp->pRegistry;
dynamicPowerCopy = nvp->dynamic_power;
pmc_boot_0 = nvp->pmc_boot_0;
pmc_boot_1 = nvp->pmc_boot_1;
pmc_boot_42 = nvp->pmc_boot_42;
for (x = 0; x < MAX_I2C_ADAPTERS; x++)
@@ -1267,6 +1271,7 @@ void RmClearPrivateState(
nvp->pRegistry = pRegistryCopy;
nvp->dynamic_power = dynamicPowerCopy;
nvp->pmc_boot_0 = pmc_boot_0;
nvp->pmc_boot_1 = pmc_boot_1;
nvp->pmc_boot_42 = pmc_boot_42;
for (x = 0; x < MAX_I2C_ADAPTERS; x++)

View File

@@ -706,3 +706,27 @@ NV_STATUS rm_gpu_handle_mmu_faults(
return status;
}
//
// Decide whether MSI-X interrupts may be used for this GPU.
//
// Delegates to gpumgrIsDeviceMsixAllowed() with the BAR0 CPU base address
// and the PMC_BOOT_1 / PMC_BOOT_42 register values cached in the per-device
// private state (nv_priv_t) when the device was probed.
//
// Returns NV_FALSE if the RM API lock cannot be acquired.
//
NvBool NV_API_CALL rm_is_msix_allowed(
nvidia_stack_t *sp,
nv_state_t *nv
)
{
nv_priv_t *pNvp = NV_GET_NV_PRIV(nv);
THREAD_STATE_NODE threadState;
void *fp;
NvBool ret = NV_FALSE;
// Switch onto the RM runtime stack and set up per-thread state for the call.
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// The check reads shared GPU-manager state, so take the API lock first.
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK)
{
ret = gpumgrIsDeviceMsixAllowed(nv->regs->cpu_address,
pNvp->pmc_boot_1, pNvp->pmc_boot_42);
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return ret;
}

View File

@@ -23,6 +23,7 @@
--undefined=rm_isr
--undefined=rm_isr_bh
--undefined=rm_isr_bh_unlocked
--undefined=rm_is_msix_allowed
--undefined=rm_perform_version_check
--undefined=rm_power_management
--undefined=rm_stop_user_channels

View File

@@ -590,6 +590,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
#endif
},
{ /* [26] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*flags=*/ 0x11u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x801308u,
/*paramSize=*/ sizeof(NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdSetDefaultVidmemPhysicality"
#endif
},
{ /* [27] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -604,7 +619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCaps"
#endif
},
{ /* [27] */
{ /* [28] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4850u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -619,7 +634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCapsV2"
#endif
},
{ /* [28] */
{ /* [29] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -634,7 +649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCaps"
#endif
},
{ /* [29] */
{ /* [30] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -649,7 +664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartSelectedChannels"
#endif
},
{ /* [30] */
{ /* [31] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -664,7 +679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetEngineContextProperties"
#endif
},
{ /* [31] */
{ /* [32] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -679,7 +694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetChannelList"
#endif
},
{ /* [32] */
{ /* [33] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -694,7 +709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetLatencyBufferSize"
#endif
},
{ /* [33] */
{ /* [34] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -709,7 +724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoSetChannelProperties"
#endif
},
{ /* [34] */
{ /* [35] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -724,7 +739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStopRunlist"
#endif
},
{ /* [35] */
{ /* [36] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -739,7 +754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartRunlist"
#endif
},
{ /* [36] */
{ /* [37] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -754,7 +769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCapsV2"
#endif
},
{ /* [37] */
{ /* [38] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -769,7 +784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoIdleChannels"
#endif
},
{ /* [38] */
{ /* [39] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -784,7 +799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPteInfo"
#endif
},
{ /* [39] */
{ /* [40] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -799,7 +814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaFlush"
#endif
},
{ /* [40] */
{ /* [41] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -814,7 +829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaAdvSchedGetVaCaps"
#endif
},
{ /* [41] */
{ /* [42] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -829,7 +844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPdeInfo"
#endif
},
{ /* [42] */
{ /* [43] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -844,7 +859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPteInfo"
#endif
},
{ /* [43] */
{ /* [44] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -859,7 +874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaInvalidateTLB"
#endif
},
{ /* [44] */
{ /* [45] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -874,7 +889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetCaps"
#endif
},
{ /* [45] */
{ /* [46] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -889,7 +904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetVASpaceSize"
#endif
},
{ /* [46] */
{ /* [47] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -904,7 +919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUpdatePde2"
#endif
},
{ /* [47] */
{ /* [48] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -919,7 +934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaEnablePrivilegedRange"
#endif
},
{ /* [48] */
{ /* [49] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c0000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -934,7 +949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetDefaultVASpace"
#endif
},
{ /* [49] */
{ /* [50] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -949,7 +964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPageDirectory"
#endif
},
{ /* [50] */
{ /* [51] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -964,7 +979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUnsetPageDirectory"
#endif
},
{ /* [51] */
{ /* [52] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -979,7 +994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdMsencGetCaps"
#endif
},
{ /* [52] */
{ /* [53] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -994,7 +1009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBspGetCapsV2"
#endif
},
{ /* [53] */
{ /* [54] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1009,7 +1024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
#endif
},
{ /* [54] */
{ /* [55] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1024,7 +1039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
#endif
},
{ /* [55] */
{ /* [56] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1039,7 +1054,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdNvjpgGetCapsV2"
#endif
},
{ /* [56] */
{ /* [57] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1054,7 +1069,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfCudaLimitDisable"
#endif
},
{ /* [57] */
{ /* [58] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1069,7 +1084,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount"
#endif
},
{ /* [58] */
{ /* [59] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1089,7 +1104,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
const struct NVOC_EXPORT_INFO __nvoc_export_info_Device =
{
/*numEntries=*/ 59,
/*numEntries=*/ 60,
/*pExportEntries=*/ __nvoc_exported_method_def_Device
};
@@ -1234,6 +1249,10 @@ static void __nvoc_init_funcTable_Device_1(Device *pThis) {
pThis->__deviceCtrlCmdFbGetCapsV2__ = &deviceCtrlCmdFbGetCapsV2_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__deviceCtrlCmdSetDefaultVidmemPhysicality__ = &deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
pThis->__deviceCtrlCmdFifoGetCaps__ = &deviceCtrlCmdFifoGetCaps_IMPL;
#endif

View File

@@ -109,6 +109,7 @@ struct Device {
NV_STATUS (*__deviceCtrlCmdFbGetCompbitStoreInfo__)(struct Device *, NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFbGetCaps__)(struct Device *, NV0080_CTRL_FB_GET_CAPS_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFbGetCapsV2__)(struct Device *, NV0080_CTRL_FB_GET_CAPS_V2_PARAMS *);
NV_STATUS (*__deviceCtrlCmdSetDefaultVidmemPhysicality__)(struct Device *, NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFifoGetCaps__)(struct Device *, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFifoGetCapsV2__)(struct Device *, NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *);
NV_STATUS (*__deviceCtrlCmdFifoStartSelectedChannels__)(struct Device *, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *);
@@ -182,6 +183,7 @@ struct Device {
NvU64 vaLimitInternal;
NvU64 vaSize;
NvU32 vaMode;
NvU32 defaultVidmemPhysicalityOverride;
struct KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
};
@@ -241,6 +243,7 @@ NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT
#define deviceCtrlCmdFbGetCompbitStoreInfo(pDevice, pCompbitStoreParams) deviceCtrlCmdFbGetCompbitStoreInfo_DISPATCH(pDevice, pCompbitStoreParams)
#define deviceCtrlCmdFbGetCaps(pDevice, pFbCapsParams) deviceCtrlCmdFbGetCaps_DISPATCH(pDevice, pFbCapsParams)
#define deviceCtrlCmdFbGetCapsV2(pDevice, pFbCapsParams) deviceCtrlCmdFbGetCapsV2_DISPATCH(pDevice, pFbCapsParams)
#define deviceCtrlCmdSetDefaultVidmemPhysicality(pDevice, pParams) deviceCtrlCmdSetDefaultVidmemPhysicality_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdFifoGetCaps(pDevice, pFifoCapsParams) deviceCtrlCmdFifoGetCaps_DISPATCH(pDevice, pFifoCapsParams)
#define deviceCtrlCmdFifoGetCapsV2(pDevice, pFifoCapsParams) deviceCtrlCmdFifoGetCapsV2_DISPATCH(pDevice, pFifoCapsParams)
#define deviceCtrlCmdFifoStartSelectedChannels(pDevice, pStartSel) deviceCtrlCmdFifoStartSelectedChannels_DISPATCH(pDevice, pStartSel)
@@ -465,6 +468,12 @@ static inline NV_STATUS deviceCtrlCmdFbGetCapsV2_DISPATCH(struct Device *pDevice
return pDevice->__deviceCtrlCmdFbGetCapsV2__(pDevice, pFbCapsParams);
}
NV_STATUS deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL(struct Device *pDevice, NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdSetDefaultVidmemPhysicality_DISPATCH(struct Device *pDevice, NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdSetDefaultVidmemPhysicality__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdFifoGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pFifoCapsParams);
static inline NV_STATUS deviceCtrlCmdFifoGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pFifoCapsParams) {

View File

@@ -529,6 +529,7 @@ void gpumgrSetGpuRelease(void);
NvU8 gpumgrGetGpuBridgeType(void);
NvBool gpumgrAreAllGpusInOffloadMode(void);
NvBool gpumgrIsSafeToReadGpuInfo(void);
NvBool gpumgrIsDeviceMsixAllowed(RmPhysAddr bar0BaseAddr, NvU32 pmcBoot1, NvU32 pmcBoot42);
//
// gpumgrIsSubDeviceCountOne

View File

@@ -765,6 +765,17 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
{
pThis->__gpuClearEccCounts__ = &gpuClearEccCounts_ac1694;
}
// Hal function -- gpuWaitForGfwBootComplete
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__gpuWaitForGfwBootComplete__ = &gpuWaitForGfwBootComplete_TU102;
}
// default
else
{
pThis->__gpuWaitForGfwBootComplete__ = &gpuWaitForGfwBootComplete_5baef9;
}
}
void __nvoc_init_funcTable_OBJGPU(OBJGPU *pThis) {

View File

@@ -898,6 +898,7 @@ struct OBJGPU {
NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *);
void (*__gpuCheckEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuClearEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuWaitForGfwBootComplete__)(struct OBJGPU *);
NvBool PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED;
NvBool bVideoLinkDisabled;
GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel;
@@ -1476,6 +1477,8 @@ NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32,
#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts_HAL(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete_HAL(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) {
return NV_OK;
}
@@ -3228,6 +3231,16 @@ static inline NV_STATUS gpuClearEccCounts_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuClearEccCounts__(pGpu);
}
NV_STATUS gpuWaitForGfwBootComplete_TU102(struct OBJGPU *pGpu);
static inline NV_STATUS gpuWaitForGfwBootComplete_5baef9(struct OBJGPU *pGpu) {
NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}
static inline NV_STATUS gpuWaitForGfwBootComplete_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuWaitForGfwBootComplete__(pGpu);
}
static inline PENGDESCRIPTOR gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) {
return pGpu->engineOrder.pEngineInitDescriptors;
}
@@ -4707,6 +4720,13 @@ VGPU_STATIC_INFO *gpuGetStaticInfo(struct OBJGPU *pGpu);
GspStaticConfigInfo *gpuGetGspStaticInfo(struct OBJGPU *pGpu);
#define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu)
//
// This function needs to be called when OBJGPU is not created. HAL
// infrastructure can't be used for this case, so it has been added manually.
// It will be invoked directly by gpumgrIsDeviceMsixAllowed().
//
NvBool gpuIsMsixAllowed_TU102(RmPhysAddr bar0BaseAddr);
#endif // _OBJGPU_H_
#ifdef __cplusplus

View File

@@ -527,6 +527,17 @@ static inline void krcWatchdogCallbackVblankRecovery(struct OBJGPU *pGpu, struct
#define krcWatchdogCallbackVblankRecovery(pGpu, pKernelRc) krcWatchdogCallbackVblankRecovery_IMPL(pGpu, pKernelRc)
#endif //__nvoc_kernel_rc_h_disabled
NV_STATUS krcWatchdogGetClientHandle_IMPL(struct KernelRc *arg0, NvHandle *arg1);
#ifdef __nvoc_kernel_rc_h_disabled
static inline NV_STATUS krcWatchdogGetClientHandle(struct KernelRc *arg0, NvHandle *arg1) {
NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_rc_h_disabled
#define krcWatchdogGetClientHandle(arg0, arg1) krcWatchdogGetClientHandle_IMPL(arg0, arg1)
#endif //__nvoc_kernel_rc_h_disabled
#undef PRIVATE_FIELD

View File

@@ -901,8 +901,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
{ 0x233A, 0x183a, 0x10de, "NVIDIA H800 NVL" },
{ 0x2342, 0x16eb, 0x10de, "GH200 120GB" },
{ 0x2342, 0x1809, 0x10de, "GH200 480GB" },
{ 0x2342, 0x16eb, 0x10de, "NVIDIA GH200 120GB" },
{ 0x2342, 0x1809, 0x10de, "NVIDIA GH200 480GB" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
{ 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@@ -995,6 +995,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B2, 0x17fa, 0x103c, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x10de, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x17aa, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B3, 0x1934, 0x10de, "NVIDIA RTX 5880 Ada Generation" },
{ 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
{ 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },
{ 0x26B9, 0x1851, 0x10de, "NVIDIA L40S" },
@@ -2026,8 +2027,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B9, 0x189c, 0x10DE, "NVIDIA L40S-16A" },
{ 0x26B9, 0x189d, 0x10DE, "NVIDIA L40S-24A" },
{ 0x26B9, 0x189e, 0x10DE, "NVIDIA L40S-48A" },
{ 0x26B9, 0x189f, 0x10DE, "GeForce RTX 3050" },
{ 0x26B9, 0x18a0, 0x10DE, "GeForce RTX 3060" },
{ 0x26B9, 0x189f, 0x10DE, "NVIDIA GeForce RTX 3050" },
{ 0x26B9, 0x18a0, 0x10DE, "NVIDIA GeForce RTX 3060" },
{ 0x26B9, 0x18a1, 0x10DE, "NVIDIA L40S-1" },
{ 0x26B9, 0x18a2, 0x10DE, "NVIDIA L40S-2" },
{ 0x26B9, 0x18a3, 0x10DE, "NVIDIA L40S-3" },

View File

@@ -1753,12 +1753,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif
},
{ /* [101] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBiosGetSKUInfo_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u)
/*flags=*/ 0x212u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20800808u,
/*paramSize=*/ sizeof(NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS),
@@ -5668,12 +5668,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif
},
{ /* [362] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
/*flags=*/ 0x6210u,
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u)
/*flags=*/ 0x4210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20801829u,
/*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS),
@@ -7825,7 +7825,7 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdBiosGetNbsiV2__ = &subdeviceCtrlCmdBiosGetNbsiV2_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u)
pThis->__subdeviceCtrlCmdBiosGetSKUInfo__ = &subdeviceCtrlCmdBiosGetSKUInfo_IMPL;
#endif
@@ -7969,9 +7969,8 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdBusGetEomStatus__ = &subdeviceCtrlCmdBusGetEomStatus_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
pThis->__subdeviceCtrlCmdBusGetPcieReqAtomicsCaps__ = &subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_IMPL;
#endif
// Hal function -- subdeviceCtrlCmdBusGetPcieReqAtomicsCaps
pThis->__subdeviceCtrlCmdBusGetPcieReqAtomicsCaps__ = &subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_92bfc3;
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x6210u)
pThis->__subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics__ = &subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics_IMPL;

View File

@@ -747,6 +747,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdBusGetUphyDlnCfgSpace(pSubdevice, pParams) subdeviceCtrlCmdBusGetUphyDlnCfgSpace_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetEomStatus(pSubdevice, pParams) subdeviceCtrlCmdBusGetEomStatus_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetPcieReqAtomicsCaps(pSubdevice, pParams) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_HAL(pSubdevice, pParams) subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics(pSubdevice, pParams) subdeviceCtrlCmdBusGetPcieSupportedGpuAtomics_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetC2CInfo(pSubdevice, pParams) subdeviceCtrlCmdBusGetC2CInfo_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdBusGetC2CErrorInfo(pSubdevice, pParams) subdeviceCtrlCmdBusGetC2CErrorInfo_DISPATCH(pSubdevice, pParams)
@@ -1487,7 +1488,10 @@ static inline NV_STATUS subdeviceCtrlCmdBusGetEomStatus_DISPATCH(struct Subdevic
return pSubdevice->__subdeviceCtrlCmdBusGetEomStatus__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_92bfc3(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS *pParams) {
NV_ASSERT_PRECOMP(0);
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS subdeviceCtrlCmdBusGetPcieReqAtomicsCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_REQ_ATOMICS_CAPS_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdBusGetPcieReqAtomicsCaps__(pSubdevice, pParams);

View File

@@ -79,6 +79,7 @@ typedef struct {
NvNotification *notifiers[NV_MAX_SUBDEVICES];
NvNotification *errorContext;
NvNotification *notifierToken;
NvBool bHandleValid;
} KernelWatchdog;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -22,6 +22,7 @@
*/
#include "core/core.h"
#include "core/locks.h"
#include "os/os.h"
#include "gpu/gpu.h"
#include "vgpu/vgpu_version.h"
@@ -39,7 +40,7 @@ vgpuCreateObject
OBJGPU *pGpu
)
{
NV_STATUS rmStatus = NV_OK;
NV_STATUS rmStatus = NV_OK;
return rmStatus;
}

View File

@@ -1283,7 +1283,7 @@ done:
// RmMsgPrefix - Add the RmMsg prefix to the passed in string, returning
// the length of the formatted string.
//
// Format: "NVRM file linenum function timestamp: "
// Format: "NVRM: file linenum function timestamp: "
//
NvU32
RmMsgPrefix
@@ -1306,7 +1306,8 @@ RmMsgPrefix
{
portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX));
len += sizeof(NV_PRINTF_PREFIX) - 1;
space = " ";
portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX_SEPARATOR, sizeof(NV_PRINTF_PREFIX_SEPARATOR));
len += sizeof(NV_PRINTF_PREFIX_SEPARATOR) - 1;
}
if (prefix & NVRM_MSG_PREFIX_FILE)

View File

@@ -27,9 +27,14 @@
#include "gpu/mem_sys/kern_mem_sys.h"
#include "gpu/bus/kern_bus.h"
#include "gpu/bif/kernel_bif.h"
#include "gpu/mem_mgr/rm_page_size.h"
#include "nverror.h"
#include "jt.h"
#include "published/turing/tu102/dev_nv_xve.h"
#include "published/turing/tu102/dev_gc6_island.h"
#include "published/turing/tu102/dev_gc6_island_addendum.h"
/*!
* @brief Returns SR-IOV capabilities
*
@@ -320,3 +325,176 @@ gpuClearEccCounts_TU102
return NV_OK;
}
//
// This function checks for GFW boot completion status by reading
// NV_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED bits and
// return true if GFW boot has completed.
//
// Either pGpu or pgc6VirtAddr should be not null.
// This function needs to be called in early init code-path where OBJGPU
has not been created. For that case, the NV_PGC6 base address will be mapped
// and pgc6VirtAddr will contain the virtual address for NV_PGC6.
// If pgc6VirtAddr is not null, then read the register with MEM_RD32,
// otherwise use the GPU_REG_RD32.
//
// The current GFW boot progress value will be returned in gfwBootProgressVal.
//
static NvBool
_gpuIsGfwBootCompleted_TU102
(
OBJGPU *pGpu,
NvU8 *pgc6VirtAddr,
NvU32 *gfwBootProgressVal
)
{
NvU32 regVal;
// Read the scratch-group priv-level mask either through the pre-mapped
// NV_PGC6 virtual address (early-init path, no OBJGPU yet) or through the
// normal GPU register interface. Exactly one of pGpu / pgc6VirtAddr is
// expected to be non-NULL (see the comment above this function).
if (pgc6VirtAddr != NULL)
{
regVal = MEM_RD32(pgc6VirtAddr +
(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK -
DEVICE_BASE(NV_PGC6)));
}
else
{
regVal = GPU_REG_RD32(pGpu, NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK);
}
//
// Before reading the actual GFW_BOOT status register,
// we want to check that FWSEC has lowered its PLM first.
// If not then obviously it has not completed.
//
if (!FLD_TEST_DRF(_PGC6, _AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK,
_READ_PROTECTION_LEVEL0, _ENABLE, regVal))
{
// PLM still raised: report progress 0 and "not completed".
*gfwBootProgressVal = 0x0;
return NV_FALSE;
}
// Same dual-path read for the GFW_BOOT progress register itself.
if (pgc6VirtAddr != NULL)
{
regVal = MEM_RD32(pgc6VirtAddr +
(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT -
DEVICE_BASE(NV_PGC6)));
}
else
{
regVal = GPU_REG_RD32(pGpu, NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT);
}
// Always report the raw progress field to the caller (used for error logs),
// then return whether it equals the _COMPLETED value.
*gfwBootProgressVal = DRF_VAL(_PGC6, _AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS, regVal);
return FLD_TEST_DRF(_PGC6, _AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS, _COMPLETED, regVal);
}
// Overall GFW boot wait budget: time allowed for the FWSEC-LIC program to
// start plus the time allowed for it to run to completion.
#define FWSECLIC_PROG_START_TIMEOUT 50000 // 50ms
#define FWSECLIC_PROG_COMPLETE_TIMEOUT 2000000 // 2s
#define GPU_GFW_BOOT_COMPLETION_TIMEOUT_US (FWSECLIC_PROG_START_TIMEOUT + \
FWSECLIC_PROG_COMPLETE_TIMEOUT)
//
// Poll the GFW (GPU firmware) boot-progress scratch register until it
// reports completion or the timeout budget above expires.
//
// Returns NV_OK on completion; otherwise the gpuCheckTimeout() status
// (e.g. a timeout error) after logging the last observed progress value.
//
NV_STATUS
gpuWaitForGfwBootComplete_TU102
(
OBJGPU *pGpu
)
{
NvU32 timeoutUs = GPU_GFW_BOOT_COMPLETION_TIMEOUT_US;
NvU32 gfwBootProgressVal = 0;
RMTIMEOUT timeout;
NV_STATUS status = NV_OK;
// Use the OS timer since the GPU timer is not ready yet
gpuSetTimeout(pGpu, gpuScaleTimeout(pGpu, timeoutUs), &timeout,
GPU_TIMEOUT_FLAGS_OSTIMER);
// Busy-poll: keep checking until completion or until gpuCheckTimeout()
// stops returning NV_OK.
while (status == NV_OK)
{
if (_gpuIsGfwBootCompleted_TU102(pGpu, NULL, &gfwBootProgressVal))
{
return NV_OK;
}
status = gpuCheckTimeout(pGpu, &timeout);
}
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: (progress 0x%x)\n",
gfwBootProgressVal);
return status;
}
//
// Workaround for Bug 3809777.
//
// This function is not created through HAL infrastructure. It needs to be
// called when OBJGPU is not created. HAL infrastructure cant be used for
// this case, so it has been added manually. It will be invoked directly by
// gpumgrIsDeviceMsixAllowed() after checking the GPU architecture.
//
// When driver is running inside guest in pass-through mode, check if MSI-X
// is enabled by reading NV_XVE_PRIV_MISC_1_CYA_HIDE_MSIX_CAP. The devinit
// can disable MSI-X capability, if configured. The hypervisor issues reset
// before launching VM. After reset, the MSI-X capability will be visible
// for some duration and then devinit hides the MSI-X capability. This
// devinit will run in the background. During this time, the hypervisor can
// assume that MSI-X capability is present in the GPU and configure the guest
// GPU PCIe device instance with MSI-X capability. When GPU tries to use the
MSI-X interrupts later, then the interrupt won't be triggered. To identify
// this case, wait for GPU devinit completion and check if MSI-X capability
// is not hidden.
//
//
// Check whether the MSI-X capability is (still) exposed by the GPU after
// devinit has run, using only a raw BAR0 physical address — no OBJGPU.
// See the block comment above (Bug 3809777 workaround) for the rationale.
//
// Returns NV_TRUE only if GFW boot completes within the timeout AND the
// devinit CYA bit says the MSI-X capability is not hidden. Any mapping
// failure or boot timeout conservatively returns NV_FALSE.
//
NvBool gpuIsMsixAllowed_TU102
(
RmPhysAddr bar0BaseAddr
)
{
NvU8 *vAddr;
NvU32 regVal;
NvU32 timeUs = 0;
NvU32 gfwBootProgressVal = 0;
NvBool bGfwBootCompleted = NV_FALSE;
// One RM page must be enough to cover the whole NV_PGC6 register range,
// since only RM_PAGE_SIZE bytes are mapped below.
ct_assert(DRF_SIZE(NV_PGC6) <= RM_PAGE_SIZE);
// Map the NV_PGC6 island registers directly off BAR0 (read-only, uncached).
vAddr = osMapKernelSpace(bar0BaseAddr + DEVICE_BASE(NV_PGC6),
RM_PAGE_SIZE, NV_MEMORY_UNCACHED,
NV_PROTECT_READABLE);
if (vAddr == NULL)
{
return NV_FALSE;
}
// Poll for GFW boot completion in 1 ms steps up to the overall budget.
while (timeUs < GPU_GFW_BOOT_COMPLETION_TIMEOUT_US)
{
bGfwBootCompleted = _gpuIsGfwBootCompleted_TU102(NULL, vAddr, &gfwBootProgressVal);
if (bGfwBootCompleted)
{
break;
}
osDelayUs(1000);
timeUs += 1000;
}
osUnmapKernelSpace(vAddr, RM_PAGE_SIZE);
if (!bGfwBootCompleted)
{
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: (progress 0x%x)\n",
gfwBootProgressVal);
return NV_FALSE;
}
// Devinit is done: map just the 4-byte NV_XVE_PRIV_MISC_1 config register
// and check the CYA bit that devinit may use to hide the MSI-X capability.
vAddr = osMapKernelSpace(bar0BaseAddr + DEVICE_BASE(NV_PCFG) +
NV_XVE_PRIV_MISC_1, 4, NV_MEMORY_UNCACHED,
NV_PROTECT_READABLE);
if (vAddr == NULL)
{
return NV_FALSE;
}
regVal = MEM_RD32(vAddr);
osUnmapKernelSpace(vAddr, 4);
// MSI-X is allowed only when the hide-MSI-X CYA bit is _FALSE.
return FLD_TEST_DRF(_XVE, _PRIV_MISC_1, _CYA_HIDE_MSIX_CAP, _FALSE, regVal);
}

View File

@@ -660,7 +660,8 @@ _class5080DeferredApiV2
callContext.secInfo.pProcessToken = (void *)(NvU64) gfid;
}
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(rmStatus,
resservSwapTlsCallContext(&pOldContext, &callContext), cleanup);
rmStatus = serverControl_Prologue(&g_resServ, &rmCtrlParams, &access, &releaseFlags);
@@ -680,7 +681,7 @@ _class5080DeferredApiV2
}
}
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
rmStatus = serverControl_Epilogue(&g_resServ, &rmCtrlParams, access, &releaseFlags, rmStatus);
}

View File

@@ -28,6 +28,7 @@
#include "kernel/gpu/mem_mgr/mem_mgr.h"
#include "kernel/gpu/gr/kernel_graphics.h"
#include "kernel/gpu/falcon/kernel_falcon.h"
#include "kernel/gpu/rc/kernel_rc.h"
#include "kernel/gpu/conf_compute/conf_compute.h"
@@ -242,13 +243,28 @@ kchangrpapiConstruct_IMPL
if (!RMCFG_FEATURE_PLATFORM_GSP)
{
NV_ASSERT_OK_OR_GOTO(rmStatus,
ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pCtxBufPool),
failed);
NvHandle hRcWatchdog;
NV_ASSERT_OK_OR_GOTO(rmStatus,
ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pChannelBufPool),
failed);
//
// WAR for 4217716 - Force allocations made on behalf of watchdog client to
// RM reserved heap. This avoids a constant memory allocation from appearing
// due to the ctxBufPool reservation out of PMA.
//
rmStatus = krcWatchdogGetClientHandle(GPU_GET_KERNEL_RC(pGpu), &hRcWatchdog);
if ((rmStatus != NV_OK) || (pParams->hClient != hRcWatchdog))
{
NV_ASSERT_OK_OR_GOTO(rmStatus,
ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pCtxBufPool),
failed);
NV_ASSERT_OK_OR_GOTO(rmStatus,
ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pChannelBufPool),
failed);
}
else
{
NV_PRINTF(LEVEL_INFO, "Skipping ctxBufPoolInit for RC watchdog\n");
}
}
NV_ASSERT_OK_OR_GOTO(rmStatus,

View File

@@ -618,7 +618,7 @@ static NV_STATUS _gpuRmApiControl
callCtx.pControlParams = &rmCtrlParams;
callCtx.pLockInfo = rmCtrlParams.pLockInfo;
resservSwapTlsCallContext(&oldCtx, &callCtx);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&oldCtx, &callCtx));
if (pEntry->paramSize == 0)
{
@@ -629,7 +629,7 @@ static NV_STATUS _gpuRmApiControl
status = ((NV_STATUS(*)(void*,void*))pEntry->pFunc)(pGpu->pCachedSubdevice, pParams);
}
resservRestoreTlsCallContext(oldCtx);
NV_ASSERT_OK(resservRestoreTlsCallContext(oldCtx));
}
else
{

View File

@@ -381,6 +381,7 @@ gpuresControl_IMPL
RS_RES_CONTROL_PARAMS_INTERNAL *pParams
)
{
NV_ASSERT_OR_RETURN(pGpuResource->pGpu != NULL, NV_ERR_INVALID_STATE);
gpuresControlSetup(pParams, pGpuResource);
return resControl_IMPL(staticCast(pGpuResource, RsResource),

View File

@@ -68,8 +68,7 @@ kgspConfigureFalcon_GA102
//
// No CrashCat queue when CC is enabled, as it's not encrypted.
// Don't bother enabling the host-side decoding either, as CrashCat
// currently only supports sysmem queue reporting on GA10x+.
// Don't bother enabling the host-side decoding either.
//
if (pCC == NULL || !pCC->getProperty(pCC, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED))
{
@@ -77,7 +76,6 @@ kgspConfigureFalcon_GA102
falconConfig.crashcatEngConfig.bEnable = NV_TRUE;
falconConfig.crashcatEngConfig.pName = MAKE_NV_PRINTF_STR("GSP");
falconConfig.crashcatEngConfig.errorId = GSP_ERROR;
falconConfig.crashcatEngConfig.allocQueueSize = RM_PAGE_SIZE;
}
kflcnConfigureEngine(pGpu, staticCast(pKernelGsp, KernelFalcon), &falconConfig);

View File

@@ -956,9 +956,6 @@ kgspIsWpr2Up_TU102
return (wpr2HiVal != 0);
}
#define FWSECLIC_PROG_START_TIMEOUT 50000 // 50ms
#define FWSECLIC_PROG_COMPLETE_TIMEOUT 2000000 // 2s
NV_STATUS
kgspWaitForGfwBootOk_TU102
(
@@ -966,51 +963,16 @@ kgspWaitForGfwBootOk_TU102
KernelGsp *pKernelGsp
)
{
NvU32 timeoutUs = FWSECLIC_PROG_START_TIMEOUT + FWSECLIC_PROG_COMPLETE_TIMEOUT;
RMTIMEOUT timeout;
NV_STATUS status = NV_OK;
// Use the OS timer since the GPU timer is not ready yet
gpuSetTimeout(pGpu, gpuScaleTimeout(pGpu, timeoutUs), &timeout,
GPU_TIMEOUT_FLAGS_OSTIMER);
while (status == NV_OK)
status = gpuWaitForGfwBootComplete_HAL(pGpu);
if (status != NV_OK)
{
//
// Before reading the actual GFW_BOOT status register,
// we want to check that FWSEC has lowered its PLM first.
// If not then obviously it has not completed.
//
if (GPU_FLD_TEST_DRF_DEF(pGpu,
_PGC6,
_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK,
_READ_PROTECTION_LEVEL0,
_ENABLE)
)
{
if (GPU_FLD_TEST_DRF_DEF(pGpu,
_PGC6,
_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS,
_COMPLETED)
)
{
return NV_OK;
}
}
status = gpuCheckTimeout(pGpu, &timeout);
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW boot complete: 0x%x VBIOS version %s\n",
status, pKernelGsp->vbiosVersionStr);
NV_PRINTF(LEVEL_ERROR, "(the GPU may be in a bad state and may need to be reset)\n");
}
// The wait failed if we reach here (as above loop returns upon success).
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: 0x%x (progress 0x%x, VBIOS version %s)\n",
status, GPU_REG_RD_DRF(pGpu,
_PGC6,
_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT,
_PROGRESS),
pKernelGsp->vbiosVersionStr);
NV_PRINTF(LEVEL_ERROR, "(the GPU may be in a bad state and may need to be reset)\n");
return status;
}

View File

@@ -1409,12 +1409,20 @@ _tsDiffToDuration
duration /= tsFreqUs;
// 999999us then 1000ms
if (duration >= 1000000)
{
duration /= 1000;
*pDurationUnitsChar = 'm';
}
// 9999ms then 10s
if (duration >= 10000)
{
duration /= 1000;
*pDurationUnitsChar = ' '; // so caller can always just append 's'
}
return duration;
}
@@ -1467,7 +1475,7 @@ _kgspLogRpcHistoryEntry
duration = _tsDiffToDuration(duration, &durationUnitsChar);
NV_ERROR_LOG_DATA(pGpu, errorNum,
" %c%-4d %-4d %-21.21s 0x%016llx 0x%016llx 0x%016llx 0x%016llx %6lld%cs %c\n",
" %c%-4d %-4d %-21.21s 0x%016llx 0x%016llx 0x%016llx 0x%016llx %6llu%cs %c\n",
((historyIndex == 0) ? ' ' : '-'),
historyIndex,
pEntry->function,
@@ -1556,23 +1564,32 @@ _kgspLogXid119
NvU32 expectedFunc
)
{
NvU32 historyEntry = pRpc->rpcHistoryCurrent;
RpcHistoryEntry *pHistoryEntry = &pRpc->rpcHistory[pRpc->rpcHistoryCurrent];
NvU64 ts_end = osGetTimestamp();
NvU64 duration;
char durationUnitsChar;
if (pRpc->timeoutCount == 1)
{
NV_PRINTF(LEVEL_ERROR,
"********************************* GSP Failure **********************************\n");
"********************************* GSP Timeout **********************************\n");
NV_PRINTF(LEVEL_ERROR,
"Note: Please also check logs above.\n");
}
NV_ASSERT(expectedFunc == pRpc->rpcHistory[historyEntry].function);
NV_ASSERT(expectedFunc == pHistoryEntry->function);
NV_ASSERT(ts_end > pHistoryEntry->ts_start);
duration = _tsDiffToDuration(ts_end - pHistoryEntry->ts_start, &durationUnitsChar);
NV_ERROR_LOG(pGpu, GSP_RPC_TIMEOUT,
"Timeout waiting for RPC from GPU%d GSP! Expected function %d (%s) (0x%x 0x%x).",
"Timeout after %llus of waiting for RPC response from GPU%d GSP! Expected function %d (%s) (0x%x 0x%x).",
(durationUnitsChar == 'm' ? duration / 1000 : duration),
gpuGetInstance(pGpu),
expectedFunc,
_getRpcName(expectedFunc),
pRpc->rpcHistory[historyEntry].data[0],
pRpc->rpcHistory[historyEntry].data[1]);
pHistoryEntry->data[0],
pHistoryEntry->data[1]);
if (pRpc->timeoutCount == 1)
{

View File

@@ -1579,7 +1579,7 @@ intrServiceStallList_IMPL
}
}
resservSwapTlsCallContext(&pOldContext, NULL);
NV_ASSERT_OK_OR_ELSE(status, resservSwapTlsCallContext(&pOldContext, NULL), return);
// prevent the isr from coming in
_intrEnterCriticalSection(pGpu, pIntr, &intrMaskCtx);
@@ -1608,7 +1608,7 @@ done:
// allow the isr to come in.
_intrExitCriticalSection(pGpu, pIntr, &intrMaskCtx);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
}

View File

@@ -36,6 +36,7 @@
#include "gpu/mem_mgr/ce_utils.h"
#include "gpu/subdevice/subdevice.h"
#include "kernel/gpu/mem_mgr/ce_utils_sizes.h"
#include "vgpu/rpc_headers.h"
#include "class/clb0b5.h" // MAXWELL_DMA_COPY_A
#include "class/clc0b5.h" // PASCAL_DMA_COPY_A
@@ -91,8 +92,21 @@ ceutilsConstruct_IMPL
status = serverGetClientUnderLock(&g_resServ, pChannel->hClient, &pChannel->pRsClient);
NV_ASSERT_OR_GOTO(status == NV_OK, free_client);
status = clientSetHandleGenerator(staticCast(pClient, RsClient), 1U, ~0U - 1U);
NV_ASSERT_OR_GOTO(status == NV_OK, free_client);
if (IS_VIRTUAL(pGpu))
{
NV_ASSERT_OK_OR_GOTO(
status,
clientSetHandleGenerator(staticCast(pClient, RsClient), RS_UNIQUE_HANDLE_BASE,
RS_UNIQUE_HANDLE_RANGE/2 - VGPU_RESERVED_HANDLE_RANGE),
free_client);
}
else
{
NV_ASSERT_OK_OR_GOTO(
status,
clientSetHandleGenerator(staticCast(pClient, RsClient), 1U, ~0U - 1U),
free_client);
}
pChannel->bClientAllocated = NV_TRUE;
pChannel->pGpu = pGpu;

View File

@@ -249,6 +249,44 @@ deviceCtrlCmdFbGetCapsV2_IMPL
return rmStatus;
}
//
// deviceCtrlCmdSetDefaultVidmemPhysicality
//
// Lock Requirements:
// Assert that API lock held on entry
//
NV_STATUS
deviceCtrlCmdSetDefaultVidmemPhysicality_IMPL
(
    Device *pDevice,
    NV0080_CTRL_FB_SET_DEFAULT_VIDMEM_PHYSICALITY_PARAMS *pParams
)
{
    NvU32 attrOverride;

    // API lock must already be held by the caller.
    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner());

    // Translate the control value into the matching NVOS32 physicality
    // attribute; reject anything unrecognized before touching device state.
    switch (pParams->value)
    {
        case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_DEFAULT:
            attrOverride = NVOS32_ATTR_PHYSICALITY_DEFAULT;
            break;
        case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_CONTIGUOUS:
            attrOverride = NVOS32_ATTR_PHYSICALITY_CONTIGUOUS;
            break;
        case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_NONCONTIGUOUS:
            attrOverride = NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS;
            break;
        case NV0080_CTRL_FB_DEFAULT_VIDMEM_PHYSICALITY_ALLOW_NONCONTIGUOUS:
            attrOverride = NVOS32_ATTR_PHYSICALITY_ALLOW_NONCONTIGUOUS;
            break;
        default:
            return NV_ERR_INVALID_ARGUMENT;
    }

    pDevice->defaultVidmemPhysicalityOverride = attrOverride;
    return NV_OK;
}
//
// subdeviceCtrlCmdFbGetBar1Offset
//

View File

@@ -52,7 +52,6 @@
#include "deprecated/rmapi_deprecated.h"
#include "nvRmReg.h"
//
// Watchdog object ids
//
@@ -107,7 +106,6 @@
#define SUBDEVICE_MASK_ALL DRF_MASK(NV906F_DMA_SET_SUBDEVICE_MASK_VALUE)
NV_STATUS
krcWatchdogChangeState_IMPL
(
@@ -402,7 +400,7 @@ krcWatchdogShutdown_IMPL
//
// Make sure to clear any old watchdog data this also clears
// WATCHDOG_FLAGS_INITIALIZED
// WATCHDOG_FLAGS_INITIALIZED, bHandleValid, and hClient
//
portMemSet(&pKernelRc->watchdog, 0, sizeof pKernelRc->watchdog);
portMemSet(&pKernelRc->watchdogChannelInfo, 0,
@@ -519,6 +517,8 @@ krcWatchdogInit_IMPL
status = NV_ERR_NO_MEMORY;
goto error;
}
pKernelRc->watchdog.hClient = hClient;
pKernelRc->watchdog.bHandleValid = NV_TRUE;
}
if (bAcquireLock)
@@ -1178,6 +1178,7 @@ error:
if (status != NV_OK)
{
pRmApi->Free(pRmApi, hClient, hClient);
pKernelRc->watchdog.bHandleValid = NV_FALSE;
}
portMemFree(pParams);
@@ -1417,4 +1418,11 @@ krcWatchdogWriteNotifierToGpfifo_IMPL
SLI_LOOP_END;
}
//
// krcWatchdogGetClientHandle
//
// Returns the RM client handle allocated for the RC watchdog, if one is
// currently valid.
//
// @param[in]  pKernelRc  KernelRc instance owning the watchdog state.
// @param[out] phClient   Receives the watchdog client handle on success.
//
// @return NV_OK on success;
//         NV_ERR_INVALID_ARGUMENT if either pointer argument is NULL;
//         NV_ERR_INVALID_STATE if no valid watchdog client handle exists
//         (not yet created, or torn down by krcWatchdogShutdown).
//
NV_STATUS krcWatchdogGetClientHandle(KernelRc *pKernelRc, NvHandle *phClient)
{
    // Guard against NULL arguments (e.g. GPU_GET_KERNEL_RC() may return NULL)
    // before dereferencing.
    if ((pKernelRc == NULL) || (phClient == NULL))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    if (!pKernelRc->watchdog.bHandleValid)
    {
        return NV_ERR_INVALID_STATE;
    }

    *phClient = pKernelRc->watchdog.hClient;
    return NV_OK;
}

View File

@@ -3893,3 +3893,41 @@ NvBool gpumgrIsSafeToReadGpuInfo(void)
//
return rmapiLockIsOwner() || (rmGpuLocksGetOwnedMask() != 0);
}
//
// Workaround for Bug 3809777. This is a HW bug happening in Ampere and
// Ada GPU's. For these GPU's, after device reset, CRS (Configuration Request
// Retry Status) is being released without waiting for GFW boot completion.
// MSI-X capability in the config space may be inconsistent when GFW boot
// is in progress, so this function checks if MSI-X is allowed.
// For Hopper and above, the CRS will be released after
// GFW boot completion, so the WAR is not needed.
// The bug will be exposed only when GPU is running inside guest in
// pass-through mode.
//
NvBool gpumgrIsDeviceMsixAllowed
(
    RmPhysAddr bar0BaseAddr,
    NvU32 pmcBoot1,
    NvU32 pmcBoot42
)
{
    OBJSYS        *pSys        = SYS_GET_INSTANCE();
    OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
    NvBool         bPassthroughGuest;

    // The WAR only applies when the GPU is passed through to a guest VM:
    // a hypervisor must be detected and PMC_BOOT_1 must report a real GPU.
    bPassthroughGuest =
        (hypervisorGetHypervisorType(pHypervisor) != OS_HYPERVISOR_UNKNOWN) &&
        FLD_TEST_DRF(_PMC, _BOOT_1, _VGPU, _REAL, pmcBoot1);

    if (!bPassthroughGuest)
    {
        return NV_TRUE;
    }

    // Only Ampere (GA100) and Ada (AD100) release CRS before GFW boot
    // completes (Bug 3809777); every other architecture is unaffected.
    switch (DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pmcBoot42))
    {
        case NV_PMC_BOOT_42_ARCHITECTURE_GA100:
        case NV_PMC_BOOT_42_ARCHITECTURE_AD100:
            // Wait for devinit completion, then check the MSI-X capability.
            return gpuIsMsixAllowed_TU102(bar0BaseAddr);
        default:
            return NV_TRUE;
    }
}

View File

@@ -562,6 +562,14 @@ vidmemConstruct_IMPL
goto done;
}
if (FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _DEFAULT, pAllocData->attr))
{
pAllocData->attr =
FLD_SET_DRF_NUM(OS32, _ATTR, _PHYSICALITY,
pDevice->defaultVidmemPhysicalityOverride,
pAllocData->attr);
}
NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, stdmemValidateParams(pGpu, hClient, pAllocData));
NV_CHECK_OR_RETURN(LEVEL_WARNING,
DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) == NVOS32_ATTR_LOCATION_VIDMEM &&

View File

@@ -913,7 +913,8 @@ serverAllocResourceUnderLock
callContext.pLockInfo = pRmAllocParams->pLockInfo;
callContext.secInfo = *pRmAllocParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
NV_RM_RPC_ALLOC_OBJECT(pGpu,
pRmAllocParams->hClient,
pRmAllocParams->hParent,
@@ -922,7 +923,7 @@ serverAllocResourceUnderLock
pRmAllocParams->pAllocParams,
pRmAllocParams->paramsSize,
status);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto done;

View File

@@ -4148,9 +4148,10 @@ cliresCtrlCmdClientShareObject_IMPL
callContext.pResourceRef = pObjectRef;
callContext.secInfo = pCallContext->secInfo;
resservSwapTlsCallContext(&pOldCallContext, &callContext);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldCallContext, &callContext));
status = clientShareResource(pClient, pObjectRef, pSharePolicy, &callContext);
resservRestoreTlsCallContext(pOldCallContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldCallContext));
if (status != NV_OK)
return status;

View File

@@ -3446,8 +3446,6 @@ NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
(memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC_MC) ||
(memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC_V2))
{
KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu);
isPeerSupported = NV_TRUE;
pPeerGpu = pAdjustedMemDesc->pGpu;
peerId = BUS_INVALID_PEER;
@@ -3462,10 +3460,21 @@ NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
if (pPeerGpu != NULL)
{
if ((pKernelNvlink != NULL) &&
knvlinkIsNvlinkP2pSupported(pMappingGpu, pKernelNvlink, pPeerGpu))
if (IS_VIRTUAL_WITH_SRIOV(pMappingGpu) &&
!gpuIsWarBug200577889SriovHeavyEnabled(pMappingGpu))
{
peerId = kbusGetPeerId_HAL(pMappingGpu, GPU_GET_KERNEL_BUS(pMappingGpu), pPeerGpu);
peerId = kbusGetNvlinkPeerId_HAL(pMappingGpu,
GPU_GET_KERNEL_BUS(pMappingGpu),
pPeerGpu);
}
else
{
KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu);
if ((pKernelNvlink != NULL) &&
knvlinkIsNvlinkP2pSupported(pMappingGpu, pKernelNvlink, pPeerGpu))
{
peerId = kbusGetPeerId_HAL(pMappingGpu, GPU_GET_KERNEL_BUS(pMappingGpu), pPeerGpu);
}
}
}
else

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -523,7 +523,7 @@ clientCopyResource_IMPL
callContext.secInfo = *pParams->pSecInfo;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext));
//
// Kernel clients are allowed to dup anything, unless they request otherwise.
@@ -560,7 +560,7 @@ clientCopyResource_IMPL
}
}
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
return status;
@@ -668,9 +668,11 @@ _clientAllocResourceHelper
}
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), fail);
status = resservResourceFactory(pServer->pAllocator, &callContext, pParams, &pResource);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto fail;
@@ -724,6 +726,8 @@ _clientAllocResourceHelper
fail:
if (pResource != NULL)
{
NV_STATUS callContextStatus;
RS_RES_FREE_PARAMS_INTERNAL params;
pOldContext = NULL;
@@ -738,11 +742,20 @@ fail:
callContext.pResourceRef = pResourceRef;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
resSetFreeParams(pResource, &callContext, &params);
callContextStatus = resservSwapTlsCallContext(&pOldContext, &callContext);
if (callContextStatus == NV_OK)
{
resSetFreeParams(pResource, &callContext, &params);
objDelete(pResource);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
}
else
{
NV_PRINTF(LEVEL_ERROR, "Failed to set call context! Error: 0x%x\n",
callContextStatus);
}
objDelete(pResource);
resservRestoreTlsCallContext(pOldContext);
}
if (pResourceRef != NULL)
@@ -798,7 +811,9 @@ clientFreeResource_IMPL
if (pParams->pSecInfo != NULL)
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
resSetFreeParams(pResource, &callContext, pParams);
resPreDestruct(pResource);
@@ -825,7 +840,7 @@ clientFreeResource_IMPL
pResourceRef->pResource = NULL;
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
done:
if (!pParams->bInvalidateOnly)
@@ -872,9 +887,10 @@ clientUnmapMemory_IMPL
if (pSecInfo != NULL)
callContext.secInfo = *pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext));
status = resUnmap(pResourceRef->pResource, &callContext, pCpuMapping);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
{

View File

@@ -1344,9 +1344,11 @@ serverControl
}
pLockInfo->pContextRef = pResourceRef->pParentRef;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
status = resControl(pResourceRef->pResource, &callContext, pParams);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
done:
@@ -1521,7 +1523,8 @@ _serverShareResourceAccessClient
callContext.pResourceRef = pResourceRef;
callContext.secInfo = *pParams->pSecInfo;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
if (hClientOwner == hClientTarget)
{
@@ -1544,7 +1547,7 @@ _serverShareResourceAccessClient
goto restore_context;
restore_context:
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
// NV_PRINTF(LEVEL_INFO, "hClientOwner %x: Shared hResource: %x with hClientTarget: %x\n",
// hClientOwner, pParams->hResource, hClientTarget);
@@ -1631,9 +1634,11 @@ serverShareResourceAccess
callContext.secInfo = *pParams->pSecInfo;
callContext.pLockInfo = pParams->pLockInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
status = clientShareResource(pClient, pResourceRef, pParams->pSharePolicy, &callContext);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto done;
@@ -1735,9 +1740,11 @@ serverMap
if (pParams->pSecInfo != NULL)
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
status = resMap(pResource, &callContext, pParams, pCpuMapping);
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
goto done;
@@ -1910,7 +1917,9 @@ serverInterMap
if (pParams->pSecInfo != NULL)
callContext.secInfo = *pParams->pSecInfo;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
bRestoreCallContext = NV_TRUE;
status = refAddInterMapping(pMapperRef, pMappableRef, pContextRef, &pMapping);
@@ -1934,7 +1943,7 @@ done:
serverInterMap_Epilogue(pServer, pParams, &releaseFlags);
if (bRestoreCallContext)
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
if (status != NV_OK)
{
@@ -2024,7 +2033,9 @@ serverInterUnmap
if (pLockInfo->pContextRef == NULL)
pLockInfo->pContextRef = pContextRef;
resservSwapTlsCallContext(&pOldContext, &callContext);
NV_ASSERT_OK_OR_GOTO(status,
resservSwapTlsCallContext(&pOldContext, &callContext), done);
bRestoreCallContext = NV_TRUE;
status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags);
@@ -2045,7 +2056,7 @@ done:
serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags);
if (bRestoreCallContext)
resservRestoreTlsCallContext(pOldContext);
NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
_serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags);
serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags);