535.216.01

Bernhard Stoeckner
2024-10-22 17:35:00 +02:00
parent c588c3877f
commit 60d85c464b
92 changed files with 1189 additions and 1226 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -190,6 +190,7 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_INTEL, 0x7A8A, CS_INTEL_1B81, "Intel-SapphireRapids", NULL},
{PCI_VENDOR_ID_INTEL, 0x18DC, CS_INTEL_18DC, "Intel-IceLake", NULL},
{PCI_VENDOR_ID_INTEL, 0x7A04, CS_INTEL_7A04, "Intel-RaptorLake", Intel_7A04_setupFunc},
{PCI_VENDOR_ID_INTEL, 0x5795, CS_INTEL_5795, "Intel-GraniteRapids", NULL},
{PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
{PCI_VENDOR_ID_NVIDIA, 0x0FAF, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
@@ -354,7 +355,8 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD}, // Mellanox BlueField
{PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2
{PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2 Crypto disabled
{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3},// Mellanox BlueField 3
{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3},// Mellanox BlueField 3 Crypto disabled
{PCI_VENDOR_ID_MELLANOX, 0xA2DA, CS_MELLANOX_BLUEFIELD3},// Mellanox BlueField 3 Crypto enabled
{PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2}, // Amazon Gravitron2
{PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX}, // Fujitsu A64FX
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500}, // Phytium S2500
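For illustration, a standalone sketch of how a vendor/device table like chipsetInfo above is typically scanned; the struct layout and helper names here are assumed, not the driver's real definitions:

    #include <stdio.h>
    #include <stddef.h>

    typedef void (*setup_fn)(void);

    /* Assumed shape of a CSINFO-style row: vendor, device, id, name, hook. */
    typedef struct {
        unsigned short vendor;
        unsigned short device;
        int            id;
        const char    *name;
        setup_fn       setup;
    } cs_info;

    static void intel_setup(void) { puts("Intel chipset setup"); }

    static const cs_info table[] = {
        { 0x8086, 0x7A04, 1, "Intel-RaptorLake",    intel_setup },
        { 0x8086, 0x5795, 2, "Intel-GraniteRapids", NULL },  /* no hook, like the new row */
    };

    /* Linear scan, first match wins; a NULL setup means no chipset quirks. */
    static const cs_info *cs_lookup(unsigned short ven, unsigned short dev)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].vendor == ven && table[i].device == dev)
                return &table[i];
        return NULL;
    }

    int main(void)
    {
        const cs_info *cs = cs_lookup(0x8086, 0x5795);
        if (cs)
        {
            printf("matched %s\n", cs->name);
            if (cs->setup)
                cs->setup();
        }
        return 0;
    }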

View File

@@ -1034,12 +1034,11 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, c
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *);
NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
NvU64 *, NvU64 *, NvU32 *, NvU8 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);
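The reworked nv_vgpu_get_bar_info() above returns everything in one call and writes into caller-provided arrays (NvU64 *), where the removed nv_vgpu_get_sparse_mmap() allocated them inside the driver (NvU64 **), so buffer ownership moves to the caller. A minimal caller-side sketch, with the array bounds and typedefs assumed for illustration (the driver's real limits are the NVA081_MAX_*_REGION_COUNT constants referenced later in this commit):

    #include <stdio.h>

    typedef unsigned long long NvU64;
    typedef unsigned int       NvU32;
    typedef unsigned char      NvU8;

    #define BAR_REGIONS    6   /* stand-in for NVA081_MAX_BAR_REGION_COUNT */
    #define SPARSE_REGIONS 5   /* stand-in for NVA081_MAX_SPARSE_REGION_COUNT */

    int main(void)
    {
        /* The caller owns the storage; the driver only fills it in. */
        NvU64 barSizes[BAR_REGIONS]         = {0};
        NvU64 sparseOffsets[SPARSE_REGIONS] = {0};
        NvU64 sparseSizes[SPARSE_REGIONS]   = {0};
        NvU32 sparseCount                   = 0;
        NvU8  configParams[256]             = {0};

        /* nv_vgpu_get_bar_info(sp, nv, mdevUuid, barSizes, sparseOffsets,
         *                      sparseSizes, &sparseCount, configParams);
         * one call replaces the old per-region query plus the separate
         * nv_vgpu_get_sparse_mmap(). */

        for (NvU32 i = 0; i < sparseCount; i++)
            printf("sparse region %u: 0x%llx + 0x%llx\n",
                   i, sparseOffsets[i], sparseSizes[i]);
        (void)barSizes; (void)configParams;
        return 0;
    }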

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -45,6 +45,7 @@
#include "gpu/bus/kern_bus.h"
#include <nv_ref.h> // NV_PMC_BOOT_1_VGPU
#include "nvdevid.h"
#include "ctrl/ctrl0000/ctrl0000vgpu.h"
#define NV_VFIO_PCI_BAR1_REGION_INDEX 1
#define NV_VFIO_PCI_BAR2_REGION_INDEX 2
@@ -80,8 +81,8 @@ NV_STATUS hypervisorInjectInterrupt_IMPL
{
NV_STATUS status = NV_ERR_NOT_SUPPORTED;
if (pVgpuNsIntr->pVgpuVfioRef)
status = osVgpuInjectInterrupt(pVgpuNsIntr->pVgpuVfioRef);
if (osIsVgpuVfioPresent() == NV_TRUE)
return NV_ERR_NOT_SUPPORTED;
else
{
if (pVgpuNsIntr->guestMSIAddr && pVgpuNsIntr->guestMSIData)
@@ -102,127 +103,6 @@ HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
return hypervisorGetHypervisorType(pHypervisor);
}
static NV_STATUS get_available_instances(
NvU32 *avail_instances,
nv_state_t *pNv,
VGPU_TYPE *vgpuTypeInfo,
NvU32 pgpuIndex,
NvU8 devfn
)
{
NV_STATUS rmStatus = NV_OK;
OBJGPU *pGpu = NULL;
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
*avail_instances = 0;
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto exit;
}
/* TODO: Needs a proper fix for DriverVM config */
if (gpuIsSriovEnabled(pGpu) &&
!(pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
{
NvU8 fnId = devfn - pGpu->sriovState.firstVFOffset;
if (fnId > 63)
{
NV_ASSERT(0);
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto exit;
}
if (IS_MIG_ENABLED(pGpu))
{
if (IS_MIG_IN_USE(pGpu)) {
NvU64 swizzIdInUseMask = 0;
NvU32 partitionFlag = PARTITIONID_INVALID;
KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
NvU32 id;
swizzIdInUseMask = kmigmgrGetSwizzIdInUseMask(pGpu, pKernelMIGManager);
rmStatus = kvgpumgrGetPartitionFlag(vgpuTypeInfo->vgpuTypeId,
&partitionFlag);
if (rmStatus != NV_OK)
{
// Query for a non MIG vgpuType
NV_PRINTF(LEVEL_ERROR, "%s Query for a non MIG vGPU type \n",
__FUNCTION__);
rmStatus = NV_OK;
goto exit;
}
// Determine valid swizzids not assigned to any vGPU device.
FOR_EACH_INDEX_IN_MASK(64, id, swizzIdInUseMask)
{
KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
NvU64 mask = 0;
rmStatus = kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager,
id, &pKernelMIGGpuInstance);
if (rmStatus != NV_OK)
{
// Didn't find requested GPU instance
NV_PRINTF(LEVEL_ERROR,
"No valid GPU instance with SwizzId - %d found\n", id);
goto exit;
}
mask = NVBIT64(id);
if (pKernelMIGGpuInstance->partitionFlag == partitionFlag)
{
// Validate that same ID is not already set and VF is available
if (!(mask & pKernelVgpuMgr->pgpuInfo[pgpuIndex].assignedSwizzIdMask) &&
!(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
{
*avail_instances = 1;
break;
}
}
}
FOR_EACH_INDEX_IN_MASK_END;
}
}
else
{
if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu < vgpuTypeInfo->maxInstance)
{
if (vgpuTypeInfo->gpuInstanceSize)
{
// Query for a MIG vgpuType
NV_PRINTF(LEVEL_ERROR, "%s Query for a MIG vGPU type \n",
__FUNCTION__);
rmStatus = NV_OK;
goto exit;
}
if (!(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
{
if (kvgpumgrCheckVgpuTypeCreatable(&pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
*avail_instances = 1;
}
}
}
}
else
{
if (kvgpumgrCheckVgpuTypeCreatable(&pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
*avail_instances = vgpuTypeInfo->maxInstance - pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu;
}
exit:
return rmStatus;
}
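The heart of the removed MIG branch is a walk over the set bits of a 64-bit swizzId mask. A standalone sketch of that bit-walk, with the FOR_EACH_INDEX_IN_MASK semantics inferred from its use here:

    #include <stdio.h>

    typedef unsigned long long NvU64;
    typedef unsigned int       NvU32;

    #define NVBIT64(i) (1ULL << (i))

    int main(void)
    {
        NvU64 swizzIdInUseMask    = NVBIT64(1) | NVBIT64(4) | NVBIT64(9);
        NvU64 assignedSwizzIdMask = NVBIT64(4);  /* already bound to a vGPU */

        /* Equivalent of FOR_EACH_INDEX_IN_MASK(64, id, swizzIdInUseMask):
         * visit every set-bit index in ascending order. */
        for (NvU32 id = 0; id < 64; id++)
        {
            if (!(swizzIdInUseMask & NVBIT64(id)))
                continue;
            if (assignedSwizzIdMask & NVBIT64(id))
                continue;   /* swizzId already taken, as in the check above */
            printf("swizzId %u: in use but unassigned -> candidate\n", id);
        }
        return 0;
    }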
#define MAX_STR_LEN 256
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
nvidia_stack_t *sp,
@@ -239,6 +119,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
NV_STATUS rmStatus = NV_OK;
VGPU_TYPE *vgpuTypeInfo;
NvU32 pgpuIndex, i, avail_instances = 0;
OBJGPU *pGpu = NULL;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
@@ -261,24 +142,19 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
switch (type_info)
{
case VGPU_TYPE_NAME:
os_snprintf(buffer, VGPU_STRING_BUFFER_SIZE, "%s\n",
vgpuTypeInfo->vgpuName);
break;
case VGPU_TYPE_DESCRIPTION:
os_snprintf(buffer, MAX_STR_LEN,
"num_heads=%d, frl_config=%d, "
"framebuffer=%dM, max_resolution=%dx%d, max_instance=%d\n",
vgpuTypeInfo->numHeads, vgpuTypeInfo->frlConfig,
vgpuTypeInfo->profileSize >> 20,
vgpuTypeInfo->maxResolutionX,
vgpuTypeInfo->maxResolutionY,
vgpuTypeInfo->maxInstance);
break;
case VGPU_TYPE_INSTANCES:
rmStatus = get_available_instances(&avail_instances, pNv,
vgpuTypeInfo,
pgpuIndex, devfn);
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n",
__FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto exit;
}
rmStatus = kvgpumgrGetAvailableInstances(&avail_instances, pGpu,
vgpuTypeInfo,
pgpuIndex, devfn);
if (rmStatus != NV_OK)
goto exit;
@@ -314,6 +190,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
{
THREAD_STATE_NODE threadState;
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJGPU *pGpu = NULL;
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
NV_STATUS rmStatus = NV_OK;
NvU32 pgpuIndex, i, avail_instances = 0;
@@ -354,9 +231,17 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
continue;
}
rmStatus = get_available_instances(&avail_instances, pNv,
vgpuTypeInfo, pgpuIndex,
devfn);
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n",
__FUNCTION__);
goto exit;
}
rmStatus = kvgpumgrGetAvailableInstances(&avail_instances, pGpu,
vgpuTypeInfo, pgpuIndex,
devfn);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Failed to get available instances for vGPU ID: %d, status: 0x%x\n",
@@ -373,6 +258,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
}
}
exit:
// UNLOCK: release API lock
rmapiLockRelease();
}
@@ -474,56 +360,19 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
return rmStatus;
}
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 *size,
NvU32 regionIndex,
void *pVgpuVfioRef
)
static NV_STATUS
_nv_vgpu_get_bar_size(OBJGPU *pGpu, KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice,
NvU32 regionIndex, NvU64 *size, NvU8 *configParams)
{
REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK, status;
OBJGPU *pGpu = NULL;
KernelBus *pKernelBus;
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
void *fp = NULL;
NvU32 value = 0;
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr * pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
NV_STATUS status;
KernelBus *pKernelBus;
NvU32 value = 0;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto release_lock;
}
pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
*size = kbusGetPciBarSize(pKernelBus, regionIndex);
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id,
pMdevUuid,
&pKernelHostVgpuDevice), release_lock);
pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
if (pRequestVgpu == NULL)
{
rmStatus = NV_ERR_INVALID_POINTER;
goto release_lock;
}
pKernelHostVgpuDevice->pVgpuVfioRef = pVgpuVfioRef;
if (regionIndex == NV_VFIO_PCI_BAR1_REGION_INDEX)
{
VGPU_TYPE *vgpuTypeInfo;
@@ -531,34 +380,36 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
NvBool bOverrideBar1Size = NV_FALSE;
// Read BAR1 length from vgpuTypeInfo
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
kvgpumgrGetVgpuTypeInfo(pKernelHostVgpuDevice->vgpuType, &vgpuTypeInfo), release_lock);
NV_ASSERT_OK_OR_RETURN(kvgpumgrGetVgpuTypeInfo(pKernelHostVgpuDevice->vgpuType,
&vgpuTypeInfo));
*size = vgpuTypeInfo->bar1Length << 20;
NV_ASSERT_OK_OR_RETURN(kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex));
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex), release_lock);
/*
/*
* check for 'override_bar1_size' param in vgpuExtraParams list first,
* if param is missing there then check it in vgpu_params list
*/
status = nv_parse_config_params((const char*)vgpuTypeInfo->vgpuExtraParams,
"override_bar1_size", ';', &value);
if (status == NV_OK && value) {
if (status == NV_OK && value)
{
bOverrideBar1Size = NV_TRUE;
} else if (status == NV_ERR_OBJECT_NOT_FOUND) {
status = nv_parse_config_params(pRequestVgpu->configParams,
}
else if (status == NV_ERR_OBJECT_NOT_FOUND)
{
status = nv_parse_config_params((const char *)configParams,
"override_bar1_size", ',', &value);
if (status == NV_OK && value)
bOverrideBar1Size = NV_TRUE;
}
if (bOverrideBar1Size) {
if (bOverrideBar1Size)
{
NvU64 bar1SizeInBytes, guestBar1;
NvU64 gpuBar1LowerLimit = 256 * 1024 * 1024; // bar1 lower limit for override_bar1_length parameter
bar1SizeInBytes = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR1_REGION_INDEX);
if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled)
{
*size = pGpu->sriovState.vfBarSize[1];
@@ -573,7 +424,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
else if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX ||
regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
{
status = nv_parse_config_params(pRequestVgpu->configParams,
status = nv_parse_config_params((const char *)configParams,
"address64", ',', &value);
if ((status != NV_OK) || ((status == NV_OK) && (value != 0)))
@@ -585,53 +436,51 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
}
}
release_lock:
// UNLOCK: release API lock
rmapiLockRelease();
exit:
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
return NV_OK;
}
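nv_parse_config_params() is called above with two different separators (';' for vgpuExtraParams, ',' for configParams) and an explicit fall-through when the key is absent. A minimal standalone sketch of that lookup pattern, with the parser internals assumed:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for nv_parse_config_params(): scans "key=value" pairs
     * split by 'sep'; returns 0 on a hit, -1 when the key is missing. */
    static int parse_config_param(const char *params, const char *key,
                                  char sep, unsigned *value)
    {
        size_t klen = strlen(key);
        const char *p = params;

        while (p && *p)
        {
            while (*p == ' ')
                p++;
            if (strncmp(p, key, klen) == 0 && p[klen] == '=')
            {
                *value = (unsigned)strtoul(p + klen + 1, NULL, 0);
                return 0;
            }
            p = strchr(p, sep);
            if (p)
                p++;
        }
        return -1;
    }

    int main(void)
    {
        unsigned value = 0;

        /* Check the ';'-separated extra params first, then fall back to the
         * ','-separated config params, like the override_bar1_size logic above. */
        if (parse_config_param("frl=60;override_bar1_size=1",
                               "override_bar1_size", ';', &value) != 0)
            parse_config_param("override_bar1_size=1,address64=0",
                               "override_bar1_size", ',', &value);

        printf("override_bar1_size=%u\n", value);
        return 0;
    }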
NV_STATUS osVgpuVfioWake(
void *waitQueue
)
{
vgpu_vfio_info vgpu_info;
vgpu_info.waitQueue = waitQueue;
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_WAKE_WAIT_QUEUE);
}
NV_STATUS NV_API_CALL nv_vgpu_start(
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info
(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
void *waitQueue,
NvS32 *returnStatus,
NvU8 *vmName,
NvU32 qemuPid
NvU64 *barSizes,
NvU64 *sparseOffsets,
NvU64 *sparseSizes,
NvU32 *sparseCount,
NvU8 *configParams
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK;
OBJGPU *pGpu = NULL;
void *fp = NULL;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{
rmStatus = kvgpumgrStart(pMdevUuid, waitQueue, returnStatus,
vmName, qemuPid);
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);
// UNLOCK: release API lock
rmapiLockRelease();
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto release_lock;
}
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
nv_vgpu_rm_get_bar_info(pGpu, pMdevUuid, barSizes,
sparseOffsets, sparseSizes,
sparseCount, configParams),
release_lock);
release_lock:
// UNLOCK: release API lock
rmapiLockRelease();
exit:
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
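The rewritten entry point keeps the usual bracketing: enter the RM runtime, init thread state, take the API lock, then unwind through labels on failure. A generic sketch of that goto-based unwind idiom, with placeholder primitives standing in for the RM's real macros:

    #include <stdio.h>

    /* Placeholders; the RM's actual lock/thread-state APIs differ. */
    static int  acquire_api_lock(void) { return 0; }
    static void release_api_lock(void) { }
    static int  do_work(void)          { return 0; }

    static int entry_point(void)
    {
        int status;

        /* NV_ENTER_RM_RUNTIME / threadStateInit equivalents go here. */

        status = acquire_api_lock();
        if (status != 0)
            goto exit;            /* lock not held: skip the release */

        if ((status = do_work()) != 0)
            goto release_lock;    /* lock held: release before leaving */

    release_lock:
        release_api_lock();
    exit:
        /* threadStateFree / NV_EXIT_RM_RUNTIME equivalents go here. */
        return status;
    }

    int main(void) { return entry_point(); }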
@@ -674,207 +523,136 @@ static NV_STATUS nv_parse_config_params(
return rmStatus;
}
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
nvidia_stack_t *sp ,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 **offsets,
NvU64 **sizes,
NvU32 *numAreas
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_ERR_INVALID_STATE, status;
OBJGPU *pGpu = NULL;
POBJTMR pTmr = NULL;
KernelFifo *pKernelFifo = NULL;
void *fp = NULL;
REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
NvU32 bar0TmrMapSize = 0, bar0FifoMapSize = 0, value = 0;
NvU64 bar0TmrMapOffset = 0, bar0FifoMapOffset = 0;
NvU64 *vfRegionSizes = NULL;
NvU64 *vfRegionOffsets = NULL;
KernelBif *pKernelBif = NULL;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
rmStatus = NV_ERR_INVALID_STATE;
goto cleanup;
}
pTmr = GPU_GET_TIMER(pGpu);
pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
*numAreas = 0;
rmStatus = kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id, pMdevUuid,
&pKernelHostVgpuDevice);
if (rmStatus == NV_OK)
{
if (pKernelHostVgpuDevice->gfid != 0)
{
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
numAreas, NULL, NULL);
if (rmStatus == NV_OK)
{
os_alloc_mem((void **)&vfRegionOffsets, sizeof(NvU64) * (*numAreas));
os_alloc_mem((void **)&vfRegionSizes, sizeof (NvU64) * (*numAreas));
if (vfRegionOffsets && vfRegionSizes)
{
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
numAreas, vfRegionOffsets, vfRegionSizes);
if (rmStatus == NV_OK)
{
*offsets = vfRegionOffsets;
*sizes = vfRegionSizes;
}
}
else
{
if (vfRegionOffsets != NULL)
os_free_mem(vfRegionOffsets);
if (vfRegionSizes != NULL)
os_free_mem(vfRegionSizes);
rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
}
}
}
else
{
pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
if (pRequestVgpu == NULL)
{
rmStatus = NV_ERR_INVALID_POINTER;
goto cleanup;
}
status = nv_parse_config_params(pRequestVgpu->configParams, "direct_gpu_timer_access", ',', &value);
if ((status == NV_OK) && (value != 0))
{
rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr,
&bar0TmrMapOffset,
&bar0TmrMapSize);
if (rmStatus == NV_OK)
(*numAreas)++;
else
NV_PRINTF(LEVEL_ERROR,
"%s Failed to get NV_PTIMER region \n",
__FUNCTION__);
}
value = 0;
{
status = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo,
&bar0FifoMapOffset,
&bar0FifoMapSize);
if (status == NV_OK)
(*numAreas)++;
}
if (*numAreas != 0)
{
NvU32 i = 0;
NvU64 *tmpOffset, *tmpSize;
os_alloc_mem((void **)offsets, sizeof(NvU64) * (*numAreas));
os_alloc_mem((void **)sizes, sizeof (NvU64) * (*numAreas));
tmpOffset = *offsets;
tmpSize = *sizes;
if (bar0TmrMapSize != 0)
{
tmpOffset[i] = bar0TmrMapOffset;
tmpSize[i] = bar0TmrMapSize;
i++;
}
if (bar0FifoMapSize != 0)
{
tmpOffset[i] = bar0FifoMapOffset;
tmpSize[i] = bar0FifoMapSize;
}
}
}
}
cleanup:
// UNLOCK: release API lock
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL nv_vgpu_update_request(
nvidia_stack_t *sp ,
const NvU8 *pMdevUuid,
VGPU_DEVICE_STATE deviceState,
static NV_STATUS _nv_vgpu_get_sparse_mmap(
OBJGPU *pGpu,
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice,
NvU64 *offsets,
NvU64 *sizes,
const char *configParams
NvU32 *numAreas,
NvU8 *configParams
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND;
void *fp = NULL;
REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
NV_STATUS rmStatus = NV_OK, status;
POBJTMR pTmr = GPU_GET_TIMER(pGpu);
KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
NvU32 value = 0;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if (offsets != NULL)
os_free_mem(offsets);
if (sizes != NULL)
os_free_mem(sizes);
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
*numAreas = 0;
if (pKernelHostVgpuDevice->gfid != 0)
{
for (pRequestVgpu = listHead(&pKernelVgpuMgr->listRequestVgpuHead);
pRequestVgpu != NULL;
pRequestVgpu = listNext(&pKernelVgpuMgr->listRequestVgpuHead, pRequestVgpu))
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice,
os_page_size, numAreas, NULL, NULL);
if (rmStatus == NV_OK)
{
if (portMemCmp(pRequestVgpu->mdevUuid, pMdevUuid, VGPU_UUID_SIZE) == 0)
if (*numAreas > NVA081_MAX_SPARSE_REGION_COUNT)
{
NV_PRINTF(LEVEL_ERROR, "Not enough space for sparse mmap region info\n");
return NV_ERR_INSUFFICIENT_RESOURCES;
}
if (configParams != NULL)
portStringCopy(pRequestVgpu->configParams,
sizeof(pRequestVgpu->configParams),
configParams, (portStringLength(configParams) + 1));
pRequestVgpu->deviceState = deviceState;
rmStatus = NV_OK;
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
numAreas, offsets, sizes);
if (rmStatus != NV_OK)
return rmStatus;
}
}
else
{
status = nv_parse_config_params((const char *)configParams,
"direct_gpu_timer_access", ',', &value);
if ((status == NV_OK) && (value != 0))
{
NvU64 offset = 0;
NvU32 size = 0;
rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, &offset, &size);
if (rmStatus == NV_OK)
{
offsets[*numAreas] = offset;
sizes[*numAreas] = size;
(*numAreas)++;
}
}
// UNLOCK: release API lock
rmapiLockRelease();
}
value = 0;
{
NvU64 offset = 0;
NvU32 size = 0;
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
status = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, &offset, &size);
if (status == NV_OK)
{
offsets[*numAreas] = offset;
sizes[*numAreas] = size;
(*numAreas)++;
}
}
}
return rmStatus;
}
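Both branches above append (offset, size) pairs through the same offsets[*numAreas] = ...; (*numAreas)++ pattern, bounded by the caller's array capacity. A standalone sketch of that bounded accumulation, with the region limit and the example offsets made up:

    #include <stdio.h>

    typedef unsigned long long NvU64;
    typedef unsigned int       NvU32;

    #define MAX_SPARSE_REGIONS 5  /* stand-in for NVA081_MAX_SPARSE_REGION_COUNT */

    /* Append one region if the caller-provided arrays still have room. */
    static int add_region(NvU64 *offsets, NvU64 *sizes, NvU32 *numAreas,
                          NvU64 offset, NvU64 size)
    {
        if (*numAreas >= MAX_SPARSE_REGIONS)
            return -1;  /* "not enough space for sparse mmap region info" */
        offsets[*numAreas] = offset;
        sizes[*numAreas]   = size;
        (*numAreas)++;
        return 0;
    }

    int main(void)
    {
        NvU64 offsets[MAX_SPARSE_REGIONS], sizes[MAX_SPARSE_REGIONS];
        NvU32 numAreas = 0;

        add_region(offsets, sizes, &numAreas, 0x0009000, 0x1000);  /* e.g. timer window */
        add_region(offsets, sizes, &numAreas, 0x0810000, 0x10000); /* e.g. usermode region */

        for (NvU32 i = 0; i < numAreas; i++)
            printf("region %u: offset=0x%llx size=0x%llx\n",
                   i, offsets[i], sizes[i]);
        return 0;
    }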
NV_STATUS NV_API_CALL nv_gpu_bind_event(
nvidia_stack_t *sp
NV_STATUS nv_vgpu_rm_get_bar_info
(
OBJGPU *pGpu,
const NvU8 *pMdevUuid,
NvU64 *barSizes,
NvU64 *sparseOffsets,
NvU64 *sparseSizes,
NvU32 *sparseCount,
NvU8 *configParams
)
{
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
NV_STATUS rmStatus;
NvU32 i = 0;
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
kvgpumgrGetHostVgpuDeviceFromMdevUuid(pGpu->gpuId,
pMdevUuid,
&pKernelHostVgpuDevice),
exit);
for (i = 0; i < NVA081_MAX_BAR_REGION_COUNT; i++)
{
/*
* For SRIOV, only VF BAR1 is queried via RM, other BARs are directly
* queried via VF config space in vgpu-vfio
*/
if (gpuIsSriovEnabled(pGpu) && (i != NV_VFIO_PCI_BAR1_REGION_INDEX))
{
barSizes[i] = 0;
continue;
}
rmStatus = _nv_vgpu_get_bar_size(pGpu, pKernelHostVgpuDevice, i,
&barSizes[i], configParams);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Failed to query BAR size for index %u 0x%x\n",
i, rmStatus);
goto exit;
}
}
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
_nv_vgpu_get_sparse_mmap(pGpu, pKernelHostVgpuDevice,
sparseOffsets, sparseSizes,
sparseCount, configParams),
exit);
exit:
return rmStatus;
}
NV_STATUS NV_API_CALL nv_gpu_unbind_event
(
nvidia_stack_t *sp,
NvU32 gpuId,
NvBool *isEventNotified
)
{
THREAD_STATE_NODE threadState;
@@ -887,7 +665,11 @@ NV_STATUS NV_API_CALL nv_gpu_bind_event(
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{
CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_EVENT, 0);
/*
* Send gpu_id in "status" field of the event so that nvidia-vgpu-mgr
* daemon knows which GPU is being unbound
*/
CliAddSystemEvent(NV0000_NOTIFIERS_GPU_UNBIND_EVENT, gpuId, isEventNotified);
// UNLOCK: release API lock
rmapiLockRelease();
@@ -899,101 +681,32 @@ NV_STATUS NV_API_CALL nv_gpu_bind_event(
return rmStatus;
}
NV_STATUS osVgpuInjectInterrupt(void *vgpuVfioRef)
{
vgpu_vfio_info vgpu_info;
vgpu_info.vgpuVfioRef = vgpuVfioRef;
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_INJECT_INTERRUPT);
}
NV_STATUS osVgpuRegisterMdev
(
OS_GPU_INFO *pOsGpuInfo
NV_STATUS NV_API_CALL nv_gpu_bind_event(
nvidia_stack_t *sp,
NvU32 gpuId,
NvBool *isEventNotified
)
{
NV_STATUS status = NV_OK;
vgpu_vfio_info vgpu_info = {0};
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
NvU32 pgpuIndex, i;
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK;
void *fp = NULL;
status = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pOsGpuInfo->gpu_id, &pgpuIndex);
if (status != NV_OK)
return status;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[pgpuIndex]);
vgpu_info.numVgpuTypes = pKernelVgpuMgr->pgpuInfo[pgpuIndex].numVgpuTypes;
status = os_alloc_mem((void **)&vgpu_info.vgpuTypeIds,
((vgpu_info.numVgpuTypes) * sizeof(NvU32)));
if (status != NV_OK)
goto free_mem;
status = os_alloc_mem((void **)&vgpu_info.vgpuNames,
((vgpu_info.numVgpuTypes) * sizeof(char *)));
if (status != NV_OK)
goto free_mem;
vgpu_info.nv = pOsGpuInfo;
for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{
status = os_alloc_mem((void *)&vgpu_info.vgpuNames[i], (VGPU_STRING_BUFFER_SIZE * sizeof(char)));
if (status != NV_OK)
goto free_mem;
CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_EVENT, gpuId, isEventNotified);
vgpu_info.vgpuTypeIds[i] = pPhysGpuInfo->vgpuTypes[i]->vgpuTypeId;
os_snprintf((char *) vgpu_info.vgpuNames[i], VGPU_STRING_BUFFER_SIZE, "%s\n", pPhysGpuInfo->vgpuTypes[i]->vgpuName);
// UNLOCK: release API lock
rmapiLockRelease();
}
if ((!pPhysGpuInfo->sriovEnabled) ||
(pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
{
vgpu_info.is_virtfn = NV_FALSE;
status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
}
else
{
for (i = 0; i < MAX_VF_COUNT_PER_GPU; i++)
{
if (pPhysGpuInfo->vfPciInfo[i].isNvidiaAttached)
{
vgpu_info.is_virtfn = NV_TRUE;
vgpu_info.domain = pPhysGpuInfo->vfPciInfo[i].domain;
vgpu_info.bus = pPhysGpuInfo->vfPciInfo[i].bus;
vgpu_info.slot = pPhysGpuInfo->vfPciInfo[i].slot;
vgpu_info.function = pPhysGpuInfo->vfPciInfo[i].function;
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
if (status == NV_OK)
{
pPhysGpuInfo->vfPciInfo[i].isMdevAttached = NV_TRUE;
}
}
}
}
free_mem:
if (vgpu_info.vgpuTypeIds)
os_free_mem(vgpu_info.vgpuTypeIds);
if (vgpu_info.vgpuNames)
{
for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
{
if (vgpu_info.vgpuNames[i])
{
os_free_mem(vgpu_info.vgpuNames[i]);
}
}
os_free_mem(vgpu_info.vgpuNames);
}
return status;
return rmStatus;
}
NV_STATUS osIsVgpuVfioPresent(void)
@@ -1010,6 +723,19 @@ NV_STATUS osIsVfioPciCorePresent(void)
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VFIO_PCI_CORE_PRESENT);
}
void osWakeRemoveVgpu(NvU32 gpuId, NvU32 returnStatus)
{
vgpu_vfio_info vgpu_info;
vgpu_info.return_status = returnStatus;
vgpu_info.domain = GPU_32_BIT_ID_DECODE_DOMAIN(gpuId);
vgpu_info.bus = GPU_32_BIT_ID_DECODE_BUS(gpuId);
vgpu_info.device = GPU_32_BIT_ID_DECODE_DEVICE(gpuId);
os_call_vgpu_vfio((void *)&vgpu_info, CMD_VFIO_WAKE_REMOVE_GPU);
}
void initVGXSpecificRegistry(OBJGPU *pGpu)
{

View File

@@ -481,6 +481,11 @@ static NV_STATUS allocate_os_event(
status = NV_ERR_NO_MEMORY;
goto done;
}
new_event->hParent = hParent;
new_event->nvfp = nvfp;
new_event->fd = fd;
new_event->active = NV_TRUE;
new_event->refcount = 0;
portSyncSpinlockAcquire(nv->event_spinlock);
for (event = nv->event_list; event; event = event->next)
@@ -501,12 +506,6 @@ static NV_STATUS allocate_os_event(
done:
if (status == NV_OK)
{
new_event->hParent = hParent;
new_event->nvfp = nvfp;
new_event->fd = fd;
new_event->active = NV_TRUE;
new_event->refcount = 0;
nvfp->bCleanupRmapi = NV_TRUE;
NV_PRINTF(LEVEL_INFO, "allocated OS event:\n");
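The hunk above moves the initialization of new_event's fields to before the spinlock-protected list walk instead of the done: path, so the event is fully built before any other thread can observe it through nv->event_list. A minimal sketch of that publish-after-init rule, using pthreads in place of the port spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct os_event {
        int              fd;
        int              active;
        int              refcount;
        struct os_event *next;
    };

    static struct os_event   *event_list;
    static pthread_spinlock_t event_lock;

    /* Fill in every field BEFORE linking the node where other threads can
     * see it; linking first and initializing later (the pre-fix order)
     * lets a concurrent walker observe a half-built event. */
    static void publish_event(struct os_event *ev, int fd)
    {
        ev->fd       = fd;
        ev->active   = 1;
        ev->refcount = 0;

        pthread_spin_lock(&event_lock);
        ev->next   = event_list;
        event_list = ev;
        pthread_spin_unlock(&event_lock);
    }

    int main(void)
    {
        pthread_spin_init(&event_lock, PTHREAD_PROCESS_PRIVATE);
        struct os_event *ev = calloc(1, sizeof(*ev));
        if (ev)
            publish_event(ev, 42);
        printf("head fd = %d\n", event_list ? event_list->fd : -1);
        return 0;
    }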

View File

@@ -59,6 +59,7 @@
#include <gpu/gsp/kernel_gsp.h>
#include "liblogdecode.h"
#include <gpu/fsp/kern_fsp.h>
#include <gpu/gsp/kernel_gsp.h>
#include <mem_mgr/virt_mem_mgr.h>
#include <virtualization/kernel_vgpu_mgr.h>
@@ -385,6 +386,13 @@ osHandleGpuLost
gpuSetDisconnectedProperties(pGpu);
if (IS_GSP_CLIENT(pGpu))
{
// Notify all channels of the error so that UVM can fail gracefully
KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
kgspRcAndNotifyAllChannels(pGpu, pKernelGsp, ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS, NV_FALSE);
}
// Trigger the OS's PCI recovery mechanism
if (nv_pci_trigger_recovery(nv) != NV_OK)
{

View File

@@ -181,13 +181,11 @@
--undefined=nv_vgpu_create_request
--undefined=nv_vgpu_delete
--undefined=nv_vgpu_get_bar_info
--undefined=nv_vgpu_start
--undefined=nv_vgpu_get_type_ids
--undefined=nv_vgpu_get_type_info
--undefined=nv_vgpu_get_sparse_mmap
--undefined=nv_vgpu_update_request
--undefined=nv_vgpu_process_vf_info
--undefined=nv_gpu_bind_event
--undefined=nv_gpu_unbind_event
--undefined=rm_check_for_gpu_surprise_removal
--undefined=rm_set_external_kernel_client_count
--undefined=rm_schedule_gpu_wakeup

View File

@@ -1461,18 +1461,18 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
#endif
},
{ /* [84] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) cliresCtrlCmdVgpuGetStartData_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*pFunc=*/ (void (*)(void)) cliresCtrlCmdVgpuVfioNotifyRMStatus_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*flags=*/ 0x4u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xc01u,
/*paramSize=*/ sizeof(NV0000_CTRL_VGPU_GET_START_DATA_PARAMS),
/*methodId=*/ 0xc05u,
/*paramSize=*/ sizeof(NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "cliresCtrlCmdVgpuGetStartData"
/*func=*/ "cliresCtrlCmdVgpuVfioNotifyRMStatus"
#endif
},
{ /* [85] */
@@ -2093,10 +2093,6 @@ static void __nvoc_init_funcTable_RmClientResource_1(RmClientResource *pThis) {
pThis->__cliresCtrlCmdSyncGpuBoostGroupInfo__ = &cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__cliresCtrlCmdVgpuGetStartData__ = &cliresCtrlCmdVgpuGetStartData_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__cliresCtrlCmdVgpuGetVgpuVersion__ = &cliresCtrlCmdVgpuGetVgpuVersion_IMPL;
#endif
@@ -2105,6 +2101,10 @@ static void __nvoc_init_funcTable_RmClientResource_1(RmClientResource *pThis) {
pThis->__cliresCtrlCmdVgpuSetVgpuVersion__ = &cliresCtrlCmdVgpuSetVgpuVersion_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
pThis->__cliresCtrlCmdVgpuVfioNotifyRMStatus__ = &cliresCtrlCmdVgpuVfioNotifyRMStatus_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__ = &cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL;
#endif

View File

@@ -164,9 +164,9 @@ struct RmClientResource {
NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupCreate__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupDestroy__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupInfo__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdVgpuGetStartData__)(struct RmClientResource *, NV0000_CTRL_VGPU_GET_START_DATA_PARAMS *);
NV_STATUS (*__cliresCtrlCmdVgpuGetVgpuVersion__)(struct RmClientResource *, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *);
NV_STATUS (*__cliresCtrlCmdVgpuSetVgpuVersion__)(struct RmClientResource *, NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS *);
NV_STATUS (*__cliresCtrlCmdVgpuVfioNotifyRMStatus__)(struct RmClientResource *, NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemSyncExternalFabricMgmt__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemPfmreqhndlrCtrl__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_PFM_REQ_HNDLR_CTRL_PARAMS *);
@@ -316,9 +316,9 @@ NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource**, Dynamic*, NvU32,
#define cliresCtrlCmdSyncGpuBoostGroupCreate(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupCreate_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSyncGpuBoostGroupDestroy(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupDestroy_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSyncGpuBoostGroupInfo(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupInfo_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdVgpuGetStartData(pRmCliRes, pVgpuStartParams) cliresCtrlCmdVgpuGetStartData_DISPATCH(pRmCliRes, pVgpuStartParams)
#define cliresCtrlCmdVgpuGetVgpuVersion(pRmCliRes, vgpuVersionInfo) cliresCtrlCmdVgpuGetVgpuVersion_DISPATCH(pRmCliRes, vgpuVersionInfo)
#define cliresCtrlCmdVgpuSetVgpuVersion(pRmCliRes, vgpuVersionInfo) cliresCtrlCmdVgpuSetVgpuVersion_DISPATCH(pRmCliRes, vgpuVersionInfo)
#define cliresCtrlCmdVgpuVfioNotifyRMStatus(pRmCliRes, pVgpuDeleteParams) cliresCtrlCmdVgpuVfioNotifyRMStatus_DISPATCH(pRmCliRes, pVgpuDeleteParams)
#define cliresCtrlCmdSystemNVPCFGetPowerModeInfo(pRmCliRes, pParams) cliresCtrlCmdSystemNVPCFGetPowerModeInfo_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSystemSyncExternalFabricMgmt(pRmCliRes, pExtFabricMgmtParams) cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(pRmCliRes, pExtFabricMgmtParams)
#define cliresCtrlCmdSystemPfmreqhndlrCtrl(pRmCliRes, pParams) cliresCtrlCmdSystemPfmreqhndlrCtrl_DISPATCH(pRmCliRes, pParams)
@@ -888,12 +888,6 @@ static inline NV_STATUS cliresCtrlCmdSyncGpuBoostGroupInfo_DISPATCH(struct RmCli
return pRmCliRes->__cliresCtrlCmdSyncGpuBoostGroupInfo__(pRmCliRes, pParams);
}
NV_STATUS cliresCtrlCmdVgpuGetStartData_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_START_DATA_PARAMS *pVgpuStartParams);
static inline NV_STATUS cliresCtrlCmdVgpuGetStartData_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_START_DATA_PARAMS *pVgpuStartParams) {
return pRmCliRes->__cliresCtrlCmdVgpuGetStartData__(pRmCliRes, pVgpuStartParams);
}
NV_STATUS cliresCtrlCmdVgpuGetVgpuVersion_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *vgpuVersionInfo);
static inline NV_STATUS cliresCtrlCmdVgpuGetVgpuVersion_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *vgpuVersionInfo) {
@@ -906,6 +900,12 @@ static inline NV_STATUS cliresCtrlCmdVgpuSetVgpuVersion_DISPATCH(struct RmClient
return pRmCliRes->__cliresCtrlCmdVgpuSetVgpuVersion__(pRmCliRes, vgpuVersionInfo);
}
NV_STATUS cliresCtrlCmdVgpuVfioNotifyRMStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS *pVgpuDeleteParams);
static inline NV_STATUS cliresCtrlCmdVgpuVfioNotifyRMStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS *pVgpuDeleteParams) {
return pRmCliRes->__cliresCtrlCmdVgpuVfioNotifyRMStatus__(pRmCliRes, pVgpuDeleteParams);
}
NV_STATUS cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams);
static inline NV_STATUS cliresCtrlCmdSystemNVPCFGetPowerModeInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams) {

View File

@@ -535,7 +535,7 @@ void notifyDestruct_IMPL(struct Notifier *pNotifier);
#undef PRIVATE_FIELD
void CliAddSystemEvent(NvU32, NvU32);
void CliAddSystemEvent(NvU32, NvU32, NvBool *);
NvBool CliDelObjectEvents(NvHandle hClient, NvHandle hObject);
NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent);
NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject,

View File

@@ -762,6 +762,16 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
pThis->__kgspInitVgpuPartitionLogging__ = &kgspInitVgpuPartitionLogging_IMPL;
}
// Hal function -- kgspPreserveVgpuPartitionLogging
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */
{
pThis->__kgspPreserveVgpuPartitionLogging__ = &kgspPreserveVgpuPartitionLogging_395e98;
}
else
{
pThis->__kgspPreserveVgpuPartitionLogging__ = &kgspPreserveVgpuPartitionLogging_IMPL;
}
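The generated ChipHal guard above encodes a chip index as word = idx >> 5 plus bit = idx & 0x1f, then tests the bit against a 32-bit mask; 0x000007e0 sets bits 5-10 of word 1, i.e. the six chips named in the comment. A tiny sketch of that decode:

    #include <stdio.h>

    /* Membership test used by the generated HAL tables: index >> 5 picks
     * the 32-bit word, index & 0x1f picks the bit inside it. */
    static int in_hal_group(unsigned idx, unsigned word, unsigned long mask)
    {
        return ((idx >> 5) == word) && ((1UL << (idx & 0x1f)) & mask);
    }

    int main(void)
    {
        /* Word 1, mask 0x7e0: indices 37..42 are in the group. */
        for (unsigned idx = 32; idx < 64; idx++)
            if (in_hal_group(idx, 1, 0x000007e0UL))
                printf("chip index %u selects the stub variant\n", idx);
        return 0;
    }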
// Hal function -- kgspFreeVgpuPartitionLogging
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */
{

View File

@@ -292,7 +292,8 @@ struct KernelGsp {
const BINDATA_ARCHIVE *(*__kgspGetBinArchiveBooterUnloadUcode__)(struct KernelGsp *);
NvU64 (*__kgspGetMinWprHeapSizeMB__)(struct OBJGPU *, struct KernelGsp *);
NvU64 (*__kgspGetMaxWprHeapSizeMB__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspInitVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32, NvU64, NvU64, NvU64, NvU64);
NV_STATUS (*__kgspInitVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32, NvU64, NvU64, NvU64, NvU64, NvBool *);
NV_STATUS (*__kgspPreserveVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32);
NV_STATUS (*__kgspFreeVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32);
const char *(*__kgspGetSignatureSectionNamePrefix__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspSetupGspFmcArgs__)(struct OBJGPU *, struct KernelGsp *, GSP_FIRMWARE *);
@@ -482,8 +483,10 @@ NV_STATUS __nvoc_objCreate_KernelGsp(KernelGsp**, Dynamic*, NvU32);
#define kgspGetMinWprHeapSizeMB_HAL(pGpu, pKernelGsp) kgspGetMinWprHeapSizeMB_DISPATCH(pGpu, pKernelGsp)
#define kgspGetMaxWprHeapSizeMB(pGpu, pKernelGsp) kgspGetMaxWprHeapSizeMB_DISPATCH(pGpu, pKernelGsp)
#define kgspGetMaxWprHeapSizeMB_HAL(pGpu, pKernelGsp) kgspGetMaxWprHeapSizeMB_DISPATCH(pGpu, pKernelGsp)
#define kgspInitVgpuPartitionLogging(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize) kgspInitVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize)
#define kgspInitVgpuPartitionLogging_HAL(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize) kgspInitVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize)
#define kgspInitVgpuPartitionLogging(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize, pPreserveLogBufferFull) kgspInitVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize, pPreserveLogBufferFull)
#define kgspInitVgpuPartitionLogging_HAL(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize, pPreserveLogBufferFull) kgspInitVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize, pPreserveLogBufferFull)
#define kgspPreserveVgpuPartitionLogging(pGpu, pKernelGsp, gfid) kgspPreserveVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid)
#define kgspPreserveVgpuPartitionLogging_HAL(pGpu, pKernelGsp, gfid) kgspPreserveVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid)
#define kgspFreeVgpuPartitionLogging(pGpu, pKernelGsp, gfid) kgspFreeVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid)
#define kgspFreeVgpuPartitionLogging_HAL(pGpu, pKernelGsp, gfid) kgspFreeVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid)
#define kgspGetSignatureSectionNamePrefix(pGpu, pKernelGsp) kgspGetSignatureSectionNamePrefix_DISPATCH(pGpu, pKernelGsp)
@@ -981,14 +984,24 @@ static inline NvU64 kgspGetMaxWprHeapSizeMB_DISPATCH(struct OBJGPU *pGpu, struct
return pKernelGsp->__kgspGetMaxWprHeapSizeMB__(pGpu, pKernelGsp);
}
static inline NV_STATUS kgspInitVgpuPartitionLogging_395e98(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize) {
static inline NV_STATUS kgspInitVgpuPartitionLogging_395e98(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize, NvBool *pPreserveLogBufferFull) {
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS kgspInitVgpuPartitionLogging_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize);
NV_STATUS kgspInitVgpuPartitionLogging_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize, NvBool *pPreserveLogBufferFull);
static inline NV_STATUS kgspInitVgpuPartitionLogging_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize) {
return pKernelGsp->__kgspInitVgpuPartitionLogging__(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize);
static inline NV_STATUS kgspInitVgpuPartitionLogging_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize, NvBool *pPreserveLogBufferFull) {
return pKernelGsp->__kgspInitVgpuPartitionLogging__(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize, pPreserveLogBufferFull);
}
static inline NV_STATUS kgspPreserveVgpuPartitionLogging_395e98(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid) {
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS kgspPreserveVgpuPartitionLogging_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid);
static inline NV_STATUS kgspPreserveVgpuPartitionLogging_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid) {
return pKernelGsp->__kgspPreserveVgpuPartitionLogging__(pGpu, pKernelGsp, gfid);
}
static inline NV_STATUS kgspFreeVgpuPartitionLogging_395e98(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid) {
@@ -1339,14 +1352,14 @@ static inline NV_STATUS kgspAllocateBooterUnloadUcodeImage(struct OBJGPU *pGpu,
#define kgspAllocateBooterUnloadUcodeImage(pGpu, pKernelGsp, ppBooterUnloadUcode) kgspAllocateBooterUnloadUcodeImage_IMPL(pGpu, pKernelGsp, ppBooterUnloadUcode)
#endif //__nvoc_kernel_gsp_h_disabled
void kgspRcAndNotifyAllUserChannels_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 exceptType);
void kgspRcAndNotifyAllChannels_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 exceptType, NvBool bSkipKernelChannels);
#ifdef __nvoc_kernel_gsp_h_disabled
static inline void kgspRcAndNotifyAllUserChannels(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 exceptType) {
static inline void kgspRcAndNotifyAllChannels(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 exceptType, NvBool bSkipKernelChannels) {
NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!");
}
#else //__nvoc_kernel_gsp_h_disabled
#define kgspRcAndNotifyAllUserChannels(pGpu, pKernelGsp, exceptType) kgspRcAndNotifyAllUserChannels_IMPL(pGpu, pKernelGsp, exceptType)
#define kgspRcAndNotifyAllChannels(pGpu, pKernelGsp, exceptType, bSkipKernelChannels) kgspRcAndNotifyAllChannels_IMPL(pGpu, pKernelGsp, exceptType, bSkipKernelChannels)
#endif //__nvoc_kernel_gsp_h_disabled
#undef PRIVATE_FIELD
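The __nvoc_kernel_gsp_h_disabled guard above compiles calls down to an assert stub when the module is configured out, and to a thin macro over the _IMPL otherwise. A standalone sketch of that disabled-stub pattern, with placeholder names:

    #include <assert.h>
    #include <stdio.h>

    /* Define this to simulate compiling the module out, like
     * __nvoc_kernel_gsp_h_disabled in the header above. */
    /* #define MODULE_DISABLED */

    #ifdef MODULE_DISABLED
    static inline void rc_and_notify_all(int exceptType, int bSkipKernel)
    {
        (void)exceptType; (void)bSkipKernel;
        assert(!"module was disabled!");  /* mirrors NV_ASSERT_FAILED_PRECOMP */
    }
    #else
    static void rc_and_notify_all_impl(int exceptType, int bSkipKernel)
    {
        printf("notify: exceptType=%d skipKernel=%d\n", exceptType, bSkipKernel);
    }
    #define rc_and_notify_all(e, s) rc_and_notify_all_impl((e), (s))
    #endif

    int main(void)
    {
        rc_and_notify_all(74, 0);  /* 74 is an illustrative exception type only */
        return 0;
    }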

View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -116,7 +116,6 @@ typedef struct KERNEL_HOST_VGPU_DEVICE
NvU32 chidOffset[RM_ENGINE_TYPE_LAST];
NvU32 channelCount[RM_ENGINE_TYPE_LAST]; /*Number of channels available to the VF*/
NvU8 vgpuUuid[RM_SHA1_GID_SIZE];
void *pVgpuVfioRef;
struct REQUEST_VGPU_INFO_NODE *pRequestVgpuInfoNode;
struct PhysMemSubAlloc *pPhysMemSubAlloc;
struct HOST_VGPU_DEVICE *pHostVgpuDevice;
@@ -175,17 +174,11 @@ typedef struct
/* vGPU info received from mdev kernel module for KVM */
typedef struct REQUEST_VGPU_INFO_NODE
{
char configParams[VGPU_CONFIG_PARAMS_MAX_LENGTH];
NvU8 mdevUuid[VGPU_UUID_SIZE];
void *waitQueue;
NvU8 *vmName;
NvS32 *returnStatus;
NvU32 gpuPciId;
NvU32 qemuPid;
NvU16 vgpuId;
VGPU_DEVICE_STATE deviceState;
NvU32 gpuPciBdf;
NvU32 swizzId;
NvU16 vgpuId;
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
} REQUEST_VGPU_INFO_NODE;
@@ -281,6 +274,7 @@ kvgpumgrGuestRegister(struct OBJGPU *pGpu,
NvU32 swizzId,
NvU32 vgpuDeviceInstanceId,
NvBool bDisableDefaultSmcExecPartRestore,
NvU8 *pVgpuDevName,
KERNEL_HOST_VGPU_DEVICE **ppKernelHostVgpuDevice);
NV_STATUS
@@ -315,10 +309,6 @@ kvgpumgrGetVgpuFbUsage(struct OBJGPU *pGpu, NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_
NV_STATUS
kvgpumgrSetVgpuEncoderCapacity(struct OBJGPU *pGpu, NvU8 *vgpuUuid, NvU32 encoderCapacity);
NV_STATUS
kvgpumgrStart(const NvU8 *pMdevUuid, void *waitQueue, NvS32 *returnStatus,
NvU8 *vmName, NvU32 qemuPid);
NV_STATUS
kvgpumgrCreateRequestVgpu(NvU32 gpuPciId, const NvU8 *pMdevUuid,
NvU32 vgpuTypeId, NvU16 *vgpuId, NvU32 gpuPciBdf);
@@ -326,6 +316,10 @@ kvgpumgrCreateRequestVgpu(NvU32 gpuPciId, const NvU8 *pMdevUuid,
NV_STATUS
kvgpumgrDeleteRequestVgpu(const NvU8 *pMdevUuid, NvU16 vgpuId);
NV_STATUS
kvgpumgrGetAvailableInstances(NvU32 *avail_instances, struct OBJGPU *pGpu, VGPU_TYPE *vgpuTypeInfo,
NvU32 pgpuIndex, NvU8 devfn);
NV_STATUS
kvgpumgrGetHostVgpuDeviceFromMdevUuid(NvU32 gpuPciId, const NvU8 *pMdevUuid,
KERNEL_HOST_VGPU_DEVICE **ppKernelHostVgpuDevice);

View File

@@ -119,7 +119,6 @@ typedef struct _def_client_vgpu_ns_intr
NvU64 guestDomainId; // guest ID that we need to use to inject interrupt
NvU64 guestMSIAddr; // MSI address allocated by guest OS
NvU32 guestMSIData; // MSI data value set by guest OS
void *pVgpuVfioRef; // Reference to vgpu device in nvidia-vgpu-vfio module
void *pEventDpc; // DPC event to pass the interrupt
} VGPU_NS_INTR;

View File

@@ -825,11 +825,12 @@ NvBool osRemoveGpuSupported(void);
void initVGXSpecificRegistry(OBJGPU *);
NV_STATUS osVgpuVfioWake(void *waitQueue);
NV_STATUS osVgpuInjectInterrupt(void *pArg1);
NV_STATUS osVgpuRegisterMdev(OS_GPU_INFO *pArg1);
NV_STATUS nv_vgpu_rm_get_bar_info(OBJGPU *pGpu, const NvU8 *pMdevUuid, NvU64 *barSizes,
NvU64 *sparseOffsets, NvU64 *sparseSizes,
NvU32 *sparseCount, NvU8 *configParams);
NV_STATUS osIsVgpuVfioPresent(void);
NV_STATUS osIsVfioPciCorePresent(void);
void osWakeRemoveVgpu(NvU32, NvU32);
NV_STATUS rm_is_vgpu_supported_device(OS_GPU_INFO *pNv, NvU32 pmc_boot_1);
NV_STATUS osLockPageableDataSection(RM_PAGEABLE_SECTION *pSection);
NV_STATUS osUnlockPageableDataSection(RM_PAGEABLE_SECTION *pSection);

View File

@@ -295,6 +295,7 @@ typedef struct rpc_rc_triggered_v17_02
{
NvU32 nv2080EngineType;
NvU32 chid;
NvU32 gfid;
NvU32 exceptType;
NvU32 scope;
NvU16 partitionAttributionId;
@@ -1792,6 +1793,13 @@ static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rc_triggered_v17_02[] = {
.name = "chid"
#endif
},
{
.vtype = vtype_NvU32,
.offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, gfid),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "gfid"
#endif
},
{
.vtype = vtype_NvU32,
.offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, exceptType),

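The vmiopd field descriptors above pair a type tag with NV_OFFSETOF so RPC payloads can be decoded generically; adding gfid is just one more table row. A standalone sketch of the descriptor-table pattern using standard offsetof:

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    typedef unsigned int NvU32;

    typedef struct rc_triggered {
        NvU32 nv2080EngineType;
        NvU32 chid;
        NvU32 gfid;        /* the field added by this change */
        NvU32 exceptType;
    } rc_triggered;

    /* Type tag + byte offset (+ name for debug builds), mirroring the
     * vmiopd_fdesc_t entries. */
    typedef struct {
        int         vtype;   /* stand-in for vtype_NvU32 */
        size_t      offset;
        const char *name;
    } fdesc;

    static const fdesc rc_fields[] = {
        { 0, offsetof(rc_triggered, nv2080EngineType), "nv2080EngineType" },
        { 0, offsetof(rc_triggered, chid),             "chid"             },
        { 0, offsetof(rc_triggered, gfid),             "gfid"             },
        { 0, offsetof(rc_triggered, exceptType),       "exceptType"       },
    };

    int main(void)
    {
        rc_triggered msg = { 1, 42, 3, 13 };

        /* Walk the table: every NvU32 field decodes by offset alone. */
        for (size_t i = 0; i < sizeof(rc_fields) / sizeof(rc_fields[0]); i++)
        {
            NvU32 v;
            memcpy(&v, (const char *)&msg + rc_fields[i].offset, sizeof(v));
            printf("%s = %u\n", rc_fields[i].name, v);
        }
        return 0;
    }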
View File

@@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a

View File

@@ -330,21 +330,6 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
{ /* [6] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xa0810107u,
/*paramSize=*/ sizeof(NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_VgpuConfigApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigNotifyStart"
#endif
},
{ /* [7] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) vgpuconfigapiCtrlCmdVgpuConfigMdevRegister_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
@@ -357,7 +342,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigMdevRegister"
#endif
},
{ /* [8] */
{ /* [7] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -372,7 +357,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigSetVgpuInstanceEncoderCapacity"
#endif
},
{ /* [9] */
{ /* [8] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -387,7 +372,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigGetVgpuFbUsage"
#endif
},
{ /* [10] */
{ /* [9] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -402,7 +387,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigGetMigrationCap"
#endif
},
{ /* [11] */
{ /* [10] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -417,7 +402,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigGetHostFbReservation"
#endif
},
{ /* [12] */
{ /* [11] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -432,7 +417,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigGetPgpuMetadataString"
#endif
},
{ /* [13] */
{ /* [12] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -447,7 +432,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigGetDoorbellEmulationSupport"
#endif
},
{ /* [14] */
{ /* [13] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -462,7 +447,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigGetFreeSwizzId"
#endif
},
{ /* [15] */
{ /* [14] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -477,7 +462,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdPgpuGetMultiVgpuSupportInfo"
#endif
},
{ /* [16] */
{ /* [15] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -492,7 +477,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdGetVgpuDriversCaps"
#endif
},
{ /* [17] */
{ /* [16] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -507,7 +492,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigSetPgpuInfo"
#endif
},
{ /* [18] */
{ /* [17] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -520,6 +505,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VgpuConf
/*pClassInfo=*/ &(__nvoc_class_def_VgpuConfigApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId"
#endif
},
{ /* [18] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) vgpuconfigapiCtrlCmdVgpuSetVmName_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xa0810120u,
/*paramSize=*/ sizeof(NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_VgpuConfigApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "vgpuconfigapiCtrlCmdVgpuSetVmName"
#endif
},
@@ -595,10 +595,6 @@ static void __nvoc_init_funcTable_VgpuConfigApi_1(VgpuConfigApi *pThis) {
pThis->__vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification__ = &vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__vgpuconfigapiCtrlCmdVgpuConfigNotifyStart__ = &vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__vgpuconfigapiCtrlCmdVgpuConfigMdevRegister__ = &vgpuconfigapiCtrlCmdVgpuConfigMdevRegister_IMPL;
#endif
@@ -647,6 +643,10 @@ static void __nvoc_init_funcTable_VgpuConfigApi_1(VgpuConfigApi *pThis) {
pThis->__vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId__ = &vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__vgpuconfigapiCtrlCmdVgpuSetVmName__ = &vgpuconfigapiCtrlCmdVgpuSetVmName_IMPL;
#endif
pThis->__vgpuconfigapiShareCallback__ = &__nvoc_thunk_GpuResource_vgpuconfigapiShareCallback;
pThis->__vgpuconfigapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_vgpuconfigapiCheckMemInterUnmap;

View File

@@ -68,7 +68,6 @@ struct VgpuConfigApi {
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigGetSupportedVgpuTypes__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigGetCreatableVgpuTypes__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_GET_VGPU_TYPES_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigNotifyStart__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigMdevRegister__)(struct VgpuConfigApi *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigSetVgpuInstanceEncoderCapacity__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_VGPU_INSTANCE_ENCODER_CAPACITY_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigGetVgpuFbUsage__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_GET_VGPU_FB_USAGE_PARAMS *);
@@ -81,6 +80,7 @@ struct VgpuConfigApi {
NV_STATUS (*__vgpuconfigapiCtrlCmdGetVgpuDriversCaps__)(struct VgpuConfigApi *, NVA081_CTRL_GET_VGPU_DRIVER_CAPS_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigSetPgpuInfo__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_SET_PGPU_INFO_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_CONFIG_VALIDATE_SWIZZID_PARAMS *);
NV_STATUS (*__vgpuconfigapiCtrlCmdVgpuSetVmName__)(struct VgpuConfigApi *, NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS *);
NvBool (*__vgpuconfigapiShareCallback__)(struct VgpuConfigApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__vgpuconfigapiCheckMemInterUnmap__)(struct VgpuConfigApi *, NvBool);
NV_STATUS (*__vgpuconfigapiGetOrAllocNotifShare__)(struct VgpuConfigApi *, NvHandle, NvHandle, struct NotifShare **);
@@ -148,7 +148,6 @@ NV_STATUS __nvoc_objCreate_VgpuConfigApi(VgpuConfigApi**, Dynamic*, NvU32, struc
#define vgpuconfigapiCtrlCmdVgpuConfigGetSupportedVgpuTypes(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigGetSupportedVgpuTypes_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigGetCreatableVgpuTypes(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigGetCreatableVgpuTypes_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification(pVgpuConfigApi, pSetEventParams) vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification_DISPATCH(pVgpuConfigApi, pSetEventParams)
#define vgpuconfigapiCtrlCmdVgpuConfigNotifyStart(pVgpuConfigApi, pNotifyParams) vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_DISPATCH(pVgpuConfigApi, pNotifyParams)
#define vgpuconfigapiCtrlCmdVgpuConfigMdevRegister(pVgpuConfigApi) vgpuconfigapiCtrlCmdVgpuConfigMdevRegister_DISPATCH(pVgpuConfigApi)
#define vgpuconfigapiCtrlCmdVgpuConfigSetVgpuInstanceEncoderCapacity(pVgpuConfigApi, pEncoderParams) vgpuconfigapiCtrlCmdVgpuConfigSetVgpuInstanceEncoderCapacity_DISPATCH(pVgpuConfigApi, pEncoderParams)
#define vgpuconfigapiCtrlCmdVgpuConfigGetVgpuFbUsage(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigGetVgpuFbUsage_DISPATCH(pVgpuConfigApi, pParams)
@@ -161,6 +160,7 @@ NV_STATUS __nvoc_objCreate_VgpuConfigApi(VgpuConfigApi**, Dynamic*, NvU32, struc
#define vgpuconfigapiCtrlCmdGetVgpuDriversCaps(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdGetVgpuDriversCaps_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigSetPgpuInfo(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigSetPgpuInfo_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiCtrlCmdVgpuSetVmName(pVgpuConfigApi, pParams) vgpuconfigapiCtrlCmdVgpuSetVmName_DISPATCH(pVgpuConfigApi, pParams)
#define vgpuconfigapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) vgpuconfigapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define vgpuconfigapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) vgpuconfigapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define vgpuconfigapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) vgpuconfigapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
@@ -227,12 +227,6 @@ static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification_DISPA
return pVgpuConfigApi->__vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification__(pVgpuConfigApi, pSetEventParams);
}
NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_IMPL(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *pNotifyParams);
static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_DISPATCH(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *pNotifyParams) {
return pVgpuConfigApi->__vgpuconfigapiCtrlCmdVgpuConfigNotifyStart__(pVgpuConfigApi, pNotifyParams);
}
NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigMdevRegister_IMPL(struct VgpuConfigApi *pVgpuConfigApi);
static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigMdevRegister_DISPATCH(struct VgpuConfigApi *pVgpuConfigApi) {
@@ -305,6 +299,12 @@ static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId_DISPATCH(s
return pVgpuConfigApi->__vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId__(pVgpuConfigApi, pParams);
}
NV_STATUS vgpuconfigapiCtrlCmdVgpuSetVmName_IMPL(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS *pParams);
static inline NV_STATUS vgpuconfigapiCtrlCmdVgpuSetVmName_DISPATCH(struct VgpuConfigApi *pVgpuConfigApi, NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS *pParams) {
return pVgpuConfigApi->__vgpuconfigapiCtrlCmdVgpuSetVmName__(pVgpuConfigApi, pParams);
}
static inline NvBool vgpuconfigapiShareCallback_DISPATCH(struct VgpuConfigApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__vgpuconfigapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -63,4 +63,12 @@
*/
#define FLCN_RESET_PROPAGATION_DELAY_COUNT 10
/*!
* Used by FALCON_DMATRFCMD polling functions to wait for _FULL==FALSE or _IDLE==TRUE
*/
typedef enum {
FLCN_DMA_POLL_QUEUE_NOT_FULL = 0,
FLCN_DMA_POLL_ENGINE_IDLE = 1
} FlcnDmaPollMode;
#endif // FALCON_COMMON_H
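The two enumerants select which DMATRFCMD field the poll helper watches. For reference, the GA102 DMA path later in this change pairs them as below; this is a condensed sketch of that existing call pattern, not a new API:

    // Before queuing a request: wait for queue space, since writing the
    // transfer registers while the queue is full corrupts the DMA.
    NV_CHECK_OK_OR_RETURN(LEVEL_SILENT,
        s_dmaPoll_GA102(pGpu, pKernelFlcn, FLCN_DMA_POLL_QUEUE_NOT_FULL));
    kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD, dmaCmd);

    // After the last request: wait for _IDLE so a subsequent operation
    // cannot race the outstanding transfer (no TCM tagging on GA10x+).
    NV_CHECK_OK_OR_RETURN(LEVEL_SILENT,
        s_dmaPoll_GA102(pGpu, pKernelFlcn, FLCN_DMA_POLL_ENGINE_IDLE));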

View File

@@ -405,7 +405,7 @@ void NVRM_PRINTF_FUNCTION(const char *file,
// In MODS builds, we allow all printfs, but don't automatically include the
// __FILE__ or __FUNCTION__ references.
//
#if NV_PRINTF_STRINGS_ALLOWED && (!defined(NV_MODS) || defined(SIM_BUILD) || defined(DEBUG) || defined(NV_MODS_INTERNAL))
#if NV_PRINTF_STRINGS_ALLOWED && (!defined(NV_MODS) || defined(SIM_BUILD) || defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS_INTERNAL))
#define NV_FILE_STR __FILE__
#define NV_FILE __FILE__
#define NV_FILE_FMT "%s"

View File

@@ -1213,6 +1213,19 @@
#define NV_REG_STR_RM_WATCHDOG_INTERVAL_HI 0x0000000C
#define NV_REG_STR_RM_WATCHDOG_INTERVAL_DEFAULT NV_REG_STR_RM_WATCHDOG_INTERVAL_LOW
// Enable/Disable watchcat in GSP-Plugin for Guest RPC
// Default is Enabled
#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT "RmEnableGspPluginWatchcat"
#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_ENABLE 0x00000001
#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_DISABLE 0x00000000
#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_DEFAULT NV_REG_STR_RM_GSP_VGPU_WATCHCAT_ENABLE
// Set watchcat timeout value in GSP-Plugin for Guest RPC
// Default is 10 seconds
#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT "RmGspPluginWatchcatTimeOut"
#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_MIN 0x0000000A
#define NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_DEFAULT NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_MIN
#define NV_REG_STR_RM_DO_LOG_RC_EVENTS "RmLogonRC"
// Type Dword
// Encoding : 0 --> Skip Logging
@@ -1966,4 +1979,3 @@
#define NV_REG_STR_RM_FORCE_GR_SCRUBBER_CHANNEL_DISABLE 0x00000000
#define NV_REG_STR_RM_FORCE_GR_SCRUBBER_CHANNEL_ENABLE 0x00000001
#endif // NVRM_REGISTRY_H
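A minimal sketch of how the new watchcat keys would typically be read, assuming the stock osReadRegistryDword helper; whether an out-of-range timeout is clamped to the minimum or rejected is an assumption here, not something this change states:

    NvU32  data = 0;
    NvBool bWatchcatEnabled   = (NV_REG_STR_RM_GSP_VGPU_WATCHCAT_DEFAULT ==
                                 NV_REG_STR_RM_GSP_VGPU_WATCHCAT_ENABLE);
    NvU32  watchcatTimeoutSec = NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_DEFAULT;

    // Honor an explicit enable/disable override when the key is present.
    if (osReadRegistryDword(pGpu, NV_REG_STR_RM_GSP_VGPU_WATCHCAT, &data) == NV_OK)
        bWatchcatEnabled = (data == NV_REG_STR_RM_GSP_VGPU_WATCHCAT_ENABLE);

    // Assumed policy: clamp to the documented 10-second minimum.
    if (osReadRegistryDword(pGpu, NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT, &data) == NV_OK)
        watchcatTimeoutSec = NV_MAX(data, NV_REG_STR_RM_GSP_VGPU_WATCHCAT_TIMEOUT_MIN);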

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -82,6 +82,7 @@ typedef struct
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_21 (0x21)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_22 (0x22)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_23 (0x23)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_24 (0x24)
// format for 2.0 and 2.1
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05 (0x05U)

View File

@@ -548,11 +548,18 @@ done:
if (bLockAcquired)
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
if (bReserveMem)
if ((rmStatus == NV_OK) && bReserveMem)
{
// GPU lock should not be held when reserving memory for ctxBufPool
NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus,
NV_CHECK_OK(rmStatus, LEVEL_ERROR,
ctxBufPoolReserve(pGpu, pKernelChannelGroup->pCtxBufPool, bufInfoList, bufCount));
if (rmStatus != NV_OK)
{
// Acquire the lock again for the cleanup path
NV_ASSERT_OK_OR_RETURN(rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO));
bLockAcquired = NV_TRUE;
goto failed;
}
}
portMemFree(bufInfoList);

View File

@@ -1675,14 +1675,10 @@ kfifoGetChannelIterator_IMPL
)
{
portMemSet(pIt, 0, sizeof(*pIt));
pIt->physicalChannelID = 0;
pIt->pFifoDataBlock = NULL;
pIt->runlistId = 0;
pIt->numRunlists = 1;
if (kfifoIsPerRunlistChramEnabled(pKernelFifo))
{
pIt->numRunlists = kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo);
}
pIt->runlistId = 0;
// Resulting iterator will iterate over constructed CHID_MGRs only
pIt->numRunlists = pKernelFifo->numChidMgrs;
}
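For reference, the iterator is consumed with kfifoGetNextKernelChannel(), exactly as the GSP error paths later in this change do; a condensed sketch:

    CHANNEL_ITERATOR chanIt;
    KernelChannel   *pKernelChannel;

    // With this change the walk covers constructed CHID_MGRs only.
    kfifoGetChannelIterator(pGpu, pKernelFifo, &chanIt);
    while (kfifoGetNextKernelChannel(pGpu, pKernelFifo, &chanIt, &pKernelChannel) == NV_OK)
    {
        // ... per-channel work ...
    }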
/**

View File

@@ -31,6 +31,7 @@
#include "platform/platform.h"
#include "platform/chipset/chipset.h"
#include "kernel/gpu/gr/kernel_graphics.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "gpu/mem_mgr/fbsr.h"
#include "gpu/gsp/gsp_init_args.h"
@@ -350,6 +351,13 @@ gpuResumeFromStandby_IMPL(OBJGPU *pGpu)
NV_PRINTF(LEVEL_NOTICE, "Ending resume from %s\n",
IS_GPU_GC6_STATE_EXITING(pGpu) ? "GC6" : "APM Suspend");
}
if (resumeStatus == NV_OK)
{
if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0)))
{
return kgraphicsInitializeBug4208224WAR_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0));
}
}
return resumeStatus;
}
@@ -413,6 +421,13 @@ NV_STATUS gpuResumeFromHibernate_IMPL(OBJGPU *pGpu)
{
NV_PRINTF(LEVEL_NOTICE, "End resuming from APM Suspend\n");
}
if (resumeStatus == NV_OK)
{
if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0)))
{
return kgraphicsInitializeBug4208224WAR_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0));
}
}
return resumeStatus;
}

View File

@@ -196,6 +196,11 @@ kgraphicsInitializeBug4208224WAR_TU102
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS params = {0};
if (pKernelGraphics->bug4208224Info.bConstructed)
{
return NV_OK;
}
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
kgraphicsCreateBug4208224Channel_HAL(pGpu, pKernelGraphics));

View File

@@ -506,7 +506,7 @@ _kgraphicsPostSchedulingEnableHandler
}
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kgraphicsCreateGoldenImageChannel(pGpu, pKernelGraphics));
if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, pKernelGraphics))
if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, pKernelGraphics) && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))
{
return kgraphicsInitializeBug4208224WAR_HAL(pGpu, pKernelGraphics);
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -35,6 +35,67 @@
#include "published/ampere/ga102/dev_falcon_second_pri.h"
#include "published/ampere/ga102/dev_fbif_v4.h"
static GpuWaitConditionFunc s_dmaPollCondFunc;
typedef struct {
KernelFalcon *pKernelFlcn;
NvU32 pollMask;
NvU32 pollValue;
} DmaPollCondData;
static NvBool
s_dmaPollCondFunc
(
OBJGPU *pGpu,
void *pVoid
)
{
DmaPollCondData *pData = (DmaPollCondData *)pVoid;
return ((kflcnRegRead_HAL(pGpu, pData->pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD) & pData->pollMask) == pData->pollValue);
}
/*!
* Poll on either _FULL or _IDLE field of NV_PFALCON_FALCON_DMATRFCMD
*
* @param[in] pGpu GPU object pointer
* @param[in] pKernelFlcn pKernelFlcn object pointer
* @param[in] mode FLCN_DMA_POLL_QUEUE_NOT_FULL for poll on _FULL; return when _FULL is false
* FLCN_DMA_POLL_ENGINE_IDLE for poll on _IDLE; return when _IDLE is true
*/
static NV_STATUS
s_dmaPoll_GA102
(
OBJGPU *pGpu,
KernelFalcon *pKernelFlcn,
FlcnDmaPollMode mode
)
{
NV_STATUS status;
DmaPollCondData data;
data.pKernelFlcn = pKernelFlcn;
if (mode == FLCN_DMA_POLL_QUEUE_NOT_FULL)
{
data.pollMask = DRF_SHIFTMASK(NV_PFALCON_FALCON_DMATRFCMD_FULL);
data.pollValue = DRF_DEF(_PFALCON, _FALCON_DMATRFCMD, _FULL, _FALSE);
}
else
{
data.pollMask = DRF_SHIFTMASK(NV_PFALCON_FALCON_DMATRFCMD_IDLE);
data.pollValue = DRF_DEF(_PFALCON, _FALCON_DMATRFCMD, _IDLE, _TRUE);
}
status = gpuTimeoutCondWait(pGpu, s_dmaPollCondFunc, &data, NULL);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Error while waiting for Falcon DMA; mode: %d, status: 0x%08x\n", mode, status);
DBG_BREAKPOINT();
return status;
}
return NV_OK;
}
static NV_STATUS
s_dmaTransfer_GA102
(
@@ -48,15 +109,20 @@ s_dmaTransfer_GA102
)
{
NV_STATUS status = NV_OK;
RMTIMEOUT timeout;
NvU32 data;
NvU32 bytesXfered = 0;
// Ensure request queue initially has space or writing base registers will corrupt DMA transfer.
NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, s_dmaPoll_GA102(pGpu, pKernelFlcn, FLCN_DMA_POLL_QUEUE_NOT_FULL));
kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFBASE, NvU64_LO32(srcPhysAddr >> 8));
kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFBASE1, NvU64_HI32(srcPhysAddr >> 8) & 0x1FF);
while (bytesXfered < sizeInBytes)
{
// Poll for non-full request queue as writing control registers when full will corrupt DMA transfer.
NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, s_dmaPoll_GA102(pGpu, pKernelFlcn, FLCN_DMA_POLL_QUEUE_NOT_FULL));
data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMATRFMOFFS, _OFFS, dest, 0);
kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFMOFFS, data);
@@ -66,28 +132,17 @@ s_dmaTransfer_GA102
// Write the command
kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD, dmaCmd);
// Poll for completion
data = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD);
gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
while(FLD_TEST_DRF(_PFALCON_FALCON, _DMATRFCMD, _IDLE, _FALSE, data))
{
status = gpuCheckTimeout(pGpu, &timeout);
if (status == NV_ERR_TIMEOUT)
{
NV_PRINTF(LEVEL_ERROR, "Timeout waiting for Falcon DMA to finish\n");
DBG_BREAKPOINT();
return status;
}
osSpinLoop();
data = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD);
}
bytesXfered += FLCN_BLK_ALIGNMENT;
dest += FLCN_BLK_ALIGNMENT;
memOff += FLCN_BLK_ALIGNMENT;
}
//
// Poll for completion. GA10x+ does not have TCM tagging so DMA operations to/from TCM should
// wait for DMA to complete before launching another operation to avoid memory ordering problems.
//
NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, s_dmaPoll_GA102(pGpu, pKernelFlcn, FLCN_DMA_POLL_ENGINE_IDLE));
return status;
}

View File

@@ -867,7 +867,7 @@ exit_health_check:
if (bFirstFatal)
{
kgspRcAndNotifyAllUserChannels(pGpu, pKernelGsp, GSP_ERROR);
kgspRcAndNotifyAllChannels(pGpu, pKernelGsp, GSP_ERROR, NV_TRUE);
}
gpuCheckEccCounts_HAL(pGpu);

View File

@@ -295,13 +295,17 @@ _kgspCompleteRpcHistoryEntry
NvU32 historyIndex;
NvU32 historyEntry;
// Complete the current entry (it should be active)
// TODO: assert that ts_end == 0 here when continuation record timestamps are fixed
NV_ASSERT_OR_RETURN_VOID(pHistory[current].ts_start != 0);
pHistory[current].ts_end = osGetTimestamp();
//
// Complete any previous entries that aren't marked complete yet, using the same timestamp
// (we may not have explicitly waited for them)
//
for (historyIndex = 0; historyIndex < RPC_HISTORY_DEPTH; historyIndex++)
for (historyIndex = 1; historyIndex < RPC_HISTORY_DEPTH; historyIndex++)
{
historyEntry = (current + RPC_HISTORY_DEPTH - historyIndex) % RPC_HISTORY_DEPTH;
if (pHistory[historyEntry].ts_start != 0 &&
@@ -309,8 +313,8 @@ _kgspCompleteRpcHistoryEntry
{
pHistory[historyEntry].ts_end = pHistory[current].ts_end;
}
}
}
}
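Starting the walk at historyIndex 1 matters: offset 0 is the entry that was just completed above, so the backward scan begins one slot behind it and only back-fills entries that started but never recorded an end. An illustration with assumed values (RPC_HISTORY_DEPTH of 8 is illustrative, not the real constant):

    // current == 2, RPC_HISTORY_DEPTH == 8 (assumed for illustration):
    //   historyEntry = (2 + 8 - historyIndex) % 8
    //   historyIndex = 1..7  ->  entries 1, 0, 7, 6, 5, 4, 3
    // i.e. strictly older slots, newest-first, skipping the current one.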
/*!
* GSP client RM RPC send routine
@@ -472,7 +476,7 @@ _kgspRpcRCTriggered
RPC_PARAMS(rc_triggered, _v17_02);
KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu);
KernelChannel *pKernelChannel;
KernelChannel *pKernelChannel = NULL;
KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
CHID_MGR *pChidMgr;
NvU32 status = NV_OK;
@@ -500,73 +504,18 @@ _kgspRpcRCTriggered
if (status != NV_OK)
return status;
pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo,
pChidMgr,
rpc_params->chid);
NV_CHECK_OR_RETURN(LEVEL_ERROR,
pKernelChannel != NULL,
NV_ERR_INVALID_CHANNEL);
// Add the RcDiag records we received from GSP-RM to our system wide journal
if (IS_GFID_PF(rpc_params->gfid))
{
OBJSYS *pSys = SYS_GET_INSTANCE();
Journal *pRcDB = SYS_GET_RCDB(pSys);
RmClient *pClient;
NvU32 recordSize = rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport);
NvU32 rcDiagRecStart = pRcDB->RcErrRptNextIdx;
NvU32 rcDiagRecEnd;
NvU32 processId = 0;
NvU32 owner = RCDB_RCDIAG_DEFAULT_OWNER;
pClient = dynamicCast(RES_GET_CLIENT(pKernelChannel), RmClient);
NV_ASSERT(pClient != NULL);
if (pClient != NULL)
processId = pClient->ProcID;
for (NvU32 i = 0; i < rpc_params->rcJournalBufferSize / recordSize; i++)
{
RmRCCommonJournal_RECORD *pCommonRecord =
(RmRCCommonJournal_RECORD *)((NvU8*)&rpc_params->rcJournalBuffer + i * recordSize);
RmRcDiag_RECORD *pRcDiagRecord =
(RmRcDiag_RECORD *)&pCommonRecord[1];
#if defined(DEBUG)
NV_PRINTF(LEVEL_INFO, "%d: GPUTag=0x%x CPUTag=0x%llx timestamp=0x%llx stateMask=0x%llx\n",
i, pCommonRecord->GPUTag, pCommonRecord->CPUTag, pCommonRecord->timeStamp,
pCommonRecord->stateMask);
NV_PRINTF(LEVEL_INFO, " idx=%d timeStamp=0x%x type=0x%x flags=0x%x count=%d owner=0x%x processId=0x%x\n",
pRcDiagRecord->idx, pRcDiagRecord->timeStamp, pRcDiagRecord->type, pRcDiagRecord->flags,
pRcDiagRecord->count, pRcDiagRecord->owner, processId);
for (NvU32 j = 0; j < pRcDiagRecord->count; j++)
{
NV_PRINTF(LEVEL_INFO, " %d: offset=0x08%x tag=0x08%x value=0x08%x attribute=0x08%x\n",
j, pRcDiagRecord->data[j].offset, pRcDiagRecord->data[j].tag,
pRcDiagRecord->data[j].value, pRcDiagRecord->data[j].attribute);
}
#endif
if (rcdbAddRcDiagRecFromGsp(pGpu, pRcDB, pCommonRecord, pRcDiagRecord) == NULL)
{
NV_PRINTF(LEVEL_WARNING, "Lost RC diagnostic record coming from GPU%d GSP: type=0x%x stateMask=0x%llx\n",
gpuGetInstance(pGpu), pRcDiagRecord->type, pCommonRecord->stateMask);
}
}
rcDiagRecEnd = pRcDB->RcErrRptNextIdx - 1;
// Update records to have the correct PID associated with the channel
if (rcDiagRecStart != rcDiagRecEnd)
{
rcdbUpdateRcDiagRecContext(pRcDB,
rcDiagRecStart,
rcDiagRecEnd,
processId,
owner);
}
pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo,
pChidMgr,
rpc_params->chid);
NV_CHECK_OR_RETURN(LEVEL_ERROR,
pKernelChannel != NULL,
NV_ERR_INVALID_CHANNEL);
}
// With CC enabled, CPU-RM needs to write error notifiers
if (gpuIsCCFeatureEnabled(pGpu))
if (gpuIsCCFeatureEnabled(pGpu) && pKernelChannel != NULL)
{
NV_ASSERT_OK_OR_RETURN(krcErrorSetNotifier(pGpu, pKernelRc,
pKernelChannel,
@@ -577,7 +526,7 @@ _kgspRpcRCTriggered
return krcErrorSendEventNotifications_HAL(pGpu, pKernelRc,
pKernelChannel,
rmEngineType, // unused on kernel side
rmEngineType, // unused on kernel side
rpc_params->exceptType,
rpc_params->scope,
rpc_params->partitionAttributionId);
@@ -590,34 +539,39 @@ _kgspRpcRCTriggered
* @param[in] pGpu GPU object pointer
* @param[in] pKernelGsp KernelGsp object pointer
* @param[in] exceptType Error code to send to the RC notifiers
* @param[in] bSkipKernelChannels Don't RC and notify kernel channels
*
*/
void
kgspRcAndNotifyAllUserChannels
kgspRcAndNotifyAllChannels_IMPL
(
OBJGPU *pGpu,
KernelGsp *pKernelGsp,
NvU32 exceptType
NvU32 exceptType,
NvBool bSkipKernelChannels
)
{
//
// Note Bug 4503046: UVM currently attributes all errors as global and fails
// operations on all GPUs, in addition to the current failing GPU. Right now, the only
// case where we shouldn't skip kernel channels is when the GPU has fallen off the bus.
//
KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu);
KernelChannel *pKernelChannel;
KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
CHANNEL_ITERATOR chanIt;
RMTIMEOUT timeout;
NV_PRINTF(LEVEL_ERROR, "RC all user channels for critical error %d.\n", exceptType);
NV_PRINTF(LEVEL_ERROR, "RC all %schannels for critical error %d.\n",
bSkipKernelChannels ? MAKE_NV_PRINTF_STR("user ") : MAKE_NV_PRINTF_STR(""),
exceptType);
// Pass 1: halt all user channels.
// Pass 1: halt all channels.
kfifoGetChannelIterator(pGpu, pKernelFifo, &chanIt);
while (kfifoGetNextKernelChannel(pGpu, pKernelFifo, &chanIt, &pKernelChannel) == NV_OK)
{
//
// Kernel (uvm) channels are skipped to workaround nvbug 4503046, where
// uvm attributes all errors as global and fails operations on all GPUs,
// in addition to the current failing GPU.
//
if (kchannelCheckIsKernel(pKernelChannel))
if (kchannelCheckIsKernel(pKernelChannel) && bSkipKernelChannels)
{
continue;
}
@@ -626,7 +580,7 @@ kgspRcAndNotifyAllUserChannels
}
//
// Pass 2: Wait for the halts to complete, and RC notify the user channels.
// Pass 2: Wait for the halts to complete, and RC notify the channels.
// The channel halts require a preemption, which may not be able to complete
// since the GSP is no longer servicing interrupts. Wait for up to the
// default GPU timeout value for the preemptions to complete.
@@ -635,21 +589,22 @@ kgspRcAndNotifyAllUserChannels
kfifoGetChannelIterator(pGpu, pKernelFifo, &chanIt);
while (kfifoGetNextKernelChannel(pGpu, pKernelFifo, &chanIt, &pKernelChannel) == NV_OK)
{
// Skip kernel (uvm) channels as only user channel halts are initiated above.
if (kchannelCheckIsKernel(pKernelChannel))
if (kchannelCheckIsKernel(pKernelChannel) && bSkipKernelChannels)
{
continue;
}
kfifoCompleteChannelHalt(pGpu, pKernelFifo, pKernelChannel, &timeout);
NV_ASSERT_OK(krcErrorSetNotifier(pGpu, pKernelRc,
NV_ASSERT_OK(
krcErrorSetNotifier(pGpu, pKernelRc,
pKernelChannel,
exceptType,
kchannelGetEngineType(pKernelChannel),
RC_NOTIFIER_SCOPE_CHANNEL));
NV_ASSERT_OK(krcErrorSendEventNotifications_HAL(pGpu, pKernelRc,
NV_ASSERT_OK(
krcErrorSendEventNotifications_HAL(pGpu, pKernelRc,
pKernelChannel,
kchannelGetEngineType(pKernelChannel),
exceptType,
@@ -1162,8 +1117,8 @@ _kgspRpcMigCiConfigUpdate
pParams->gfid = rpc_params->gfid;
pParams->bDelete = rpc_params->bDelete;
status = pOS->osQueueWorkItemWithFlags(pGpu,
_kgspRpcMigCiConfigUpdateCallback,
(void *)pParams,
_kgspRpcMigCiConfigUpdateCallback,
(void *)pParams,
OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW | OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW);
if (status != NV_OK)
{
@@ -1548,13 +1503,13 @@ _tsDiffToDuration
{
duration /= 1000;
*pDurationUnitsChar = 'm';
}
// 9999ms then 10s
if (duration >= 10000)
{
duration /= 1000;
*pDurationUnitsChar = ' '; // so caller can always just append 's'
// 9999ms then 10s
if (duration >= 10000)
{
duration /= 1000;
*pDurationUnitsChar = ' '; // so caller can always just append 's'
}
}
return duration;
@@ -1717,7 +1672,7 @@ _kgspLogXid119
duration = _tsDiffToDuration(ts_end - pHistoryEntry->ts_start, &durationUnitsChar);
NV_ERROR_LOG(pGpu, GSP_RPC_TIMEOUT,
"Timeout after %llus of waiting for RPC response from GPU%d GSP! Expected function %d (%s) (0x%x 0x%x).",
"Timeout after %llus of waiting for RPC response from GPU%d GSP! Expected function %d (%s) (0x%llx 0x%llx).",
(durationUnitsChar == 'm' ? duration / 1000 : duration),
gpuGetInstance(pGpu),
expectedFunc,
@@ -1728,7 +1683,6 @@ _kgspLogXid119
if (pRpc->timeoutCount == 1)
{
kgspLogRpcDebugInfo(pGpu, pRpc, GSP_RPC_TIMEOUT, NV_TRUE/*bPollingForRpcResponse*/);
osAssertFailed();
NV_PRINTF(LEVEL_ERROR,
@@ -1736,6 +1690,32 @@ _kgspLogXid119
}
}
static void
_kgspLogRpcSanityCheckFailure
(
OBJGPU *pGpu,
OBJRPC *pRpc,
NvU32 rpcStatus,
NvU32 expectedFunc
)
{
RpcHistoryEntry *pHistoryEntry = &pRpc->rpcHistory[pRpc->rpcHistoryCurrent];
NV_ASSERT(expectedFunc == pHistoryEntry->function);
NV_PRINTF(LEVEL_ERROR,
"GPU%d sanity check failed 0x%x waiting for RPC response from GSP. Expected function %d (%s) (0x%llx 0x%llx).\n",
gpuGetInstance(pGpu),
rpcStatus,
expectedFunc,
_getRpcName(expectedFunc),
pHistoryEntry->data[0],
pHistoryEntry->data[1]);
kgspLogRpcDebugInfo(pGpu, pRpc, GSP_RPC_TIMEOUT, NV_TRUE/*bPollingForRpcResponse*/);
osAssertFailed();
}
static void
_kgspRpcIncrementTimeoutCountAndRateLimitPrints
(
@@ -1866,7 +1846,16 @@ _kgspRpcRecvPoll
goto done;
}
NV_CHECK_OK_OR_GOTO(rpcStatus, LEVEL_SILENT, _kgspRpcSanityCheck(pGpu, pKernelGsp, pRpc), done);
rpcStatus = _kgspRpcSanityCheck(pGpu, pKernelGsp, pRpc);
if (rpcStatus != NV_OK)
{
if (!pRpc->bQuietPrints)
{
_kgspLogRpcSanityCheckFailure(pGpu, pRpc, rpcStatus, expectedFunc);
pRpc->bQuietPrints = NV_TRUE;
}
goto done;
}
if (timeoutStatus == NV_ERR_TIMEOUT)
{
@@ -2135,19 +2124,20 @@ kgspInitVgpuPartitionLogging_IMPL
NvU64 initTaskLogBUffOffset,
NvU64 initTaskLogBUffSize,
NvU64 vgpuTaskLogBUffOffset,
NvU64 vgpuTaskLogBuffSize
NvU64 vgpuTaskLogBuffSize,
NvBool *pPreserveLogBufferFull
)
{
NV_STATUS nvStatus = NV_OK;
RM_LIBOS_LOG_MEM *pGspPluginVgpuTaskLog = NULL;
RM_LIBOS_LOG_MEM *pGspPluginInitTaskLog = NULL;
char vm_string[8], sourceName[SOURCE_NAME_MAX_LENGTH];
NvBool bPreserveLogBufferFull = NV_FALSE;
if (gfid > MAX_PARTITIONS_WITH_GFID)
{
return NV_ERR_INVALID_ARGUMENT;
}
portSyncMutexAcquire(pKernelGsp->pNvlogFlushMtx);
// Source name is used to generate a tag that is a unique identifier for nvlog buffers.
@@ -2155,6 +2145,11 @@ kgspInitVgpuPartitionLogging_IMPL
nvDbgSnprintf(sourceName, SOURCE_NAME_MAX_LENGTH, "V%02d", gfid);
libosLogCreateEx(&pKernelGsp->logDecodeVgpuPartition[gfid - 1], sourceName);
if (!bPreserveLogBufferFull)
{
bPreserveLogBufferFull = isLibosPreserveLogBufferFull(&pKernelGsp->logDecodeVgpuPartition[gfid - 1], pGpu->gpuInstance);
}
// Setup logging for vgpu task in vgpu partition
{
pGspPluginVgpuTaskLog = &pKernelGsp->gspPluginVgpuTaskLogMem[gfid - 1];
@@ -2162,14 +2157,13 @@ kgspInitVgpuPartitionLogging_IMPL
NV_ASSERT_OK_OR_GOTO(nvStatus,
memdescCreate(&pGspPluginVgpuTaskLog->pTaskLogDescriptor,
pGpu,
vgpuTaskLogBuffSize,
RM_PAGE_SIZE,
NV_TRUE, ADDR_FBMEM, NV_MEMORY_CACHED,
MEMDESC_FLAGS_NONE),
error_cleanup);
memdescCreate(&pGspPluginVgpuTaskLog->pTaskLogDescriptor,
pGpu,
vgpuTaskLogBuffSize,
RM_PAGE_SIZE,
NV_TRUE, ADDR_FBMEM, NV_MEMORY_CACHED,
MEMDESC_FLAGS_NONE),
error_cleanup);
memdescDescribe(pGspPluginVgpuTaskLog->pTaskLogDescriptor, ADDR_FBMEM, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize);
@@ -2185,12 +2179,12 @@ kgspInitVgpuPartitionLogging_IMPL
nvDbgSnprintf(vm_string, sizeof(vm_string), "VGPU%d", gfid);
libosLogAddLogEx(&pKernelGsp->logDecodeVgpuPartition[gfid - 1],
pGspPluginVgpuTaskLog->pTaskLogBuffer,
memdescGetSize(pGspPluginVgpuTaskLog->pTaskLogDescriptor),
pGpu->gpuInstance,
(gpuGetChipArch(pGpu) >> GPU_ARCH_SHIFT),
gpuGetChipImpl(pGpu),
vm_string,
pGspPluginVgpuTaskLog->pTaskLogBuffer,
memdescGetSize(pGspPluginVgpuTaskLog->pTaskLogDescriptor),
pGpu->gpuInstance,
(gpuGetChipArch(pGpu) >> GPU_ARCH_SHIFT),
gpuGetChipImpl(pGpu),
vm_string,
".fwlogging_vgpu");
}
else
@@ -2201,6 +2195,11 @@ kgspInitVgpuPartitionLogging_IMPL
}
}
if (!bPreserveLogBufferFull)
{
bPreserveLogBufferFull = isLibosPreserveLogBufferFull(&pKernelGsp->logDecodeVgpuPartition[gfid - 1], pGpu->gpuInstance);
}
// Setup logging for init task in vgpu partition
{
pGspPluginInitTaskLog = &pKernelGsp->gspPluginInitTaskLogMem[gfid - 1];
@@ -2254,6 +2253,7 @@ kgspInitVgpuPartitionLogging_IMPL
"GSP", SOURCE_NAME_MAX_LENGTH);
}
*pPreserveLogBufferFull = bPreserveLogBufferFull;
pKernelGsp->bHasVgpuLogs = NV_TRUE;
error_cleanup:
@@ -2265,6 +2265,31 @@ error_cleanup:
return nvStatus;
}
/*!
* Preserve vGPU Partition log buffers between VM reboots
*/
NV_STATUS
kgspPreserveVgpuPartitionLogging_IMPL
(
OBJGPU *pGpu,
KernelGsp *pKernelGsp,
NvU32 gfid
)
{
if ((gfid == 0) || (gfid > MAX_PARTITIONS_WITH_GFID))
{
return NV_ERR_INVALID_ARGUMENT;
}
// Make sure this NvLog buffer is pushed
kgspDumpGspLogsUnlocked(pKernelGsp, NV_FALSE);
// Preserve any captured vGPU Partition logs
libosPreserveLogs(&pKernelGsp->logDecodeVgpuPartition[gfid - 1]);
return NV_OK;
}
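A plausible ordering of the preserve and (re)init calls across a guest reboot, sketched under the assumption that the public wrappers take (pGpu, pKernelGsp, ...) like the _IMPL signatures above and that the caller holds the needed locks:

    NvBool bPreserveLogBufferFull = NV_FALSE;

    // Guest shutdown: flush NvLog, then preserve this partition's buffers.
    NV_ASSERT_OK(kgspPreserveVgpuPartitionLogging(pGpu, pKernelGsp, gfid));

    // Guest restart: re-create logging; the out-parameter reports whether
    // the preserved-log pool was already full before this boot.
    NV_ASSERT_OK(kgspInitVgpuPartitionLogging(pGpu, pKernelGsp, gfid,
                                              initTaskLogBUffOffset, initTaskLogBUffSize,
                                              vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize,
                                              &bPreserveLogBufferFull));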
void kgspNvlogFlushCb(void *pKernelGsp)
{
if (pKernelGsp != NULL)
@@ -2343,7 +2368,7 @@ _kgspInitLibosLoggingStructures
const char *elfSectionName;
} logInitValues[] =
{
{"LOGINIT", "INIT", 0x10000, ".fwlogging_init"}, // 64KB for stack traces
{"LOGINIT", "INIT", 0x10000, ".fwlogging_init"}, // 64KB for stack traces
#if defined(DEVELOP) || defined(DEBUG)
// The interrupt task is in the rm elf, so they share the same logging elf too
{"LOGINTR", "INTR", 0x40000, ".fwlogging_rm"}, // 256KB ISR debug log on develop/debug builds
@@ -2361,12 +2386,12 @@ _kgspInitLibosLoggingStructures
NvU8 idx;
NvU64 flags = MEMDESC_FLAGS_NONE;
pKernelGsp->pNvlogFlushMtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged());
if (pKernelGsp->pNvlogFlushMtx == NULL)
{
nvStatus = NV_ERR_INSUFFICIENT_RESOURCES;
goto error_cleanup;
}
pKernelGsp->pNvlogFlushMtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged());
if (pKernelGsp->pNvlogFlushMtx == NULL)
{
nvStatus = NV_ERR_INSUFFICIENT_RESOURCES;
goto error_cleanup;
}
libosLogCreate(&pKernelGsp->logDecode);
@@ -2381,8 +2406,8 @@ _kgspInitLibosLoggingStructures
// Setup logging memory for each task.
NV_ASSERT_OK_OR_GOTO(nvStatus,
memdescCreate(&pLog->pTaskLogDescriptor,
pGpu,
logInitValues[idx].size,
pGpu,
logInitValues[idx].size,
RM_PAGE_SIZE,
NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED,
flags),
@@ -3033,7 +3058,9 @@ kgspDumpGspLogsUnlocked_IMPL
NvBool bSyncNvLog
)
{
if (pKernelGsp->bInInit || pKernelGsp->pLogElf || bSyncNvLog)
if (pKernelGsp->bInInit || pKernelGsp->pLogElf || bSyncNvLog
|| pKernelGsp->bHasVgpuLogs
)
{
libosExtractLogs(&pKernelGsp->logDecode, bSyncNvLog);
@@ -3063,7 +3090,9 @@ kgspDumpGspLogs_IMPL
NvBool bSyncNvLog
)
{
if (pKernelGsp->bInInit || pKernelGsp->pLogElf || bSyncNvLog)
if (pKernelGsp->bInInit || pKernelGsp->pLogElf || bSyncNvLog
|| pKernelGsp->bHasVgpuLogs
)
{
if (pKernelGsp->pNvlogFlushMtx != NULL)
portSyncMutexAcquire(pKernelGsp->pNvlogFlushMtx);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -341,6 +341,7 @@ _knvlinkAreLinksDisconnected
portMemSet(pParams, 0, sizeof(*pParams));
pParams->linkMask = pKernelNvlink->enabledLinks;
pParams->bSublinkStateInst = NV_TRUE;
status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink,
NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO,

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -722,7 +722,7 @@ subdeviceCtrlCmdBusGetNvlinkStatus_IMPL
pParams->enabledLinkMask = (bIsNvlinkReady) ? pKernelNvlink->enabledLinks : 0x0;
pTmpData->nvlinkLinkAndClockInfoParams.linkMask = pParams->enabledLinkMask;
pTmpData->nvlinkLinkAndClockInfoParams.bSublinkStateInst = pParams->bSublinkStateInst;
pTmpData->nvlinkLinkAndClockInfoParams.bSublinkStateInst = NV_TRUE;
status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink,
NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO,

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -945,6 +945,7 @@ knvlinkGetPeersNvlinkMaskFromHshub_IMPL
portMemSet(&params, 0, sizeof(params));
params.linkMask = pKernelNvlink->enabledLinks;
params.bSublinkStateInst = NV_TRUE;
status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink,
NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO,

View File

@@ -445,12 +445,14 @@ krcErrorSendEventNotifications_KERNEL
)
{
NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);
NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL);
NV_ASSERT_OK_OR_RETURN(
krcErrorSendEventNotificationsCtxDma_HAL(pGpu, pKernelRc,
pKernelChannel,
scope));
if (pKernelChannel != NULL)
{
NV_ASSERT_OK_OR_RETURN(
krcErrorSendEventNotificationsCtxDma_HAL(pGpu, pKernelRc,
pKernelChannel,
scope));
}
gpuNotifySubDeviceEvent(pGpu,
NV2080_NOTIFIERS_RC_ERROR,

View File

@@ -240,7 +240,6 @@ subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL
pMemory->vgpuNsIntr.guestMSIAddr = 0;
pMemory->vgpuNsIntr.guestMSIData = 0;
pMemory->vgpuNsIntr.guestDomainId = 0;
pMemory->vgpuNsIntr.pVgpuVfioRef = NULL;
pMemory->vgpuNsIntr.isSemaMemValidationEnabled = NV_TRUE;
return NV_OK;

View File

@@ -2369,6 +2369,7 @@ _controllerParseStaticTable_v22
switch (header.version)
{
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_24:
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_23:
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_22:
{
@@ -3885,45 +3886,22 @@ cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL
}
NV_STATUS
cliresCtrlCmdVgpuGetStartData_IMPL
cliresCtrlCmdVgpuVfioNotifyRMStatus_IMPL
(
RmClientResource *pRmCliRes,
NV0000_CTRL_VGPU_GET_START_DATA_PARAMS *pVgpuStartParams
NV0000_CTRL_VGPU_VFIO_NOTIFY_RM_STATUS_PARAMS *pVgpuStatusParams
)
{
NV_STATUS status = NV_OK;
NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes);
NvU32 event, eventStatus;
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
status = CliGetSystemEventStatus(hClient, &event, &eventStatus);
if (status != NV_OK)
return status;
if (osIsVgpuVfioPresent() != NV_OK)
return NV_ERR_NOT_SUPPORTED;
if (event != NV0000_NOTIFIERS_VM_START)
return NV_ERR_INVALID_EVENT;
osWakeRemoveVgpu(pVgpuStatusParams->gpuId, pVgpuStatusParams->returnStatus);
for (pRequestVgpu = listHead(&pKernelVgpuMgr->listRequestVgpuHead);
pRequestVgpu != NULL;
pRequestVgpu = listNext(&pKernelVgpuMgr->listRequestVgpuHead, pRequestVgpu))
{
if (pRequestVgpu->deviceState == NV_VGPU_DEV_OPENED)
{
portMemCopy(pVgpuStartParams->mdevUuid, VGPU_UUID_SIZE, pRequestVgpu->mdevUuid, VGPU_UUID_SIZE);
portMemCopy(pVgpuStartParams->configParams, VGPU_CONFIG_PARAMS_MAX_LENGTH, pRequestVgpu->configParams, VGPU_CONFIG_PARAMS_MAX_LENGTH);
pVgpuStartParams->gpuPciId = pRequestVgpu->gpuPciId;
pVgpuStartParams->qemuPid = pRequestVgpu->qemuPid;
pVgpuStartParams->vgpuId = pRequestVgpu->vgpuId;
pVgpuStartParams->gpuPciBdf = pRequestVgpu->gpuPciBdf;
return NV_OK;
}
}
return NV_ERR_OBJECT_NOT_FOUND;
return NV_OK;
}
NV_STATUS
cliresCtrlCmdVgpuGetVgpuVersion_IMPL
(

View File

@@ -569,7 +569,8 @@ CliDelObjectEvents
void CliAddSystemEvent(
NvU32 event,
NvU32 status
NvU32 status,
NvBool *isEventNotified
)
{
NvU32 temp;
@@ -581,6 +582,9 @@ void CliAddSystemEvent(
NV_STATUS rmStatus = NV_OK;
Notifier *pNotifier;
if (isEventNotified != NULL)
*isEventNotified = NV_FALSE;
for (ppClient = serverutilGetFirstClientUnderLock();
ppClient;
ppClient = serverutilGetNextClientUnderLock(ppClient))
@@ -629,6 +633,8 @@ void CliAddSystemEvent(
NV_PRINTF(LEVEL_ERROR, "failed to deliver event 0x%x",
event);
}
if (isEventNotified != NULL)
*isEventNotified = NV_TRUE;
}
pEventNotification = pEventNotification->Next;
}
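Callers that need to know whether any client actually consumed the event can now pass a result pointer; NULL stays legal for callers that do not care. A hedged sketch (the surrounding caller and eventStatus value are illustrative; NV0000_NOTIFIERS_VM_START is the event this change uses elsewhere):

    NvBool bEventNotified = NV_FALSE;

    CliAddSystemEvent(NV0000_NOTIFIERS_VM_START, eventStatus, &bEventNotified);

    if (!bEventNotified)
    {
        // No registered client received the notification; the caller can
        // fall back (e.g. report failure) instead of waiting forever.
    }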

View File

@@ -137,6 +137,7 @@ kvgpumgrGuestRegister(OBJGPU *pGpu,
NvU32 swizzId,
NvU32 vgpuDeviceInstanceId,
NvBool bDisableDefaultSmcExecPartRestore,
NvU8 *pVgpuDevName,
KERNEL_HOST_VGPU_DEVICE **ppKernelHostVgpuDevice)
{
return NV_ERR_NOT_SUPPORTED;
@@ -271,13 +272,6 @@ kvgpumgrSetVgpuEncoderCapacity(OBJGPU *pGpu, NvU8 *vgpuUuid, NvU32 encoderCapaci
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS
kvgpumgrStart(const NvU8 *pMdevUuid, void *waitQueue, NvS32 *returnStatus,
NvU8 *vmName, NvU32 qemuPid)
{
return NV_ERR_OBJECT_NOT_FOUND;
}
//
// Add vGPU info received on mdev_create sysfs call to REQUEST_VGPU_INFO_NODE
// list. REQUEST_VGPU_INFO_NODE is currently used only for vGPU on KVM.
@@ -303,6 +297,17 @@ kvgpumgrDeleteRequestVgpu(const NvU8 *pMdevUuid, NvU16 vgpuId)
return NV_ERR_OBJECT_NOT_FOUND;
}
NV_STATUS kvgpumgrGetAvailableInstances(
NvU32 *availInstances,
OBJGPU *pGpu,
VGPU_TYPE *vgpuTypeInfo,
NvU32 pgpuIndex,
NvU8 devfn
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS
kvgpumgrGetHostVgpuDeviceFromMdevUuid(NvU32 gpuPciId, const NvU8 *pMdevUuid,
KERNEL_HOST_VGPU_DEVICE **ppKernelHostVgpuDevice)

View File

@@ -121,16 +121,6 @@ vgpuconfigapiCtrlCmdVgpuConfigEventSetNotification_IMPL
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigNotifyStart_IMPL
(
VgpuConfigApi *pVgpuConfigApi,
NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS *pNotifyParams
)
{
return NV_ERR_OBJECT_NOT_FOUND;
}
NV_STATUS
vgpuconfigapiCtrlCmdVgpuConfigMdevRegister_IMPL
(
@@ -242,3 +232,14 @@ vgpuconfigapiCtrlCmdVgpuConfigValidateSwizzId_IMPL
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS
vgpuconfigapiCtrlCmdVgpuSetVmName_IMPL
(
VgpuConfigApi *pVgpuConfigApi,
NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS *pParams
)
{
return NV_ERR_NOT_SUPPORTED;
}