530.30.02

Andy Ritger
2023-02-28 11:12:44 -08:00
parent e598191e8e
commit 4397463e73
928 changed files with 124728 additions and 88525 deletions

View File

@@ -104,7 +104,7 @@ typedef struct nv_ioctl_rm_api_version
#define NV_RM_API_VERSION_CMD_STRICT 0
#define NV_RM_API_VERSION_CMD_RELAXED '1'
#define NV_RM_API_VERSION_CMD_OVERRIDE '2'
#define NV_RM_API_VERSION_CMD_QUERY '2'
#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0
#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1

View File

@@ -699,6 +699,22 @@
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableResizableBar
*
* Description:
*
* When this option is enabled, the NVIDIA driver will attempt to resize
* BAR1 to match the framebuffer size, or the next largest available size,
* on supported machines. This is currently only implemented for Linux.
*
* Possible values:
* 0 - Do not enable PCI BAR resizing
* 1 - Enable PCI BAR resizing
*/
#define __NV_ENABLE_RESIZABLE_BAR EnableResizableBar
#define NV_REG_ENABLE_RESIZABLE_BAR NV_REG_STRING(__NV_ENABLE_RESIZABLE_BAR)
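For reference, registry keys registered in the nv_parms table later in this file are exposed as nvidia kernel module parameters. Assuming the driver's usual NVreg_ parameter prefix (not shown in this hunk), enabling BAR resizing from a modprobe configuration would look like:

options nvidia NVreg_EnableResizableBar=1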
/*
* Option: EnableGpuFirmware
*
@@ -825,6 +841,26 @@
#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001
/*
* Option: NVreg_RmNvlinkBandwidth
*
* Description:
*
* This option allows the user to reduce the NVLINK P2P bandwidth to save power.
* The option takes a string value.
*
* Possible string values:
* OFF: 0% bandwidth
* MIN: 15%-25% bandwidth depending on the system's NVLink topology
* HALF: 50% bandwidth
* 3QUARTER: 75% bandwidth
* FULL: 100% bandwidth (default)
*
* This option only applies to Hopper+ GPUs with NVLINK version 4.0.
*/
#define __NV_RM_NVLINK_BW RmNvlinkBandwidth
#define NV_RM_NVLINK_BW NV_REG_STRING(__NV_RM_NVLINK_BW)
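Here the option name already carries the NVreg_ prefix, so, assuming the same module-parameter plumbing as the other registry keys in this file, capping the links at their lowest bandwidth setting would look like:

options nvidia NVreg_RmNvlinkBandwidth=MIN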
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
@@ -861,6 +897,7 @@ NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
@@ -870,6 +907,7 @@ NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL);
/*
*----------------registry database definition----------------------
@@ -910,6 +948,7 @@ nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_RESIZABLE_BAR),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),

View File

@@ -315,6 +315,7 @@ typedef enum
NV_SOC_IRQ_DPAUX_TYPE,
NV_SOC_IRQ_GPIO_TYPE,
NV_SOC_IRQ_HDACODEC_TYPE,
NV_SOC_IRQ_TCPC2DISP_TYPE,
NV_SOC_IRQ_INVALID_TYPE
} nv_soc_irq_type_t;
@@ -329,6 +330,7 @@ typedef struct nv_soc_irq_info_s {
NvU32 gpio_num;
NvU32 dpaux_instance;
} irq_data;
NvS32 ref_count;
} nv_soc_irq_info_t;
#define NV_MAX_SOC_IRQS 6
@@ -384,9 +386,11 @@ typedef struct nv_state_t
NvS32 current_soc_irq;
NvU32 num_soc_irqs;
NvU32 hdacodec_irq;
NvU32 tcpc2disp_irq;
NvU8 *soc_dcb_blob;
NvU32 soc_dcb_size;
NvU32 disp_sw_soc_chip_id;
NvBool soc_is_dpalt_mode_supported;
NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
NvU32 igpu_nonstall_irq;
@@ -649,7 +653,8 @@ static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->fb) && (offset >= nv->fb->cpu_address) &&
return ((nv->fb) && (nv->fb->size != 0) &&
(offset >= nv->fb->cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}
@@ -739,7 +744,7 @@ nv_state_t* NV_API_CALL nv_get_ctl_state (void);
void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 );
NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *);
NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **);
@@ -915,7 +920,6 @@ NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *
void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *);
char* NV_API_CALL rm_remove_spaces (const char *);
char* NV_API_CALL rm_string_token (char **, const char);
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *);
@@ -985,11 +989,12 @@ const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *,
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
NvBool NV_API_CALL rm_is_altstack_in_use(void);
/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);

View File

@@ -181,7 +181,6 @@ NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);

View File

@@ -39,13 +39,6 @@ OSDbgBreakpointEnabled osDbgBreakpointEnabled;
void* osGetStereoDongleInterface(void);
OSCallACPI_DSM osCallACPI_DSM;
OSCallACPI_DDC osCallACPI_DDC;
OSCallACPI_NVHG_ROM osCallACPI_NVHG_ROM;
OSCallACPI_DOD osCallACPI_DOD;
OSCallACPI_MXDS osCallACPI_MXDS;
OSCallACPI_MXDM osCallACPI_MXDM;
#if defined(NVCPU_X86_64)
OSnv_rdcr4 nv_rdcr4;
NvU64 nv_rdcr3(OBJOS *);

View File

@@ -0,0 +1,210 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <rmconfig.h>
#include <nvlog_inc.h>
#include <nv.h>
#include <nv-priv.h>
#include <nv-reg.h>
#include <nv_ref.h>
#include <osapi.h>
#include <gpu/mem_mgr/mem_mgr.h>
#include <core/locks.h>
#include "kernel/gpu/intr/intr.h"
//
// Function to update the fixed FBSR modes to support multiple variants such as
// GCOFF and CUDA S3/resume.
//
static void
RmUpdateFixedFbsrModes(OBJGPU *pGpu)
{
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_GCOFF_STATE_ENTERING))
{
pMemoryManager->fixedFbsrModesMask = NVBIT(FBSR_TYPE_DMA);
}
else if (nv->preserve_vidmem_allocations)
{
pMemoryManager->fixedFbsrModesMask = NVBIT(FBSR_TYPE_FILE);
}
}
static NV_STATUS
RmPowerManagementInternal(
OBJGPU *pGpu,
nv_pm_action_t pmAction
)
{
// Default to NV_OK. There may be cases where resman is loaded, but
// no devices are allocated (we're still at the console). In these
// cases, it's fine to let the system do whatever it wants.
NV_STATUS rmStatus = NV_OK;
if (pGpu)
{
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvBool bcState = gpumgrGetBcEnabledStatus(pGpu);
Intr *pIntr = GPU_GET_INTR(pGpu);
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
if ((pmAction == NV_PM_ACTION_HIBERNATE) || (pmAction == NV_PM_ACTION_STANDBY))
{
//
// The pFb object stores the FBSR mode through which FB state unload happened,
// so the OS layer doesn't need to set the FBSR mode on resume.
//
RmUpdateFixedFbsrModes(pGpu);
}
switch (pmAction)
{
case NV_PM_ACTION_HIBERNATE:
nvp->pm_state.InHibernate = NV_TRUE;
nvp->pm_state.IntrEn = intrGetIntrEn(pIntr);
intrSetIntrEn(pIntr, INTERRUPT_TYPE_DISABLED);
gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
rmStatus = gpuEnterHibernate(pGpu);
gpumgrSetBcEnabledStatus(pGpu, bcState);
break;
case NV_PM_ACTION_STANDBY:
nvp->pm_state.InHibernate = NV_FALSE;
nvp->pm_state.IntrEn = intrGetIntrEn(pIntr);
intrSetIntrEn(pIntr, INTERRUPT_TYPE_DISABLED);
gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
rmStatus = gpuEnterStandby(pGpu);
gpumgrSetBcEnabledStatus(pGpu, bcState);
break;
case NV_PM_ACTION_RESUME:
gpumgrSetBcEnabledStatus(pGpu, NV_FALSE);
if (nvp->pm_state.InHibernate)
{
gpuResumeFromHibernate(pGpu);
}
else
{
gpuResumeFromStandby(pGpu);
}
intrSetIntrEn(pIntr, nvp->pm_state.IntrEn);
gpumgrSetBcEnabledStatus(pGpu, bcState);
NvU32 ac_plugged = 0;
if (IsMobile(pGpu))
{
if (nv_acpi_get_powersource(&ac_plugged) == NV_OK)
{
//
// As we have already acquired the API and GPU locks here, we
// call RmPowerSourceChangeEvent directly.
//
RmPowerSourceChangeEvent(nv, !ac_plugged);
}
}
break;
default:
rmStatus = NV_ERR_INVALID_ARGUMENT;
break;
}
pMemoryManager->fixedFbsrModesMask = 0;
}
return rmStatus;
}
static NV_STATUS
RmPowerManagement(
OBJGPU *pGpu,
nv_pm_action_t pmAction
)
{
NV_STATUS rmStatus;
rmStatus = RmPowerManagementInternal(pGpu, pmAction);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_power_management(
nvidia_stack_t *sp,
nv_state_t *pNv,
nv_pm_action_t pmAction
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu != NULL)
{
if ((rmStatus = os_ref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE)) == NV_OK)
{
// LOCK: acquire GPUs lock
if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DYN_POWER)) == NV_OK)
{
{
rmStatus = RmPowerManagement(pGpu, pmAction);
}
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
}
os_unref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE);
}
}
// UNLOCK: release API lock
rmapiLockRelease();
}
NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}

View File

@@ -70,7 +70,7 @@ static NvBool RmIsDeviceRefNeeded(NVOS54_PARAMETERS *pApi)
{
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
case NV00FD_CTRL_CMD_ATTACH_GPU:
return NV_TRUE;
default:
return NV_FALSE;
@@ -88,8 +88,8 @@ static NV_STATUS RmGetDeviceFd(NVOS54_PARAMETERS *pApi, NvS32 *pFd)
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
paramSize = sizeof(NV00FD_CTRL_ATTACH_MEM_PARAMS);
case NV00FD_CTRL_CMD_ATTACH_GPU:
paramSize = sizeof(NV00FD_CTRL_ATTACH_GPU_PARAMS);
break;
default:
return NV_ERR_INVALID_ARGUMENT;
@@ -103,8 +103,8 @@ static NV_STATUS RmGetDeviceFd(NVOS54_PARAMETERS *pApi, NvS32 *pFd)
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
*pFd = (NvS32)((NV00FD_CTRL_ATTACH_MEM_PARAMS *)pKernelParams)->devDescriptor;
case NV00FD_CTRL_CMD_ATTACH_GPU:
*pFd = (NvS32)((NV00FD_CTRL_ATTACH_GPU_PARAMS *)pKernelParams)->devDescriptor;
break;
default:
NV_ASSERT(0);

View File

@@ -83,15 +83,6 @@ NV_STATUS NV_API_CALL rm_transition_dynamic_power(
return NV_OK;
}
NV_STATUS NV_API_CALL rm_power_management(
nvidia_stack_t *sp,
nv_state_t *pNv,
nv_pm_action_t pmAction
)
{
return NV_OK;
}
const char* NV_API_CALL rm_get_vidmem_power_status(
nvidia_stack_t *sp,
nv_state_t *pNv

View File

@@ -447,11 +447,14 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
const NvU8 *pMdevUuid,
NvU32 vgpuTypeId,
NvU16 *vgpuId,
NvU32 gpuPciBdf
NvU32 gpuPciBdf,
NvBool *is_driver_vm
)
{
THREAD_STATE_NODE threadState;
OBJSYS *pSys = SYS_GET_INSTANCE();
void *fp = NULL;
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
NV_STATUS rmStatus = NV_OK;
NV_ENTER_RM_RUNTIME(sp,fp);
@@ -463,6 +466,8 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(
rmStatus = kvgpumgrCreateRequestVgpu(pNv->gpu_id, pMdevUuid,
vgpuTypeId, vgpuId, gpuPciBdf);
*is_driver_vm = pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED);
// UNLOCK: release API lock
rmapiLockRelease();
}
@@ -979,6 +984,9 @@ NV_STATUS osIsVgpuVfioPresent(void)
void initVGXSpecificRegistry(OBJGPU *pGpu)
{
NvU32 data32;
osWriteRegistryDword(pGpu, NV_REG_STR_RM_POWER_FEATURES, 0x55455555);
osWriteRegistryDword(pGpu, NV_REG_STR_RM_INFOROM_DISABLE_BBX,
NV_REG_STR_RM_INFOROM_DISABLE_BBX_YES);
#if !defined(NVCPU_X86_64)
osWriteRegistryDword(pGpu, NV_REG_STR_RM_BAR2_APERTURE_SIZE_MB, 4);
#endif

View File

@@ -45,6 +45,7 @@
#include "mem_mgr/io_vaspace.h"
#include <diagnostics/journal.h>
#include "gpu/mem_mgr/mem_desc.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "core/thread_state.h"
#include <nvacpitypes.h>
#include <platform/acpi_common.h>
@@ -299,7 +300,13 @@ void* osMapKernelSpace(
offset = (Start & ~os_page_mask);
Start &= os_page_mask;
Size = ((Size + offset + ~os_page_mask) & os_page_mask);
if (!portSafeAddU64(Size, offset, &Size) ||
!portSafeAddU64(Size, ~os_page_mask, &Size))
{
return NULL;
}
Size &= os_page_mask;
ptr = os_map_kernel_space(Start, Size, Mode);
if (ptr != NULL)
@@ -892,6 +899,7 @@ NV_STATUS osAllocPagesInternal(
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
void *pMemData = NULL;
NV_STATUS status;
NvS32 nodeId = -1;
memdescSetAddress(pMemDesc, NvP64_NULL);
memdescSetMemData(pMemDesc, NULL, NULL);
@@ -923,16 +931,19 @@ NV_STATUS osAllocPagesInternal(
if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE)))
nv->force_dma32_alloc = NV_TRUE;
status = nv_alloc_pages(
NV_GET_NV_STATE(pGpu),
NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
memdescGetContiguity(pMemDesc, AT_CPU),
memdescGetCpuCacheAttrib(pMemDesc),
pSys->getProperty(pSys,
PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
unencrypted,
memdescGetPteArray(pMemDesc, AT_CPU),
&pMemData);
{
status = nv_alloc_pages(
NV_GET_NV_STATE(pGpu),
NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
memdescGetContiguity(pMemDesc, AT_CPU),
memdescGetCpuCacheAttrib(pMemDesc),
pSys->getProperty(pSys,
PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
unencrypted,
nodeId,
memdescGetPteArray(pMemDesc, AT_CPU),
&pMemData);
}
if (nv && nv->force_dma32_alloc)
nv->force_dma32_alloc = NV_FALSE;
@@ -942,7 +953,7 @@ NV_STATUS osAllocPagesInternal(
{
return status;
}
//
// If the OS layer doesn't think in RM page size, we need to inflate the
// PTE array into RM pages.
@@ -5240,3 +5251,4 @@ osDmabufIsSupported(void)
{
return os_dma_buf_enabled;
}

View File

@@ -404,7 +404,7 @@ void RmFreeUnusedClients(
NvU32 *pClientList;
NvU32 numClients, i;
NV_STATUS status;
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
//
// The 'nvfp' pointer uniquely identifies an open instance in kernel space
@@ -427,7 +427,7 @@ void RmFreeUnusedClients(
if (numClients != 0)
{
pRmApi->FreeClientList(pRmApi, pClientList, numClients);
pRmApi->DisableClients(pRmApi, pClientList, numClients);
portMemFree(pClientList);
}
@@ -1008,6 +1008,16 @@ static NV_STATUS RmPerformVersionCheck(
NvBool relaxed = NV_FALSE;
NvU32 i;
//
// rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within
// NV_RM_API_VERSION_STRING_LENGTH, so that:
//
// (1) If the versions don't match, we can return rmStr in
// pParams->versionString.
// (2) The below loop is guaranteed to not overrun rmStr.
//
ct_assert(sizeof(NV_VERSION_STRING) <= NV_RM_API_VERSION_STRING_LENGTH);
if (dataSize != sizeof(nv_ioctl_rm_api_version_t))
return NV_ERR_INVALID_ARGUMENT;
@@ -1020,11 +1030,11 @@ static NV_STATUS RmPerformVersionCheck(
pParams->reply = NV_RM_API_VERSION_REPLY_RECOGNIZED;
//
// the client requested to override the version check; just return
// success.
// the client is just querying the version, not verifying against expected.
//
if (pParams->cmd == NV_RM_API_VERSION_CMD_OVERRIDE)
if (pParams->cmd == NV_RM_API_VERSION_CMD_QUERY)
{
os_string_copy(pParams->versionString, rmStr);
return NV_OK;
}
@@ -1037,19 +1047,6 @@ static NV_STATUS RmPerformVersionCheck(
relaxed = NV_TRUE;
}
//
// rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within
// NV_RM_API_VERSION_STRING_LENGTH, so that:
//
// (1) If the versions don't match, we can return rmStr in
// pParams->versionString.
// (2) The below loop is guaranteed to not overrun rmStr.
//
if ((os_string_length(rmStr) + 1) > NV_RM_API_VERSION_STRING_LENGTH)
{
return NV_ERR_BUFFER_TOO_SMALL;
}
for (i = 0; i < NV_RM_API_VERSION_STRING_LENGTH; i++)
{
clientCh = pParams->versionString[i];
@@ -1353,6 +1350,24 @@ RmDmabufPutClientAndDevice(
NV_ASSERT_OK(kmigmgrDecRefCount(pKernelMIGGpuInstance->pShare));
}
static void
RmHandleNvpcfEvents(
nv_state_t *pNv
)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
THREAD_STATE_NODE threadState;
if (RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_ACPI) == NULL)
{
return;
}
gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_NVPCF_EVENTS, NULL, 0, 0, 0);
RmUnixRmApiEpilogue(pNv, &threadState);
}
/*
* ---------------------------------------------------------------------------
*
@@ -2446,6 +2461,27 @@ NV_STATUS NV_API_CALL rm_ioctl(
return rmStatus;
}
static void _deferredClientListFreeCallback(void *unused)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
NV_STATUS status = serverFreeDisabledClients(&g_resServ, 0, pSys->clientListDeferredFreeLimit);
//
// Possible return values:
// NV_WARN_MORE_PROCESSING_REQUIRED - Iteration limit reached, need to call again
// NV_ERR_IN_USE - Already running on another thread, try again later
// In both cases, schedule a worker to clean up anything that remains
//
if (status != NV_OK)
{
status = osQueueSystemWorkItem(_deferredClientListFreeCallback, unused);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_NOTICE, "Failed to schedule deferred free callback. Freeing immediately.\n");
serverFreeDisabledClients(&g_resServ, 0, 0);
}
}
}
void NV_API_CALL rm_cleanup_file_private(
nvidia_stack_t *sp,
nv_state_t *pNv,
@@ -2454,19 +2490,23 @@ void NV_API_CALL rm_cleanup_file_private(
{
THREAD_STATE_NODE threadState;
void *fp;
RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL);
RM_API *pRmApi;
RM_API_CONTEXT rmApiContext = {0};
NvU32 i;
OBJSYS *pSys = SYS_GET_INSTANCE();
NV_ENTER_RM_RUNTIME(sp,fp);
pRmApi = rmapiGetInterface(RMAPI_EXTERNAL);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
threadStateSetTimeoutOverride(&threadState, 10 * 1000);
if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK)
if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK) {
NV_EXIT_RM_RUNTIME(sp,fp);
return;
}
// LOCK: acquire API lock
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
// LOCK: acquire API lock. Low priority so cleanup doesn't block active threads
if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_LOW_PRIORITY, RM_LOCK_MODULES_OSAPI) == NV_OK)
{
// Unref any object which was exported on this file.
if (nvfp->handles != NULL)
@@ -2487,13 +2527,21 @@ void NV_API_CALL rm_cleanup_file_private(
nvfp->maxHandles = 0;
}
// Free any RM clients associated with this file.
// Disable any RM clients associated with this file.
RmFreeUnusedClients(pNv, nvfp);
// Unless configured otherwise, immediately free all disabled clients
if (!pSys->bUseDeferredClientListFree)
serverFreeDisabledClients(&g_resServ, RM_LOCK_STATES_API_LOCK_ACQUIRED, 0);
// UNLOCK: release API lock
rmapiLockRelease();
}
// Start the deferred free callback if necessary
if (pSys->bUseDeferredClientListFree)
_deferredClientListFreeCallback(NULL);
rmapiEpilogue(pRmApi, &rmApiContext);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -2929,7 +2977,7 @@ static NV_STATUS RmRunNanoTimerCallback(
THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
// Call timer event service
status = tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, (PTMR_EVENT)pTmrEvent);
status = tmrEventServiceTimer(pGpu, pTmr, (PTMR_EVENT)pTmrEvent);
// Out of conflicting thread
threadStateFreeISRAndDeferredIntHandler(&threadState,
@@ -5318,25 +5366,6 @@ void NV_API_CALL rm_dma_buf_put_client_and_device(
// NOTE: Used only on VMware
//
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(
nvidia_stack_t *sp,
NvBool is_driver_vm
)
{
OBJSYS *pSys;
POBJHYPERVISOR pHypervisor;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
pSys = SYS_GET_INSTANCE();
pHypervisor = SYS_GET_HYPERVISOR(pSys);
pHypervisor->setProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED, is_driver_vm);
NV_EXIT_RM_RUNTIME(sp,fp);
}
NvBool NV_API_CALL rm_is_altstack_in_use(void)
{
#if defined(__use_altstack__)
@@ -5345,3 +5374,21 @@ NvBool NV_API_CALL rm_is_altstack_in_use(void)
return NV_FALSE;
#endif
}
void NV_API_CALL rm_acpi_nvpcf_notify(
nvidia_stack_t *sp
)
{
void *fp;
OBJGPU *pGpu = gpumgrGetGpu(0);
NV_ENTER_RM_RUNTIME(sp,fp);
if (pGpu != NULL)
{
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
RmHandleNvpcfEvents(nv);
}
NV_EXIT_RM_RUNTIME(sp,fp);
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -665,14 +665,14 @@ osInitNvMapping(
"NV fb using linear address : 0x%p\n", pGpu->registerAccess.gpuFbAddr);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED, NV_TRUE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_FALSE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_TRUE);
if (osReadRegistryDword(pGpu,
NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR, &data) == NV_OK)
{
if (data == NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE)
if (data == NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_DISABLE)
{
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_TRUE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_FALSE);
}
}
@@ -680,6 +680,12 @@ osInitNvMapping(
{
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE);
}
else
{
{
pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE);
}
}
if ((osReadRegistryDword(NULL,
NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS,
@@ -766,6 +772,11 @@ RmSetConsolePreservationParams(OBJGPU *pGpu)
if (os_is_vgx_hyper() || IS_VIRTUAL(pGpu))
return;
if (!gpuFuseSupportsDisplay_HAL(pGpu))
{
return;
}
//
// Check the OS layer for any video memory used by a console
// driver that should be reserved.
@@ -937,6 +948,9 @@ RmInitNvDevice(
return;
}
// Setup GPU scalability
(void) RmInitScalability(pGpu);
return;
}

View File

@@ -66,13 +66,6 @@ initUnixOSFunctionPointers(OBJOS *pOS)
pOS->osNv_cpuid = nv_cpuid;
#endif
pOS->osCallACPI_DSM = osCallACPI_DSM;
pOS->osCallACPI_DDC = osCallACPI_DDC;
pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM;
pOS->osCallACPI_DOD = osCallACPI_DOD;
pOS->osCallACPI_MXDM = osCallACPI_MXDM;
pOS->osCallACPI_MXDS = osCallACPI_MXDS;
pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled;
}

View File

@@ -570,6 +570,17 @@ NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *sp,
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *sp,
gpuDeviceHandle device)
{
NV_STATUS rmStatus;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsFlushReplayableFaultBuffer(device);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp,
gpuDeviceHandle device,
gpuAccessCntrInfo *accessCntrInfo)

View File

@@ -303,9 +303,6 @@ NV_STATUS osIsr(
*/
NvBool osLockShouldToggleInterrupts(OBJGPU *pGpu)
{
if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
return NV_TRUE;
return (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) &&
gpuIsStateLoaded(pGpu) &&
!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SLI_LINK_CODEPATH));
@@ -591,10 +588,10 @@ NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(
goto done;
}
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
if (IS_GSP_CLIENT(pGpu))
{
status = NV_ERR_NOT_SUPPORTED;
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
status = NV_OK;
goto done;
}
@@ -614,11 +611,6 @@ static NV_STATUS _rm_gpu_copy_mmu_faults_unlocked(
THREAD_STATE_NODE *pThreadState
)
{
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
if (IS_GSP_CLIENT(pGpu))
{
return NV_ERR_NOT_SUPPORTED;
}
return NV_OK;
}
@@ -643,6 +635,7 @@ NV_STATUS rm_gpu_handle_mmu_faults(
if (pGpu == NULL)
{
NV_EXIT_RM_RUNTIME(sp,fp);
return NV_ERR_OBJECT_NOT_FOUND;
}
@@ -666,7 +659,7 @@ NV_STATUS rm_gpu_handle_mmu_faults(
{
// We have to clear the top level interrupt bit here since otherwise
// the bottom half will attempt to service the interrupt on the CPU
// side before GSP recieves the notification and services it
// side before GSP receives the notification and services it
kgmmuClearNonReplayableFaultIntr_HAL(pGpu, pKernelGmmu, &threadState);
status = intrTriggerPrivDoorbell_HAL(pGpu, pIntr, NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_HANDLE);
@@ -681,7 +674,15 @@ NV_STATUS rm_gpu_handle_mmu_faults(
}
else
{
status = _rm_gpu_copy_mmu_faults_unlocked(pGpu, faultsCopied, &threadState);
if (IS_GSP_CLIENT(pGpu))
{
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
status = NV_OK;
}
else
{
status = _rm_gpu_copy_mmu_faults_unlocked(pGpu, faultsCopied, &threadState);
}
}
threadStateFreeISRLockless(&threadState, pGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);