This commit is contained in:
Andy Ritger
2022-11-10 08:39:33 -08:00
parent 7c345b838b
commit 758b4ee818
1323 changed files with 262135 additions and 60754 deletions

View File

@@ -24,6 +24,8 @@
#ifndef _NV_NB_REGS_H_
#define _NV_NB_REGS_H_
#include "nvdevid.h"
typedef struct
{
NvU32 subsystem_vendor_id;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
#include <os/os.h>
#include <ctrl/ctrl402c.h>
#include <gpu/disp/kern_disp_max.h>
#include <gpu/disp/kern_disp_type.h>
#include <efi-console.h>
@@ -39,13 +40,16 @@
#define NV_PRIV_REG_RD16(b,o) ((b)->Reg016[(o)/2])
#define NV_PRIV_REG_RD32(b,o) ((b)->Reg032[(o)/4])
#define NV_NUM_CR_REGS 0x99
struct OBJGPU;
#define NV_BIT_PLANE_SIZE (64 * 1024)
#define NV_NUM_VGA_BIT_PLANES 4
typedef struct
{
NvBool baseValid;
VGAADDRDESC base;
NvBool workspaceBaseValid;
VGAADDRDESC workspaceBase;
NvU32 vesaMode;
} nv_vga_t;
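
For scale, the two constants just added size the VGA font save buffer (font_bitplanes) that this change adds to the per-GPU state further down in this file. A hypothetical standalone check, not driver code, assuming the defines above are in scope:

/* font_bitplanes[NV_NUM_VGA_BIT_PLANES][NV_BIT_PLANE_SIZE] works out to
 * 4 * 64 KiB = 262144 bytes (256 KiB) of VGA save state per GPU. */
_Static_assert(NV_NUM_VGA_BIT_PLANES * NV_BIT_PLANE_SIZE == 256 * 1024,
               "VGA font save area is 256 KiB per GPU");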
/*
* device state during Power Management
@@ -113,12 +117,10 @@ typedef struct nv_i2c_adapter_entry_s
#define NV_INIT_FLAG_GPU_STATE_LOAD 0x0008
#define NV_INIT_FLAG_FIFO_WATCHDOG 0x0010
#define NV_INIT_FLAG_CORE_LOGIC 0x0020
#define NV_INIT_FLAG_HIRES 0x0040
#define NV_INIT_FLAG_DISP_STATE_SAVED 0x0080
#define NV_INIT_FLAG_GPUMGR_ATTACH 0x0100
#define NV_INIT_FLAG_PUBLIC_I2C 0x0400
#define NV_INIT_FLAG_SCALABILITY 0x0800
#define NV_INIT_FLAG_DMA 0x1000
#define NV_INIT_FLAG_GPUMGR_ATTACH 0x0040
#define NV_INIT_FLAG_PUBLIC_I2C 0x0080
#define NV_INIT_FLAG_SCALABILITY 0x0100
#define NV_INIT_FLAG_DMA 0x0200
#define MAX_I2C_ADAPTERS NV402C_CTRL_NUM_I2C_PORTS
@@ -298,6 +300,12 @@ typedef struct nv_dynamic_power_s
*/
NvBool b_fine_not_supported;
/*
* This flag is used to check if a workitem is queued for
* RmQueueIdleSustainedWorkitem().
*/
NvBool b_idle_sustained_workitem_queued;
/*
* Counter to track clients disallowing GCOFF.
*/
@@ -321,14 +329,10 @@ typedef struct
NvU32 pmc_boot_0;
nv_vga_t vga;
nv_efi_t efi;
NvU8 scr_vga_active[OBJ_MAX_HEADS];
NvU8 scr_dcb_index_lo[OBJ_MAX_HEADS];
NvU8 scr_dcb_index_hi[OBJ_MAX_HEADS];
NvU8 font_bitplanes[NV_NUM_VGA_BIT_PLANES][NV_BIT_PLANE_SIZE];
NvU32 flags;
NvU32 status;

View File

@@ -40,6 +40,7 @@
#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-caps.h>
#include <nv-firmware.h>
#include <nv-ioctl.h>
#include <nvmisc.h>
@@ -160,8 +161,14 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_MAUD,
TEGRASOC_WHICH_CLK_AZA_2XBIT,
TEGRASOC_WHICH_CLK_AZA_BIT,
TEGRA234_CLK_MIPI_CAL,
TEGRA234_CLK_UART_FST_MIPI_CAL,
TEGRASOC_WHICH_CLK_MIPI_CAL,
TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL,
TEGRASOC_WHICH_CLK_SOR0_DIV,
TEGRASOC_WHICH_CLK_DISP_ROOT,
TEGRASOC_WHICH_CLK_HUB_ROOT,
TEGRASOC_WHICH_CLK_PLLA_DISP,
TEGRASOC_WHICH_CLK_PLLA_DISPHUB,
TEGRASOC_WHICH_CLK_PLLA,
TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only.
} TEGRASOC_WHICH_CLK;
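
A minimal sketch of the boundary check that the TEGRASOC_WHICH_CLK_MAX sentinel enables (is_valid_tegra_clk is a hypothetical helper, not driver code):

static NvBool is_valid_tegra_clk(TEGRASOC_WHICH_CLK clk)
{
    /* Valid clock IDs are everything below the sentinel. */
    return clk < TEGRASOC_WHICH_CLK_MAX;
}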
@@ -304,7 +311,7 @@ typedef struct nv_alloc_mapping_context_s {
typedef enum
{
NV_SOC_IRQ_DISPLAY_TYPE,
NV_SOC_IRQ_DISPLAY_TYPE = 0x1,
NV_SOC_IRQ_DPAUX_TYPE,
NV_SOC_IRQ_GPIO_TYPE,
NV_SOC_IRQ_HDACODEC_TYPE,
@@ -368,6 +375,7 @@ typedef struct nv_state_t
nv_aperture_t *mipical_regs;
nv_aperture_t *fb, ud;
nv_aperture_t *simregs;
nv_aperture_t *emc_regs;
NvU32 num_dpaux_instance;
NvU32 interrupt_line;
@@ -430,9 +438,6 @@ typedef struct nv_state_t
/* Variable to force allocation of 32-bit addressable memory */
NvBool force_dma32_alloc;
/* Variable to track if device has entered dynamic power state */
NvBool dynamic_power_entered;
/* PCI power state should be D0 during system suspend */
NvBool d0_state_in_suspend;
@@ -465,6 +470,9 @@ typedef struct nv_state_t
/* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
NvBool nvpcf_dsm_in_gpu_scope;
/* Bool to check if the device received a shutdown notification */
NvBool is_shutdown;
} nv_state_t;
// These define need to be in sync with defines in system.h
@@ -473,6 +481,10 @@ typedef struct nv_state_t
#define OS_TYPE_SUNOS 0x3
#define OS_TYPE_VMWARE 0x4
#define NVFP_TYPE_NONE 0x0
#define NVFP_TYPE_REFCOUNTED 0x1
#define NVFP_TYPE_REGISTERED 0x2
struct nv_file_private_t
{
NvHandle *handles;
@@ -482,6 +494,7 @@ struct nv_file_private_t
nv_file_private_t *ctl_nvfp;
void *ctl_nvfp_priv;
NvU32 register_or_refcount;
};
// Forward define the gpu ops structures
@@ -513,8 +526,9 @@ typedef struct UvmGpuChannelResourceBindParams_tag *nvgpuChannelResourceBindPar
typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocParams_t;
typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t;
typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t;
typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64);
typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64);
typedef enum UvmPmaGpuMemoryType_tag nvgpuGpuMemoryType_t;
typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64, nvgpuGpuMemoryType_t);
typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemoryType_t);
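
Both PMA eviction callback types now take a trailing nvgpuGpuMemoryType_t, presumably so the callee knows which memory type an eviction request targets. A minimal conforming stub, purely illustrative (the function and parameter names are hypothetical):

static NV_STATUS uvmEvictPagesStub(void *callbackData, NvU32 pageSize,
                                   NvU64 *pPages, NvU32 count,
                                   NvU64 physBegin, NvU64 physEnd,
                                   nvgpuGpuMemoryType_t memType)
{
    (void)callbackData; (void)pageSize; (void)pPages;
    (void)count; (void)physBegin; (void)physEnd; (void)memType;
    return NV_ERR_NOT_SUPPORTED;   /* stub: decline to evict */
}

An nvPmaEvictRangeCallback stub would gain the same trailing argument.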
/*
* flags
@@ -566,12 +580,6 @@ typedef enum
NV_POWER_STATE_RUNNING
} nv_power_state_t;
typedef enum
{
NV_FIRMWARE_GSP,
NV_FIRMWARE_GSP_LOG
} nv_firmware_t;
#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga)
#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL)
@@ -587,12 +595,6 @@ typedef enum
#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
((nv)->iso_iommu_present)
/*
* NVIDIA ACPI event ID to be passed into the core NVIDIA driver for
* AC/DC event.
*/
#define NV_SYSTEM_ACPI_BATTERY_POWER_EVENT 0x8002
/*
* GPU add/remove events
*/
@@ -604,8 +606,6 @@ typedef enum
* to core NVIDIA driver for ACPI events.
*/
#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT 0
#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC 0
#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY 1
#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED 0
#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED 1
@@ -616,14 +616,18 @@ typedef enum
#define NV_EVAL_ACPI_METHOD_NVIF 0x01
#define NV_EVAL_ACPI_METHOD_WMMX 0x02
#define NV_I2C_CMD_READ 1
#define NV_I2C_CMD_WRITE 2
#define NV_I2C_CMD_SMBUS_READ 3
#define NV_I2C_CMD_SMBUS_WRITE 4
#define NV_I2C_CMD_SMBUS_QUICK_WRITE 5
#define NV_I2C_CMD_SMBUS_QUICK_READ 6
#define NV_I2C_CMD_SMBUS_BLOCK_READ 7
#define NV_I2C_CMD_SMBUS_BLOCK_WRITE 8
typedef enum {
NV_I2C_CMD_READ = 1,
NV_I2C_CMD_WRITE,
NV_I2C_CMD_SMBUS_READ,
NV_I2C_CMD_SMBUS_WRITE,
NV_I2C_CMD_SMBUS_QUICK_WRITE,
NV_I2C_CMD_SMBUS_QUICK_READ,
NV_I2C_CMD_SMBUS_BLOCK_READ,
NV_I2C_CMD_SMBUS_BLOCK_WRITE,
NV_I2C_CMD_BLOCK_READ,
NV_I2C_CMD_BLOCK_WRITE
} nv_i2c_cmd_t;
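
With the commands now a typed enum (and two new I2C block-transfer commands appended), callers pass nv_i2c_cmd_t instead of a raw NvU8; the rm_i2c_transfer prototype is updated accordingly later in this diff. An illustrative call, assuming sp, nv, and pI2cAdapter come from the surrounding driver context, with made-up address and command values:

NvU8      value  = 0;
NV_STATUS status = rm_i2c_transfer(sp, nv, pI2cAdapter,
                                   NV_I2C_CMD_SMBUS_READ,
                                   0x50,        /* example slave address */
                                   0x00,        /* example command byte  */
                                   1, &value);  /* read one byte         */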
// Flags needed by OSAllocPagesNode
#define NV_ALLOC_PAGES_NODE_NONE 0x0
@@ -636,27 +640,33 @@ typedef enum
#define NV_GET_NV_STATE(pGpu) \
(nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL)
#define IS_REG_OFFSET(nv, offset, length) \
(((offset) >= (nv)->regs->cpu_address) && \
(((offset) + ((length)-1)) <= \
(nv)->regs->cpu_address + ((nv)->regs->size-1)))
static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((offset >= nv->regs->cpu_address) &&
((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1))));
}
#define IS_FB_OFFSET(nv, offset, length) \
(((nv)->fb) && ((offset) >= (nv)->fb->cpu_address) && \
(((offset) + ((length)-1)) <= (nv)->fb->cpu_address + ((nv)->fb->size-1)))
static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->fb) && (offset >= nv->fb->cpu_address) &&
((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}
#define IS_UD_OFFSET(nv, offset, length) \
(((nv)->ud.cpu_address != 0) && ((nv)->ud.size != 0) && \
((offset) >= (nv)->ud.cpu_address) && \
(((offset) + ((length)-1)) <= (nv)->ud.cpu_address + ((nv)->ud.size-1)))
static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) &&
(offset >= nv->ud.cpu_address) &&
((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1))));
}
#define IS_IMEM_OFFSET(nv, offset, length) \
(((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) && \
((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) && \
((offset) >= (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) && \
(((offset) + ((length) - 1)) <= \
(nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + \
((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size - 1)))
static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) &&
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) &&
(offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) &&
((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address +
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))));
}
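
Converting these function-like macros to static inline functions gives type-checked arguments and single evaluation of each argument, without changing call sites. A sketch of the kind of bounds check a caller might perform (the caller context here is hypothetical):

/* offset/length come from a user mmap request in this hypothetical
 * caller; reject anything outside the register and framebuffer BARs. */
if (!IS_REG_OFFSET(nv, offset, length) && !IS_FB_OFFSET(nv, offset, length))
    return NV_ERR_INVALID_ADDRESS;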
#define NV_RM_MAX_MSIX_LINES 8
@@ -787,7 +797,7 @@ NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *);
NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *);
NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *);
const void*NV_API_CALL nv_get_firmware(nv_state_t *, nv_firmware_t, const void **, NvU32 *);
const void*NV_API_CALL nv_get_firmware(nv_state_t *, nv_firmware_type_t, nv_firmware_chip_family_t, const void **, NvU32 *);
void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
@@ -828,6 +838,7 @@ NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*);
int NV_API_CALL nv_cap_drv_init(void);
void NV_API_CALL nv_cap_drv_exit(void);
NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *);
NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
NvU32 NV_API_CALL nv_get_os_type(void);
@@ -916,11 +927,11 @@ NvBool NV_API_CALL rm_is_supported_pci_device(NvU8 pci_class,
void NV_API_CALL rm_i2c_remove_adapters (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_i2c_is_smbus_capable (nvidia_stack_t *, nv_state_t *, void *);
NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, NvU8, NvU8, NvU8, NvU32, NvU8 *);
NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, nv_i2c_cmd_t, NvU8, NvU8, NvU32, NvU8 *);
NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU32);
NV_STATUS NV_API_CALL rm_system_event (nvidia_stack_t *, NvU32, NvU32);
void NV_API_CALL rm_power_source_change_event (nvidia_stack_t *, NvU32);
void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *);
NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
@@ -944,6 +955,7 @@ void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
@@ -969,12 +981,13 @@ const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *,
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *);
NvBool NV_API_CALL rm_is_altstack_in_use(void);
/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 **, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *);
NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
@@ -998,6 +1011,16 @@ static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv)
return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL;
}
/* nano second resolution timer callback structure */
typedef struct nv_nano_timer nv_nano_timer_t;
/* nano timer functions */
void NV_API_CALL nv_create_nano_timer(nv_state_t *, void *pTmrEvent, nv_nano_timer_t **);
void NV_API_CALL nv_start_nano_timer(nv_state_t *nv, nv_nano_timer_t *, NvU64 timens);
NV_STATUS NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t *, void *pTmrEvent);
void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
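
A sketch of the expected lifecycle for the new nanosecond-resolution timer API, assuming nv and pTmrEvent come from the surrounding driver context (the 1 ms delay is arbitrary):

nv_nano_timer_t *pTimer = NULL;

nv_create_nano_timer(nv, pTmrEvent, &pTimer);  /* allocate and bind event */
nv_start_nano_timer(nv, pTimer, 1000000);      /* arm: fires in 10^6 ns   */
/* ... on expiry, the OS layer invokes rm_run_nano_timer_callback() ... */
nv_cancel_nano_timer(nv, pTimer);              /* disarm if still pending */
nv_destroy_nano_timer(nv, pTimer);             /* free                    */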
#if defined(NVCPU_X86_64)
static inline NvU64 nv_rdtsc(void)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -143,6 +143,14 @@ void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
void* NV_API_CALL os_alloc_rwlock (void);
void NV_API_CALL os_free_rwlock (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_write (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *);
void NV_API_CALL os_release_rwlock_read (void *);
void NV_API_CALL os_release_rwlock_write (void *);
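
A minimal usage sketch for the new reader/writer lock primitives; the blocking acquire variants presumably follow the same may-sleep rules as the semaphores above:

void *rwlock = os_alloc_rwlock();
if (rwlock != NULL)
{
    if (os_acquire_rwlock_read(rwlock) == NV_OK)        /* shared         */
    {
        /* ... read shared state ... */
        os_release_rwlock_read(rwlock);
    }
    if (os_cond_acquire_rwlock_write(rwlock) == NV_OK)  /* try-lock write */
    {
        /* ... mutate shared state ... */
        os_release_rwlock_write(rwlock);
    }
    os_free_rwlock(rwlock);
}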
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NvBool NV_API_CALL os_is_isr (void);

View File

@@ -128,7 +128,7 @@ void RmI2cAddGpuPorts(nv_state_t *);
NV_STATUS RmInitX86EmuState(OBJGPU *);
void RmFreeX86EmuState(OBJGPU *);
NV_STATUS RmSystemEvent(nv_state_t *, NvU32, NvU32);
NV_STATUS RmPowerSourceChangeEvent(nv_state_t *, NvU32);
const NvU8 *RmGetGpuUuidRaw(nv_state_t *);
@@ -144,7 +144,7 @@ NV_STATUS rm_free_os_event (NvHandle, NvU32);
NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *);
void rm_client_free_os_events (NvHandle);
NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32);
NV_STATUS rm_create_mmap_context (NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32);
NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *);
NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -46,6 +46,8 @@
#include <class/cl003e.h> // NV01_MEMORY_SYSTEM
#include <class/cl0071.h> // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
#include <ctrl/ctrl00fd.h>
#define NV_CTL_DEVICE_ONLY(nv) \
{ \
if (((nv)->flags & NV_FLAG_CONTROL) == 0) \
@@ -64,6 +66,56 @@
} \
}
static NvBool RmIsDeviceRefNeeded(NVOS54_PARAMETERS *pApi)
{
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
return NV_TRUE;
default:
return NV_FALSE;
}
}
static NV_STATUS RmGetDeviceFd(NVOS54_PARAMETERS *pApi, NvS32 *pFd)
{
RMAPI_PARAM_COPY paramCopy;
void *pKernelParams;
NvU32 paramSize;
NV_STATUS status;
*pFd = -1;
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
paramSize = sizeof(NV00FD_CTRL_ATTACH_MEM_PARAMS);
break;
default:
return NV_ERR_INVALID_ARGUMENT;
}
RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, pApi->params, paramSize, 1);
status = rmapiParamsAcquire(&paramCopy, NV_TRUE);
if (status != NV_OK)
return status;
switch(pApi->cmd)
{
case NV00FD_CTRL_CMD_ATTACH_MEM:
*pFd = (NvS32)((NV00FD_CTRL_ATTACH_MEM_PARAMS *)pKernelParams)->devDescriptor;
break;
default:
NV_ASSERT(0);
break;
}
NV_ASSERT(rmapiParamsRelease(&paramCopy) == NV_OK);
return status;
}
// Only return errors through pApi->status
static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo)
{
@@ -91,6 +143,7 @@ static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secI
pageCount = (1 + ((allocSize - 1) / os_page_size));
writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2);
flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags);
rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags);
if (rmStatus == NV_OK)
@@ -243,6 +296,7 @@ NV_STATUS RmIoctl(
secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER;
secInfo.paramLocation = PARAM_LOCATION_USER;
secInfo.pProcessToken = NULL;
secInfo.gpuOsInfo = NULL;
secInfo.clientOSInfo = nvfp->ctl_nvfp;
if (secInfo.clientOSInfo == NULL)
secInfo.clientOSInfo = nvfp;
@@ -282,7 +336,7 @@ NV_STATUS RmIoctl(
(!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) &&
(pParms->status == NV_OK))
{
if (rm_create_mmap_context(nv, pParms->hRoot,
if (rm_create_mmap_context(pParms->hRoot,
pParms->hObjectParent, pParms->hObjectNew,
pParms->pMemory, pParms->limit + 1, 0,
NV_MEMORY_DEFAULT,
@@ -464,7 +518,7 @@ NV_STATUS RmIoctl(
if (pParms->status == NV_OK)
{
pParms->status = rm_create_mmap_context(nv, pParms->hClient,
pParms->status = rm_create_mmap_context(pParms->hClient,
pParms->hDevice, pParms->hMemory,
pParms->pLinearAddress, pParms->length,
pParms->offset,
@@ -700,6 +754,9 @@ NV_STATUS RmIoctl(
case NV_ESC_RM_CONTROL:
{
NVOS54_PARAMETERS *pApi = data;
void *priv = NULL;
nv_file_private_t *dev_nvfp = NULL;
NvS32 fd;
NV_CTL_DEVICE_ONLY(nv);
@@ -709,7 +766,51 @@ NV_STATUS RmIoctl(
goto done;
}
if (RmIsDeviceRefNeeded(pApi))
{
rmStatus = RmGetDeviceFd(pApi, &fd);
if (rmStatus != NV_OK)
{
goto done;
}
dev_nvfp = nv_get_file_private(fd, NV_FALSE, &priv);
if (dev_nvfp == NULL)
{
rmStatus = NV_ERR_INVALID_DEVICE;
goto done;
}
// Check to avoid cyclic dependency with NV_ESC_REGISTER_FD
if (!portAtomicCompareAndSwapU32(&dev_nvfp->register_or_refcount,
NVFP_TYPE_REFCOUNTED,
NVFP_TYPE_NONE))
{
// Is this fd already refcounted?
if (dev_nvfp->register_or_refcount != NVFP_TYPE_REFCOUNTED)
{
nv_put_file_private(priv);
rmStatus = NV_ERR_IN_USE;
goto done;
}
}
secInfo.gpuOsInfo = priv;
}
Nv04ControlWithSecInfo(pApi, secInfo);
if ((pApi->status != NV_OK) && (priv != NULL))
{
//
// No need to reset `register_or_refcount`, as it may have been set
// by previous successful calls; it is cleared when the FD is closed.
//
nv_put_file_private(priv);
secInfo.gpuOsInfo = NULL;
}
break;
}
@@ -751,7 +852,7 @@ NV_STATUS RmIoctl(
}
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
if (rmStatus != NV_OK)
goto done;
@@ -759,7 +860,7 @@ NV_STATUS RmIoctl(
if (nvfp->ctl_nvfp != NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
rmStatus = NV_ERR_INVALID_STATE;
goto done;
}
@@ -776,7 +877,7 @@ NV_STATUS RmIoctl(
if (ctl_nvfp == NULL)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
@@ -787,11 +888,23 @@ NV_STATUS RmIoctl(
{
nv_put_file_private(priv);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
// Check to avoid cyclic dependency with device refcounting
if (!portAtomicCompareAndSwapU32(&nvfp->register_or_refcount,
NVFP_TYPE_REGISTERED,
NVFP_TYPE_NONE))
{
nv_put_file_private(priv);
// UNLOCK: release API lock
rmapiLockRelease();
rmStatus = NV_ERR_IN_USE;
goto done;
}
//
// nvfp->ctl_nvfp is read outside the lock, so set it atomically.
// Note that once set, this can never be removed until the fd
@@ -803,7 +916,7 @@ NV_STATUS RmIoctl(
nvfp->ctl_nvfp_priv = priv;
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
// NOTE: nv_put_file_private(priv) is not called here. It MUST be
// called during cleanup of this nvfp.
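
Taken together with the NV_ESC_RM_CONTROL path above, register_or_refcount acts as a one-way tri-state guard: an fd moves from NVFP_TYPE_NONE to exactly one of NVFP_TYPE_REFCOUNTED or NVFP_TYPE_REGISTERED and keeps that role until close. A sketch of the pattern (an interpretation, not driver code; note the real register path rejects even a repeated REGISTERED transition):

static NV_STATUS nvfpClaimRole(nv_file_private_t *nvfp, NvU32 role)
{
    /* Try NONE -> role; portAtomicCompareAndSwapU32(ptr, new, old)
     * returns true when the swap happened. */
    if (portAtomicCompareAndSwapU32(&nvfp->register_or_refcount,
                                    role, NVFP_TYPE_NONE))
        return NV_OK;

    /* Refcounting tolerates repeats; any other conflict is fatal. */
    if (role == NVFP_TYPE_REFCOUNTED &&
        nvfp->register_or_refcount == NVFP_TYPE_REFCOUNTED)
        return NV_OK;

    return NV_ERR_IN_USE;
}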

View File

@@ -207,16 +207,6 @@ os_unref_dynamic_power
{
}
NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *limitRated,
NvU32 *limitCurr
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS
deviceCtrlCmdOsUnixVTSwitch_IMPL
(
@@ -235,24 +225,6 @@ NV_STATUS NV_API_CALL rm_save_low_res_mode(
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
return NV_OK;
}
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
return NV_OK;
}
NV_STATUS RmInitX86EmuState(OBJGPU *pGpu)
{
return NV_OK;

View File

@@ -1,151 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvstatus.h"
#include "os/os.h"
#include "nv.h"
#include "nv-hypervisor.h"
HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
{
return OS_HYPERVISOR_UNKNOWN;
}
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU32 *numVgpuTypes,
NvU32 **vgpuTypeIds,
NvBool isVirtfn
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU8 cmd,
NvU32 domain,
NvU8 bus,
NvU8 slot,
NvU8 function,
NvBool isMdevAttached,
void *vf_pci_info
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
NvU32 vgpuTypeId,
char *buffer,
int type_info,
NvU8 devfn
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_create_request(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU32 vgpuTypeId,
NvU16 *vgpuId,
NvU32 gpuPciBdf,
NvBool *is_driver_vm
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_update_request(
nvidia_stack_t *sp ,
const NvU8 *pMdevUuid,
VGPU_DEVICE_STATE deviceState,
NvU64 *offsets,
NvU64 *sizes,
const char *configParams
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(
nvidia_stack_t *sp ,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 **offsets,
NvU64 **sizes,
NvU32 *numAreas
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_gpu_bind_event(
nvidia_stack_t *sp
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_start(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
void *waitQueue,
NvS32 *returnStatus,
NvU8 *vmName,
NvU32 qemuPid
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_delete(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
NvU16 vgpuId
)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 *size,
NvU32 regionIndex,
void *pVgpuVfioRef
)
{
return NV_ERR_NOT_SUPPORTED;
}
void initVGXSpecificRegistry(OBJGPU *pGpu)
{}

File diff suppressed because it is too large

View File

@@ -59,6 +59,12 @@
#include "mem_mgr/mem.h"
#include "gpu/mem_mgr/virt_mem_allocator_common.h"
#include <acpidsmguids.h>
#include <pex.h>
#include "gps.h"
#include "jt.h"
extern const char *ppOsBugCheckBugcodeStr[];
@@ -716,6 +722,7 @@ NvU32 osReleaseRmSema(void *pSema, OBJGPU *pDpcGpu)
void osSpinLoop(void)
{
// Enable this code to get debug prints from Libos.
}
NvU64 osGetMaxUserVa(void)
@@ -931,6 +938,11 @@ NV_STATUS osAllocPagesInternal(
nv->force_dma32_alloc = NV_FALSE;
}
if (status != NV_OK)
{
return status;
}
//
// If the OS layer doesn't think in RM page size, we need to inflate the
// PTE array into RM pages.
@@ -1597,6 +1609,7 @@ void osDevWriteReg032(
{
NvBool vgpuHandled = NV_FALSE;
vgpuDevWriteReg032(pGpu, thisAddress, thisValue, &vgpuHandled);
if (vgpuHandled)
{
return;
@@ -1656,6 +1669,7 @@ NvU32 osDevReadReg032(
NvU32 retval = 0;
NvBool vgpuHandled = NV_FALSE;
retval = vgpuDevReadReg032(pGpu, thisAddress, &vgpuHandled);
if (vgpuHandled)
{
return retval;
@@ -1782,6 +1796,16 @@ void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *p
NV_ASSERT((NV_GPU_MODE_GRAPHICS_MODE == gpuMode) ||
(NV_GPU_MODE_COMPUTE_MODE == gpuMode));
if (hypervisorIsVgxHyper())
{
//
// 1.8 seconds is chosen because it is 90% of the overall hard limit of
// 2.0 seconds imposed by WDDM driver rules. The primary use case of VGX
// is currently Windows, so 1.8 seconds is the default.
//
*pTimeoutUs = 1.8 * 1000000;
}
else
{
switch (gpuMode)
{
@@ -1803,7 +1827,6 @@ void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *p
{
*pScale = 60; // 1s -> 1m
}
return;
}
@@ -1940,22 +1963,28 @@ _initializeExportObjectFd
NV_STATUS status;
RsResourceRef *pResourceRef;
Device *pDevice;
NvU32 deviceInstance = NV_MAX_DEVICES;
if (nvfp->handles != NULL)
{
return NV_ERR_STATE_IN_USE;
}
status = serverutilGetResourceRef(hClient, hDevice, &pResourceRef);
if (status != NV_OK)
if (hDevice != 0)
{
return status;
}
status = serverutilGetResourceRef(hClient, hDevice, &pResourceRef);
if (status != NV_OK)
{
return status;
}
pDevice = dynamicCast(pResourceRef->pResource, Device);
if (pDevice == NULL)
{
return NV_ERR_INVALID_PARAMETER;
pDevice = dynamicCast(pResourceRef->pResource, Device);
if (pDevice == NULL)
{
return NV_ERR_INVALID_PARAMETER;
}
deviceInstance = pDevice->deviceInst;
}
NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void **)&nvfp->handles,
@@ -1965,7 +1994,7 @@ _initializeExportObjectFd
sizeof(nvfp->handles[0]) * maxObjects);
nvfp->maxHandles = maxObjects;
nvfp->deviceInstance = pDevice->deviceInst;
nvfp->deviceInstance = deviceInstance;
if (metadata != NULL)
{
@@ -2431,7 +2460,115 @@ NV_STATUS osCallACPI_DSM
NvU16 *pSize
)
{
return NV_ERR_NOT_SUPPORTED;
NV_STATUS status;
NvU8 *pAcpiDsmGuid = NULL;
NvU32 acpiDsmRev;
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvU16 acpiDsmInArgSize = 4;
NvBool acpiNvpcfDsmFunction = NV_FALSE;
// Do any handling/remapping of the GUID that is needed.
status = checkDsmCall(pGpu,
(ACPI_DSM_FUNCTION *) &acpiDsmFunction,
&acpiDsmSubFunction,
pInOut,
pSize);
// Return if the subfunction is not supported or we are returning cached data.
if (status != NV_WARN_MORE_PROCESSING_REQUIRED)
{
return status;
}
switch ((NvU32) acpiDsmFunction)
{
case ACPI_DSM_FUNCTION_NBSI:
pAcpiDsmGuid = (NvU8 *) &NBSI_DSM_GUID;
acpiDsmRev = NBSI_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_NVHG:
pAcpiDsmGuid = (NvU8 *) &NVHG_DSM_GUID;
acpiDsmRev = NVHG_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_MXM:
pAcpiDsmGuid = (NvU8 *) &DSM_MXM_GUID;
acpiDsmRev = ACPI_MXM_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_NBCI:
pAcpiDsmGuid = (NvU8 *) &NBCI_DSM_GUID;
acpiDsmRev = NBCI_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_NVOP:
pAcpiDsmGuid = (NvU8 *) &NVOP_DSM_GUID;
acpiDsmRev = NVOP_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_PCFG:
pAcpiDsmGuid = (NvU8 *) &PCFG_DSM_GUID;
acpiDsmRev = PCFG_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_PEX:
pAcpiDsmGuid = (NvU8 *) &PEX_DSM_GUID;
acpiDsmRev = PEX_REVISION_ID;
if (acpiDsmSubFunction == PEX_FUNC_SETLTRLATENCY)
{
acpiDsmInArgSize = (3 + *pSize);
}
break;
case (ACPI_DSM_FUNCTION_JT):
pAcpiDsmGuid = (NvU8 *) &JT_DSM_GUID;
acpiDsmRev = JT_REVISION_ID;
break;
case ACPI_DSM_FUNCTION_NVPCF:
{
pAcpiDsmGuid = (NvU8 *)&NVPCF_ACPI_DSM_GUID;
acpiDsmRev = NVPCF_ACPI_DSM_REVISION_ID;
acpiDsmInArgSize = (*pSize);
acpiNvpcfDsmFunction = NV_TRUE;
break;
}
case ACPI_DSM_FUNCTION_NVPCF_2X:
pAcpiDsmGuid = (NvU8 *)&NVPCF_ACPI_DSM_GUID;
acpiDsmRev = NVPCF_2X_ACPI_DSM_REVISION_ID;
acpiDsmInArgSize = (*pSize);
if (!nv->nvpcf_dsm_in_gpu_scope)
{
acpiNvpcfDsmFunction = NV_TRUE;
}
break;
default:
return NV_ERR_NOT_SUPPORTED;
break;
}
status = nv_acpi_dsm_method(nv,
pAcpiDsmGuid,
acpiDsmRev,
acpiNvpcfDsmFunction,
acpiDsmSubFunction,
pInOut,
acpiDsmInArgSize,
NULL,
pInOut,
pSize);
if (status == NV_OK)
{
if (acpiDsmSubFunction == NV_ACPI_ALL_FUNC_SUPPORT)
{
// If handling the get-supported-functions list, cache the result for later calls.
cacheDsmSupportedFunction(pGpu, acpiDsmFunction, acpiDsmSubFunction, pInOut, *pSize);
}
}
else if (nvp->b_mobile_config_enabled)
{
NV_PRINTF(LEVEL_ERROR,
"osCallACPI_DSM: Error during 0x%x DSM subfunction 0x%x! status=0x%x\n",
acpiDsmFunction, acpiDsmSubFunction, status);
}
return status;
}
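
An illustrative caller querying the supported-subfunction mask for the JT _DSM, mirroring the pattern RmCheckNvpcfDsmScope uses later in this diff (the choice of JT here is arbitrary):

NvU32 supportedFuncs = 0;
NvU16 dsmDataSize    = sizeof(supportedFuncs);

if (osCallACPI_DSM(pGpu, ACPI_DSM_FUNCTION_JT, NV_ACPI_ALL_FUNC_SUPPORT,
                   &supportedFuncs, &dsmDataSize) == NV_OK)
{
    /* Bit N set => DSM subfunction N is implemented by the SBIOS. */
}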
NV_STATUS osCallACPI_DOD
@@ -4744,6 +4881,35 @@ osTegraSocGetImpImportData
return NV_ERR_NOT_SUPPORTED;
}
/*!
* @brief Tells BPMP whether or not RFL is valid
*
* Display HW generates an ok_to_switch signal which asserts when mempool
* occupancy is high enough to be able to turn off memory long enough to
* execute a dramclk frequency switch without underflowing display output.
* ok_to_switch drives the RFL ("request for latency") signal in the memory
* unit, and the switch sequencer waits for this signal to go active before
* starting a dramclk switch. However, if the signal is not valid (e.g., if
* display HW or SW has not been initialized yet), the switch sequencer ignores
* the signal. This API tells BPMP whether or not the signal is valid.
*
* @param[in] pOsGpuInfo Per GPU Linux state
* @param[in] bEnable True if RFL will be valid; false if invalid
*
* @returns NV_OK if successful,
* NV_ERR_NOT_SUPPORTED if the functionality is not available, or
* NV_ERR_GENERIC if some other kind of error occurred.
*/
NV_STATUS
osTegraSocEnableDisableRfl
(
OS_GPU_INFO *pOsGpuInfo,
NvBool bEnable
)
{
return NV_ERR_NOT_SUPPORTED;
}
/*!
* @brief Allocates a specified amount of ISO memory bandwidth for display
*
@@ -4793,7 +4959,8 @@ osCreateNanoTimer
void **pTimer
)
{
return NV_ERR_NOT_SUPPORTED;
nv_create_nano_timer(pOsGpuInfo, pTmrEvent, (nv_nano_timer_t **)pTimer);
return NV_OK;
}
/*!
@@ -4811,7 +4978,8 @@ osStartNanoTimer
NvU64 timeNs
)
{
return NV_ERR_NOT_SUPPORTED;
nv_start_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer, timeNs);
return NV_OK;
}
/*!
@@ -4827,8 +4995,8 @@ osCancelNanoTimer
void *pTimer
)
{
return NV_ERR_NOT_SUPPORTED;
nv_cancel_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
return NV_OK;
}
/*!
@@ -4845,7 +5013,8 @@ osDestroyNanoTimer
void *pTimer
)
{
return NV_ERR_NOT_SUPPORTED;
nv_destroy_nano_timer(pOsGpuInfo, (nv_nano_timer_t *)pTimer);
return NV_OK;
}
/*!
@@ -5016,6 +5185,52 @@ osIsGpuAccessible
return nv_is_gpu_accessible(NV_GET_NV_STATE(pGpu));
}
/*!
* @brief Check whether GPU has received a shutdown notification from the OS
*/
NvBool
osIsGpuShutdown
(
OBJGPU *pGpu
)
{
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
return nv ? nv->is_shutdown : NV_TRUE;
}
/*!
* @brief Check whether the GPU OS info matches.
*
* @param[in] pGpu GPU object pointer
*
* @returns NvBool, NV_TRUE if matched.
*/
NvBool
osMatchGpuOsInfo
(
OBJGPU *pGpu,
void *pOsInfo
)
{
return nv_match_gpu_os_info(NV_GET_NV_STATE(pGpu), pOsInfo);
}
/*!
* @brief Release GPU OS info.
*
* @param[in] pOsInfo GPU OS info pointer
*
* @returns void
*/
void
osReleaseGpuOsInfo
(
void *pOsInfo
)
{
nv_put_file_private(pOsInfo);
}
NvBool
osDmabufIsSupported(void)
{

View File

@@ -105,7 +105,7 @@ RM_API *RmUnixRmApiPrologue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode, NvU
{
threadStateInit(pThreadNode, THREAD_STATE_FLAGS_NONE);
if ((rmApiLockAcquire(API_LOCK_FLAGS_NONE, module)) == NV_OK)
if ((rmapiLockAcquire(API_LOCK_FLAGS_NONE, module)) == NV_OK)
{
if ((pNv->rmapi.hClient != 0) &&
(os_ref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE) == NV_OK))
@@ -113,7 +113,7 @@ RM_API *RmUnixRmApiPrologue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode, NvU
return rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
}
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE);
@@ -132,7 +132,7 @@ RM_API *RmUnixRmApiPrologue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode, NvU
void RmUnixRmApiEpilogue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode)
{
os_unref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE);
rmApiLockRelease();
rmapiLockRelease();
threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE);
}
@@ -209,9 +209,9 @@ const NvU8 * RmGetGpuUuidRaw(
gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1)
| DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY);
if (!rmApiLockIsOwner())
if (!rmapiLockIsOwner())
{
rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU);
rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU);
if (rmStatus != NV_OK)
{
return NULL;
@@ -224,7 +224,7 @@ const NvU8 * RmGetGpuUuidRaw(
{
if (isApiLockTaken == NV_TRUE)
{
rmApiLockRelease();
rmapiLockRelease();
}
return NULL;
@@ -233,7 +233,7 @@ const NvU8 * RmGetGpuUuidRaw(
rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags);
if (isApiLockTaken == NV_TRUE)
{
rmApiLockRelease();
rmapiLockRelease();
}
if (rmStatus != NV_OK)
@@ -780,10 +780,8 @@ static NV_STATUS RmAccessRegistry(
RmStatus = NV_ERR_INVALID_STRING_LENGTH;
goto done;
}
// get access to client's parmStr
RMAPI_PARAM_COPY_INIT(parmStrParamCopy, tmpParmStr, clientParmStrAddress, ParmStrLength, 1);
parmStrParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER;
RmStatus = rmapiParamsAcquire(&parmStrParamCopy, NV_TRUE);
if (RmStatus != NV_OK)
{
@@ -1100,66 +1098,55 @@ static NV_STATUS RmPerformVersionCheck(
return NV_ERR_GENERIC;
}
NV_STATUS RmSystemEvent(
//
// Check if the NVPCF _DSM functions are implemented under
// NVPCF scope or GPU device scope.
// As part of RM initialization, this function checks whether the
// NVPCF _DSM functions are implemented under NVPCF scope; if that
// check fails, it clears the cached DSM support status and retries
// the NVPCF _DSM function under GPU device scope.
//
static void RmCheckNvpcfDsmScope(
OBJGPU *pGpu
)
{
NvU32 supportedFuncs;
NvU16 dsmDataSize = sizeof(supportedFuncs);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
ACPI_DSM_FUNCTION acpiDsmFunction = ACPI_DSM_FUNCTION_NVPCF_2X;
NvU32 acpiDsmSubFunction = NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED;
nv->nvpcf_dsm_in_gpu_scope = NV_FALSE;
if ((osCallACPI_DSM(pGpu, acpiDsmFunction, acpiDsmSubFunction,
&supportedFuncs, &dsmDataSize) != NV_OK) ||
(FLD_TEST_DRF(PCF0100, _CTRL_CONFIG_DSM,
_FUNC_GET_SUPPORTED_IS_SUPPORTED, _NO, supportedFuncs)) ||
(dsmDataSize != sizeof(supportedFuncs)))
{
nv->nvpcf_dsm_in_gpu_scope = NV_TRUE;
// clear cached DSM function status
uncacheDsmFuncStatus(pGpu, acpiDsmFunction, acpiDsmSubFunction);
}
}
NV_STATUS RmPowerSourceChangeEvent(
nv_state_t *pNv,
NvU32 event_type,
NvU32 event_val
)
{
NV_STATUS rmStatus = NV_OK;
NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS params;
NV2080_CTRL_PERF_SET_POWERSTATE_PARAMS params = {0};
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
switch (event_type)
{
case NV_SYSTEM_ACPI_BATTERY_POWER_EVENT:
{
Nv2080PowerEventNotification powerParams;
portMemSet(&powerParams, 0, sizeof(powerParams));
powerParams.bSwitchToAC = NV_TRUE;
powerParams.bGPUCapabilityChanged = NV_FALSE;
powerParams.displayMaskAffected = 0;
params.powerStateInfo.powerState = event_val ? NV2080_CTRL_PERF_POWER_SOURCE_BATTERY :
NV2080_CTRL_PERF_POWER_SOURCE_AC;
params.eventType = NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE;
if (event_val == NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY)
{
params.eventData = NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_BATTERY;
powerParams.bSwitchToAC = NV_FALSE;
}
else if (event_val == NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC)
{
params.eventData = NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_AC;
powerParams.bSwitchToAC = NV_TRUE;
}
else
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
}
if (rmStatus == NV_OK)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv);
rmStatus = pRmApi->Control(pRmApi,
pNv->rmapi.hClient,
pNv->rmapi.hClient,
NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT,
(void *)&params,
sizeof(NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS));
//
// TODO: bug 2812848 Investigate if we can use system event
// or if we can broadcast NV2080_NOTIFIERS_POWER_EVENT for all GPUs
//
gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POWER_EVENT,
&powerParams, sizeof(powerParams), 0, 0);
}
break;
}
default:
rmStatus = NV_ERR_INVALID_ARGUMENT;
}
return rmStatus;
return pRmApi->Control(pRmApi, pNv->rmapi.hClient,
pNv->rmapi.hSubDevice,
NV2080_CTRL_CMD_PERF_SET_POWERSTATE,
&params, sizeof(params));
}
/*!
@@ -1175,6 +1162,51 @@ static void RmHandleDNotifierEvent(
NvU32 event_type
)
{
NV2080_CTRL_PERF_SET_AUX_POWER_STATE_PARAMS params = { 0 };
RM_API *pRmApi;
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK;
switch (event_type)
{
case ACPI_NOTIFY_POWER_LEVEL_D1:
params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P0;
break;
case ACPI_NOTIFY_POWER_LEVEL_D2:
params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P1;
break;
case ACPI_NOTIFY_POWER_LEVEL_D3:
params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P2;
break;
case ACPI_NOTIFY_POWER_LEVEL_D4:
params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P3;
break;
case ACPI_NOTIFY_POWER_LEVEL_D5:
params.powerState = NV2080_CTRL_PERF_AUX_POWER_STATE_P4;
break;
default:
return;
}
pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_ACPI);
if (pRmApi == NULL)
{
return;
}
rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient,
pNv->rmapi.hSubDevice,
NV2080_CTRL_CMD_PERF_SET_AUX_POWER_STATE,
&params, sizeof(params));
RmUnixRmApiEpilogue(pNv, &threadState);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"%s: Failed to handle ACPI D-Notifier event, status=0x%x\n",
__FUNCTION__, rmStatus);
}
}
static NV_STATUS
@@ -1372,7 +1404,7 @@ NvBool NV_API_CALL rm_init_adapter(
threadStateInit(&threadState, THREAD_STATE_FLAGS_DEVICE_INIT);
// LOCK: acquire API lock
if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK)
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK)
{
if (!((gpumgrQueryGpuDrainState(pNv->gpu_id, &bEnabled, NULL) == NV_OK)
&& bEnabled))
@@ -1388,7 +1420,7 @@ NvBool NV_API_CALL rm_init_adapter(
}
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -1411,7 +1443,7 @@ void NV_API_CALL rm_disable_adapter(
NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
// LOCK: acquire API lock
if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK)
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK)
{
if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE)
{
@@ -1423,7 +1455,7 @@ void NV_API_CALL rm_disable_adapter(
}
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
NV_ASSERT_OK(os_flush_work_queue(pNv->queue));
@@ -1443,12 +1475,12 @@ void NV_API_CALL rm_shutdown_adapter(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK)
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK)
{
RmShutdownAdapter(pNv);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -1487,7 +1519,7 @@ NV_STATUS NV_API_CALL rm_acquire_api_lock(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
@@ -1506,7 +1538,7 @@ NV_STATUS NV_API_CALL rm_release_api_lock(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
@@ -1972,7 +2004,6 @@ done:
// TODO: Bug 1802250: [uvm8] Use an alt stack in all functions in unix/src/osapi.c
NV_STATUS rm_create_mmap_context(
nv_state_t *pNv,
NvHandle hClient,
NvHandle hDevice,
NvHandle hMemory,
@@ -1985,7 +2016,7 @@ NV_STATUS rm_create_mmap_context(
{
NV_STATUS rmStatus = NV_OK;
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) == NV_OK)
{
RmClient *pClient;
@@ -2008,7 +2039,7 @@ NV_STATUS rm_create_mmap_context(
serverutilReleaseClient(LOCK_ACCESS_READ, pClient);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
return rmStatus;
@@ -2029,6 +2060,7 @@ static NV_STATUS RmGetAllocPrivate(
PMEMORY_DESCRIPTOR pMemDesc;
NvU32 pageOffset;
NvU64 pageCount;
NvU64 endingOffset;
RsResourceRef *pResourceRef;
RmResource *pRmResource;
void *pMemData;
@@ -2040,7 +2072,7 @@ static NV_STATUS RmGetAllocPrivate(
pageOffset = (offset & ~os_page_mask);
offset &= os_page_mask;
NV_ASSERT_OR_RETURN(rmApiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
NV_ASSERT_OR_RETURN(rmapiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE);
if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClient))
return NV_ERR_INVALID_CLIENT;
@@ -2089,8 +2121,9 @@ static NV_STATUS RmGetAllocPrivate(
if (rmStatus != NV_OK)
goto done;
pageCount = ((pageOffset + length) / os_page_size);
pageCount += (*pPageIndex + (((pageOffset + length) % os_page_size) ? 1 : 0));
endingOffset = pageOffset + length;
pageCount = (endingOffset / os_page_size);
pageCount += (*pPageIndex + ((endingOffset % os_page_size) ? 1 : 0));
if (pageCount > NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount))
{
@@ -2150,12 +2183,12 @@ NV_STATUS rm_get_adapter_status(
NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM;
// LOCK: acquire API lock
if (rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI) == NV_OK)
if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI) == NV_OK)
{
rmStatus = RmGetAdapterStatus(pNv, pStatus);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
return rmStatus;
@@ -2419,7 +2452,7 @@ void NV_API_CALL rm_cleanup_file_private(
return;
// LOCK: acquire API lock
if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
{
// Unref any object which was exported on this file.
if (nvfp->handles != NULL)
@@ -2444,7 +2477,7 @@ void NV_API_CALL rm_cleanup_file_private(
RmFreeUnusedClients(pNv, nvfp);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
rmapiEpilogue(pRmApi, &rmApiContext);
@@ -2472,12 +2505,12 @@ void NV_API_CALL rm_unbind_lock(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
if (rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK)
{
RmUnbindLock(pNv);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -2493,12 +2526,12 @@ NV_STATUS rm_alloc_os_event(
NV_STATUS RmStatus;
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
{
RmStatus = RmAllocOsEvent(hClient, nvfp, fd);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
return RmStatus;
@@ -2512,12 +2545,12 @@ NV_STATUS rm_free_os_event(
NV_STATUS RmStatus;
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
{
RmStatus = RmFreeOsEvent(hClient, fd);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
return RmStatus;
@@ -2532,12 +2565,12 @@ NV_STATUS rm_get_event_data(
NV_STATUS RmStatus;
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
{
RmStatus = RmGetEventData(nvfp, pEvent, MoreEvents, NV_TRUE);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
return RmStatus;
@@ -2569,7 +2602,7 @@ NV_STATUS NV_API_CALL rm_read_registry_dword(
if (nv != NULL)
{
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) != NV_OK)
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) != NV_OK)
{
NV_EXIT_RM_RUNTIME(sp,fp);
return RmStatus;
@@ -2586,7 +2619,7 @@ NV_STATUS NV_API_CALL rm_read_registry_dword(
if (isApiLockTaken == NV_TRUE)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
NV_EXIT_RM_RUNTIME(sp,fp);
@@ -2610,7 +2643,7 @@ NV_STATUS NV_API_CALL rm_write_registry_dword(
if (nv != NULL)
{
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
if ((RmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
{
NV_EXIT_RM_RUNTIME(sp,fp);
return RmStatus;
@@ -2624,7 +2657,7 @@ NV_STATUS NV_API_CALL rm_write_registry_dword(
if (isApiLockTaken == NV_TRUE)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
NV_EXIT_RM_RUNTIME(sp,fp);
@@ -2649,7 +2682,7 @@ NV_STATUS NV_API_CALL rm_write_registry_binary(
if (nv != NULL)
{
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
if ((RmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
{
NV_EXIT_RM_RUNTIME(sp,fp);
return RmStatus;
@@ -2663,7 +2696,7 @@ NV_STATUS NV_API_CALL rm_write_registry_binary(
if (isApiLockTaken == NV_TRUE)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
NV_EXIT_RM_RUNTIME(sp,fp);
@@ -2688,7 +2721,7 @@ NV_STATUS NV_API_CALL rm_write_registry_string(
if (nv != NULL)
{
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK)
{
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
@@ -2702,7 +2735,7 @@ NV_STATUS NV_API_CALL rm_write_registry_string(
if (isApiLockTaken == NV_TRUE)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
NV_EXIT_RM_RUNTIME(sp,fp);
@@ -2864,12 +2897,13 @@ static NV_STATUS RmRunNanoTimerCallback(
OBJSYS *pSys = SYS_GET_INSTANCE();
POBJTMR pTmr = GPU_GET_TIMER(pGpu);
THREAD_STATE_NODE threadState;
NV_STATUS status = NV_OK;
NV_STATUS status = NV_OK;
// LOCK: try to acquire GPUs lock
if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK)
if ((status = rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK)
{
return status;
PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT) pTmrEvent;
// We failed to acquire the lock; schedule a timer to try again.
return osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->super.pOSTmrCBdata, 1000);
}
if ((status = osCondAcquireRmSema(pSys->pSema)) != NV_OK)
@@ -3003,7 +3037,7 @@ NV_STATUS rm_access_registry(
(AccessType == NVOS38_ACCESS_TYPE_READ_BINARY);
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(bReadOnly ? RMAPI_LOCK_FLAGS_READ : RMAPI_LOCK_FLAGS_NONE,
if ((RmStatus = rmapiLockAcquire(bReadOnly ? RMAPI_LOCK_FLAGS_READ : RMAPI_LOCK_FLAGS_NONE,
RM_LOCK_MODULES_OSAPI)) == NV_OK)
{
RmStatus = RmAccessRegistry(hClient,
@@ -3019,7 +3053,7 @@ NV_STATUS rm_access_registry(
Entry);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
return RmStatus;
@@ -3036,7 +3070,7 @@ NV_STATUS rm_update_device_mapping_info(
NV_STATUS RmStatus;
// LOCK: acquire API lock
if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK)
if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK)
{
RmStatus = RmUpdateDeviceMappingInfo(hClient,
hDevice,
@@ -3045,7 +3079,7 @@ NV_STATUS rm_update_device_mapping_info(
pNewCpuAddress);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
return RmStatus;
@@ -3060,7 +3094,6 @@ static void rm_is_device_rm_firmware_capable(
{
NvBool bIsFirmwareCapable = NV_FALSE;
NvBool bEnableByDefault = NV_FALSE;
NvU16 pciDeviceId = pNv->pci_info.device_id;
if (NV_IS_SOC_DISPLAY_DEVICE(pNv))
{
@@ -3068,7 +3101,7 @@ static void rm_is_device_rm_firmware_capable(
}
else
{
bIsFirmwareCapable = gpumgrIsDeviceRmFirmwareCapable(pciDeviceId,
bIsFirmwareCapable = gpumgrIsDeviceRmFirmwareCapable(pNv->pci_info.device_id,
pmcBoot42,
&bEnableByDefault);
}
@@ -3135,6 +3168,7 @@ NV_STATUS NV_API_CALL rm_is_supported_device(
rmStatus = NV_ERR_OPERATING_SYSTEM;
goto threadfree;
}
NvU32 pmc_boot_1 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_1);
pmc_boot_0 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_0);
pmc_boot_42 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_42);
@@ -3195,6 +3229,10 @@ NV_STATUS NV_API_CALL rm_is_supported_device(
goto print_unsupported;
}
rmStatus = rm_is_vgpu_supported_device(pNv, pmc_boot_1);
if (rmStatus != NV_OK)
goto print_unsupported;
goto threadfree;
print_unsupported:
@@ -3331,7 +3369,7 @@ static NV_STATUS RmNonDPAuxI2CTransfer
(
nv_state_t *pNv,
NvU8 portId,
NvU8 type,
nv_i2c_cmd_t type,
NvU8 addr,
NvU8 command,
NvU32 len,
@@ -3411,6 +3449,17 @@ static NV_STATUS RmNonDPAuxI2CTransfer
params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW;
break;
case NV_I2C_CMD_BLOCK_WRITE:
params->transData.i2cBufferData.bWrite = NV_TRUE;
/* fall through */
case NV_I2C_CMD_BLOCK_READ:
params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW;
params->transData.i2cBufferData.registerAddress = command;
params->transData.i2cBufferData.messageLength = len;
params->transData.i2cBufferData.pMessage = pData;
break;
default:
portMemFree(params);
return NV_ERR_INVALID_ARGUMENT;
@@ -3447,7 +3496,7 @@ NV_STATUS NV_API_CALL rm_i2c_transfer(
nvidia_stack_t *sp,
nv_state_t *pNv,
void *pI2cAdapter,
NvU8 type,
nv_i2c_cmd_t type,
NvU8 addr,
NvU8 command,
NvU32 len,
@@ -3470,7 +3519,7 @@ NV_STATUS NV_API_CALL rm_i2c_transfer(
if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C)
{
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
goto finish;
unlockApi = NV_TRUE;
@@ -3547,7 +3596,7 @@ finish:
if (unlockApi)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -3803,7 +3852,7 @@ NvBool NV_API_CALL rm_i2c_is_smbus_capable(
if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C)
{
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK)
goto semafinish;
unlock = NV_TRUE;
@@ -3842,7 +3891,7 @@ semafinish:
if (unlock)
{
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -3872,23 +3921,27 @@ NV_STATUS NV_API_CALL rm_perform_version_check(
return rmStatus;
}
NV_STATUS NV_API_CALL rm_system_event(
//
// Handles the power source change (AC/DC) event for notebooks.
// Notebooks from Maxwell onward have only one GPU, so this function grabs
// the first GPU from GpuMgr and calls the subdevice RmControl.
//
void NV_API_CALL rm_power_source_change_event(
nvidia_stack_t *sp,
NvU32 event_type,
NvU32 event_val
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus;
void *fp;
void *fp;
nv_state_t *nv;
OBJGPU *pGpu = gpumgrGetGpu(0);// Grab the first GPU
OBJGPU *pGpu = gpumgrGetGpu(0);
NV_STATUS rmStatus = NV_OK;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == NV_OK)
{
if (pGpu != NULL)
{
@@ -3900,7 +3953,7 @@ NV_STATUS NV_API_CALL rm_system_event(
if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) ==
NV_OK)
{
rmStatus = RmSystemEvent(nv, event_type, event_val);
rmStatus = RmPowerSourceChangeEvent(nv, event_val);
// UNLOCK: release GPU lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
@@ -3908,14 +3961,19 @@ NV_STATUS NV_API_CALL rm_system_event(
os_unref_dynamic_power(nv, NV_DYNAMIC_PM_FINE);
}
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
}
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
"%s: Failed to handle Power Source change event, status=0x%x\n",
__FUNCTION__, rmStatus);
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages(
@@ -3942,7 +4000,7 @@ NV_STATUS NV_API_CALL rm_p2p_dma_map_pages(
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
{
OBJGPU *pGpu = gpumgrGetGpuFromUuid(pGpuUuid,
DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) |
@@ -3984,7 +4042,7 @@ NV_STATUS NV_API_CALL rm_p2p_dma_map_pages(
}
}
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4009,7 +4067,7 @@ NV_STATUS NV_API_CALL rm_p2p_get_gpu_info(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P);
rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P);
if (rmStatus == NV_OK)
{
OBJGPU *pGpu;
@@ -4039,7 +4097,7 @@ NV_STATUS NV_API_CALL rm_p2p_get_gpu_info(
}
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4067,7 +4125,7 @@ NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
{
rmStatus = RmP2PGetPagesPersistent(gpuVirtualAddress,
length,
@@ -4077,7 +4135,7 @@ NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent(
pPlatformData,
pGpuInfo);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4108,7 +4166,7 @@ NV_STATUS NV_API_CALL rm_p2p_get_pages(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
{
OBJGPU *pGpu;
rmStatus = RmP2PGetPagesWithoutCallbackRegistration(p2pToken,
@@ -4143,7 +4201,7 @@ NV_STATUS NV_API_CALL rm_p2p_get_pages(
}
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4170,13 +4228,13 @@ NV_STATUS NV_API_CALL rm_p2p_register_callback(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
{
rmStatus = RmP2PRegisterCallback(p2pToken, gpuVirtualAddress, length,
pPlatformData, pFreeCallback, pData);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4199,12 +4257,12 @@ NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
{
rmStatus = RmP2PPutPagesPersistent(p2pObject, pKey);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4229,7 +4287,7 @@ NV_STATUS NV_API_CALL rm_p2p_put_pages(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
{
rmStatus = RmP2PPutPages(p2pToken,
vaSpaceToken,
@@ -4237,7 +4295,7 @@ NV_STATUS NV_API_CALL rm_p2p_put_pages(
pKey);
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4253,7 +4311,6 @@ char* NV_API_CALL rm_get_gpu_uuid(
{
NV_STATUS rmStatus;
const NvU8 *pGid;
OBJGPU *pGpu;
char *pGidString;
THREAD_STATE_NODE threadState;
@@ -4285,16 +4342,7 @@ char* NV_API_CALL rm_get_gpu_uuid(
}
else
{
const char *pTmpString;
// No raw GID, but we still return a string
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (rmStatus == NV_ERR_NOT_SUPPORTED && pGpu != NULL &&
pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED))
pTmpString = "N/A";
else
pTmpString = "GPU-???????\?-???\?-???\?-???\?-????????????";
const char *pTmpString = "GPU-???????\?-???\?-???\?-???\?-????????????";
portStringCopy(pGidString, GPU_UUID_ASCII_LEN, pTmpString,
portStringLength(pTmpString) + 1);
@@ -4463,7 +4511,7 @@ NV_STATUS NV_API_CALL rm_log_gpu_crash(
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if ((status = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK)
if ((status = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
@@ -4474,7 +4522,7 @@ NV_STATUS NV_API_CALL rm_log_gpu_crash(
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
}
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4564,6 +4612,10 @@ void RmInitAcpiMethods(OBJOS *pOS, OBJSYS *pSys, OBJGPU *pGpu)
nv_acpi_methods_init(&handlesPresent);
// Check if NVPCF _DSM functions are implemented under NVPCF or GPU device scope.
RmCheckNvpcfDsmScope(pGpu);
acpiDsmInit(pGpu);
}
//
@@ -4817,7 +4869,7 @@ NvBool NV_API_CALL rm_is_device_sequestered(
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if (rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU) == NV_OK)
if (rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU) == NV_OK)
{
//
// If gpumgrQueryGpuDrainState succeeds, bDrain will be set as needed.
@@ -4827,7 +4879,7 @@ NvBool NV_API_CALL rm_is_device_sequestered(
//
(void) gpumgrQueryGpuDrainState(pNv->gpu_id, &bDrain, NULL);
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -4848,7 +4900,7 @@ void NV_API_CALL rm_check_for_gpu_surprise_removal(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock.
if ((rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK)
if ((rmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
@@ -4859,7 +4911,7 @@ void NV_API_CALL rm_check_for_gpu_surprise_removal(
}
// UNLOCK: release api lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -5011,7 +5063,7 @@ NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle(
pGpu = NV_GET_NV_PRIV_PGPU(nv);
NV_ASSERT(rmApiLockIsOwner());
NV_ASSERT(rmapiLockIsOwner());
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
@@ -5079,7 +5131,7 @@ void NV_API_CALL rm_dma_buf_undup_mem_handle(
pGpu = NV_GET_NV_PRIV_PGPU(nv);
NV_ASSERT(rmApiLockIsOwner());
NV_ASSERT(rmapiLockIsOwner());
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
@@ -5117,7 +5169,7 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
pGpu = NV_GET_NV_PRIV_PGPU(nv);
pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
NV_ASSERT(rmApiLockIsOwner());
NV_ASSERT(rmapiLockIsOwner());
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
@@ -5155,7 +5207,7 @@ NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle(
pGpu = NV_GET_NV_PRIV_PGPU(nv);
pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
NV_ASSERT(rmApiLockIsOwner());
NV_ASSERT(rmapiLockIsOwner());
NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
@@ -5186,7 +5238,7 @@ NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
if (rmStatus == NV_OK)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
@@ -5201,7 +5253,7 @@ NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(
}
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
@@ -5227,7 +5279,7 @@ void NV_API_CALL rm_dma_buf_put_client_and_device(
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
if (rmStatus == NV_OK)
{
OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv);
@@ -5242,10 +5294,24 @@ void NV_API_CALL rm_dma_buf_put_client_and_device(
}
// UNLOCK: release API lock
rmApiLockRelease();
rmapiLockRelease();
}
NV_ASSERT_OK(rmStatus);
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
}
//
// Fetches GSP ucode data for usage during RM Init
// NOTE: Used only on VMware
//
NvBool NV_API_CALL rm_is_altstack_in_use(void)
{
#if defined(__use_altstack__)
return NV_TRUE;
#else
return NV_FALSE;
#endif
}
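rm_is_altstack_in_use() reports whether the core RM binary was compiled with __use_altstack__, so the kernel interface layer can match its stack handling at runtime. A hypothetical caller fragment; the provisioning comment is an assumption, not code from the tree:
// Hypothetical caller: only provision alternate stacks when the core RM
// binary was actually built to use them.
if (rm_is_altstack_in_use())
{
    /* allocate nvidia_stack_t instances before calling into RM */
}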
View File

@@ -55,12 +55,13 @@
#include <platform/chipset/chipset.h>
#include <kernel/gpu/rc/kernel_rc.h>
#include <kernel/gpu/fifo/kernel_fifo.h>
#include <nvSha256.h>
#include <nv-firmware-chip-family-select.h>
#include <gpu/gsp/kernel_gsp.h>
#include <logdecode.h>
#include "liblogdecode.h"
#include <gpu/fsp/kern_fsp.h>
#include <mem_mgr/virt_mem_mgr.h>
#include <virtualization/kernel_vgpu_mgr.h>
#include <rmosxfac.h>
#include <gpu_mgr/gpu_mgr.h>
@@ -72,6 +73,7 @@
#include <platform/chipset/pci_pbi.h>
#include "platform/nbsi/nbsi_read.h"
#include "gpu_mgr/gpu_db.h"
#include <class/cl0080.h>
#include <class/cl0073.h>
@@ -119,7 +121,6 @@ typedef enum
/* rm firmware errors */
RM_INIT_FIRMWARE_POLICY_FAILED = 0x60,
RM_INIT_FIRMWARE_FETCH_FAILED,
RM_INIT_FIRMWARE_VALIDATION_FAILED,
RM_INIT_FIRMWARE_INIT_FAILED,
RM_INIT_MAX_FAILURES
@@ -638,6 +639,15 @@ osInitNvMapping(
sysApplyLockingPolicy(pSys);
pGpu->busInfo.IntLine = nv->interrupt_line;
//
// Set the DMA address size as soon as we have the HAL to call to
// determine the precise number of physical address bits supported
// by the architecture. DMA allocations should not be made before
// this point.
//
nv_set_dma_address_size(nv, gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM));
pGpu->dmaStartAddress = (RmPhysAddr)nv_get_dma_start_address(nv);
if (nv->fb != NULL)
{
@@ -726,15 +736,6 @@ osTeardownScalability(
return clTeardownPcie(pGpu, pCl);
}
static inline void
RmSetDeviceDmaAddressSize(
nv_state_t *nv,
NvU8 numDmaAddressBits
)
{
nv_set_dma_address_size(nv, numDmaAddressBits);
}
static void
populateDeviceAttributes(
OBJGPU *pGpu,
@@ -884,8 +885,6 @@ RmInitNvDevice(
return;
}
RmSetDeviceDmaAddressSize(nv, gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM));
os_disable_console_access();
status->rmStatus = gpumgrStateInitGpu(pGpu);
@@ -1089,7 +1088,10 @@ RmSetupRegisters(
nv->fb->cpu_address, nv->fb->size);
}
nv_os_map_kernel_space(nv, nv->regs);
{
nv_os_map_kernel_space(nv, nv->regs);
}
if (nv->regs->map == NULL)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to map regs registers!!\n");
@@ -1144,7 +1146,7 @@ NvBool RmInitPrivateState(
NV_SET_NV_PRIV(pNv, NULL);
if (!NV_IS_SOC_DISPLAY_DEVICE(pNv))
if (!NV_IS_SOC_DISPLAY_DEVICE(pNv) && !NV_IS_SOC_IGPU_DEVICE(pNv))
{
pNv->regs->map_u = os_map_kernel_space(pNv->regs->cpu_address,
os_page_size,
@@ -1185,11 +1187,13 @@ NvBool RmInitPrivateState(
pNv->iovaspace_id = nv_requires_dma_remap(pNv) ? gpuId :
NV_IOVA_DOMAIN_NONE;
kvgpumgrAttachGpu(pNv->gpu_id);
//
// Set up a reasonable default DMA address size, based on the minimum
// possible on currently supported GPUs.
//
RmSetDeviceDmaAddressSize(pNv, NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);
nv_set_dma_address_size(pNv, NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);
os_mem_set(nvp, 0, sizeof(*nvp));
nvp->status = NV_ERR_INVALID_STATE;
@@ -1269,6 +1273,8 @@ void RmFreePrivateState(
gpumgrUnregisterGpuId(pNv->gpu_id);
kvgpumgrDetachGpu(pNv->gpu_id);
RmDestroyRegistry(pNv);
if (nvp != NULL)
@@ -1448,78 +1454,6 @@ fail:
return NV_FALSE;
}
static NvBool verifyGspFirmware(
const void *pGspFwBuf,
NvU32 gspFwBufSize
)
{
//
// This array will be populated with a sha256 hash of the GSP-RM firmware
// binary in a post-compile step. We really want this array to be 'const',
// but adding that qualifier here makes the compiler perform undesirable
// optimization assuming the array is always going to be zero. The
// .gspfwhash-rodata section is marked readonly when it is populated with
// the real hash in lieu of 'const'.
//
static NvU8 __attribute__((section(".gspfwhash-rodata")))
expectedFwHash[NV_SHA256_DIGEST_SIZE] = {};
NvU32 i;
NvBool bHashCheck = NV_FALSE;
//
// To allow for simple incremental build workflow, we will only
// perform the firmware hash check if the expected hash has been
// embedded into the kernel binary.
//
for (i = 0; i < NV_SHA256_DIGEST_SIZE; i++)
{
if (expectedFwHash[i] != 0)
{
bHashCheck = NV_TRUE;
break;
}
}
if (bHashCheck)
{
NvU8 gspFwHash[NV_SHA256_DIGEST_SIZE];
nv_sha256(pGspFwBuf, gspFwBufSize, gspFwHash);
#define NvU64_BIG_ENDIAN(buf) \
((NvU64)(buf)[0] << 56) | ((NvU64)(buf)[1] << 48) | \
((NvU64)(buf)[2] << 40) | ((NvU64)(buf)[3] << 32) | \
((NvU64)(buf)[4] << 24) | ((NvU64)(buf)[5] << 16) | \
((NvU64)(buf)[6] << 8) | ((NvU64)(buf)[7] << 0)
if (portMemCmp(expectedFwHash, gspFwHash, NV_SHA256_DIGEST_SIZE) != 0)
{
NV_PRINTF(LEVEL_ERROR, "GSP firmware validation failed: hash mismatch\n");
NV_PRINTF(LEVEL_ERROR, "Expected GSP firmware hash: %016llx%016llx%016llx%016llx\n",
NvU64_BIG_ENDIAN(&expectedFwHash[0]), NvU64_BIG_ENDIAN(&expectedFwHash[8]),
NvU64_BIG_ENDIAN(&expectedFwHash[16]), NvU64_BIG_ENDIAN(&expectedFwHash[24]));
NV_PRINTF(LEVEL_ERROR, "Got GSP firmware hash: %016llx%016llx%016llx%016llx\n",
NvU64_BIG_ENDIAN(&gspFwHash[0]), NvU64_BIG_ENDIAN(&gspFwHash[8]),
NvU64_BIG_ENDIAN(&gspFwHash[16]), NvU64_BIG_ENDIAN(&gspFwHash[24]));
NV_PRINTF(LEVEL_ERROR, "The GSP firmware version must exactly match the RM (nv-kernel.o) build.\n");
NV_PRINTF(LEVEL_ERROR, "Most likely cause of this error is an out of band update to one of the components\n");
return NV_FALSE;
}
else
{
NV_PRINTF(LEVEL_NOTICE, "GSP firmware hash: %016llx%016llx%016llx%016llx\n",
NvU64_BIG_ENDIAN(&gspFwHash[0]), NvU64_BIG_ENDIAN(&gspFwHash[8]),
NvU64_BIG_ENDIAN(&gspFwHash[16]), NvU64_BIG_ENDIAN(&gspFwHash[24]));
}
}
else
{
NV_PRINTF(LEVEL_NOTICE, "GSP firmware hash not found.\n");
}
return NV_TRUE;
}
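The NvU64_BIG_ENDIAN macro in the removed verifier packs eight digest bytes into one printable 64-bit word, so a 32-byte SHA-256 hash prints as four %016llx fields. A standalone check of the same packing (plain C; the toy buffer and names are illustrative):
#include <stdio.h>
#include <stdint.h>
// Same byte packing as the removed NvU64_BIG_ENDIAN macro, over a toy buffer.
#define U64_BIG_ENDIAN(buf) \
    (((uint64_t)(buf)[0] << 56) | ((uint64_t)(buf)[1] << 48) | \
     ((uint64_t)(buf)[2] << 40) | ((uint64_t)(buf)[3] << 32) | \
     ((uint64_t)(buf)[4] << 24) | ((uint64_t)(buf)[5] << 16) | \
     ((uint64_t)(buf)[6] <<  8) | ((uint64_t)(buf)[7] <<  0))
int main(void)
{
    const uint8_t digest[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };
    printf("%016llx\n", (unsigned long long)U64_BIG_ENDIAN(digest)); // deadbeef00112233
    return 0;
}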
NvBool RmInitAdapter(
nv_state_t *nv
)
@@ -1583,9 +1517,16 @@ NvBool RmInitAdapter(
//
if (nv->request_firmware)
{
RmSetDeviceDmaAddressSize(nv, NV_GSP_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);
NvU32 gpuArch = (DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, nvp->pmc_boot_42) <<
GPU_ARCH_SHIFT);
NvU32 gpuImpl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, nvp->pmc_boot_42);
gspFwHandle = nv_get_firmware(nv, NV_FIRMWARE_GSP,
nv_firmware_chip_family_t chipFamily = nv_firmware_get_chip_family(gpuArch, gpuImpl);
nv_set_dma_address_size(nv, NV_GSP_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH);
gspFwHandle = nv_get_firmware(nv, NV_FIRMWARE_TYPE_GSP,
chipFamily,
&gspFw.pBuf,
&gspFw.size);
if (gspFwHandle == NULL &&
@@ -1596,21 +1537,16 @@ NvBool RmInitAdapter(
}
else if (gspFwHandle != NULL)
{
if (!verifyGspFirmware(gspFw.pBuf, gspFw.size))
{
RM_SET_ERROR(status, RM_INIT_FIRMWARE_VALIDATION_FAILED);
goto shutdown;
}
#if LIBOS_LOG_DECODE_ENABLE
if (nv->enable_firmware_logs)
{
gspFwLogHandle = nv_get_firmware(nv, NV_FIRMWARE_GSP_LOG,
gspFwLogHandle = nv_get_firmware(nv, NV_FIRMWARE_TYPE_GSP_LOG,
chipFamily,
&gspFw.pLogElf,
&gspFw.logElfSize);
if (gspFwLogHandle == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Failed to load gsp_log.bin, no GSP-RM logs will be printed (non-fatal)\n");
NV_PRINTF(LEVEL_ERROR, "Failed to load gsp_log_*.bin, no GSP-RM logs will be printed (non-fatal)\n");
}
}
#endif
@@ -1669,6 +1605,8 @@ NvBool RmInitAdapter(
RmSetConsolePreservationParams(pGpu);
RmInitAcpiMethods(pOS, pSys, pGpu);
//
// If GSP fw RM support is enabled then start the GSP microcode
// (including the task running the full instance of the RM) and
@@ -1710,17 +1648,11 @@ NvBool RmInitAdapter(
populateDeviceAttributes(pGpu, nv);
RmInitAcpiMethods(pOS, pSys, pGpu);
status.rmStatus = RmInitX86Emu(pGpu);
if (status.rmStatus != NV_OK)
{
RM_SET_ERROR(status, RM_INIT_VBIOS_X86EMU_FAILED);
NV_PRINTF(LEVEL_ERROR,
"RmInitX86Emu failed, bailing out of RmInitAdapter\n");
goto shutdown;
}
initVendorSpecificRegistry(pGpu, nv->pci_info.device_id);
if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu))
{
initNbsiTable(pGpu);
}
// finally, initialize the device
RmInitNvDevice(devicereference, &status);
@@ -1809,6 +1741,15 @@ NvBool RmInitAdapter(
goto shutdown;
}
status.rmStatus = RmInitX86Emu(pGpu);
if (status.rmStatus != NV_OK)
{
RM_SET_ERROR(status, RM_INIT_VBIOS_X86EMU_FAILED);
NV_PRINTF(LEVEL_ERROR,
"RmInitX86Emu failed, bailing out of RmInitAdapter\n");
goto shutdown;
}
// i2c only on master device??
RmI2cAddGpuPorts(nv);
nvp->flags |= NV_INIT_FLAG_PUBLIC_I2C;
@@ -1861,10 +1802,10 @@ NvBool RmInitAdapter(
if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_NONE) == NV_OK)
{
//
// As we have already acquired the API Lock here, we are
// calling RmSystemEvent directly instead of rm_system_event.
// As we have already acquired the API Lock here, we are calling
// RmPowerSourceChangeEvent directly instead of rm_power_source_change_event.
//
RmSystemEvent(nv, NV_SYSTEM_ACPI_BATTERY_POWER_EVENT, !ac_plugged);
RmPowerSourceChangeEvent(nv, !ac_plugged);
// UNLOCK: release GPU lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
@@ -1878,7 +1819,8 @@ NvBool RmInitAdapter(
// OpenRM with a special registry key, if not on a Data Center GPU.
const GspStaticConfigInfo *pSCI = GPU_GET_GSP_STATIC_INFO(pGpu);
if (pSCI->computeBranding != COMPUTE_BRANDING_TYPE_TESLA)
if (pSCI->computeBranding != COMPUTE_BRANDING_TYPE_TESLA &&
((pGpu->idInfo.PCIDeviceID >> 16) & 0xffff) != NV_PCI_DEVID_DEVICE_PG189_SKU600)
{
NvU32 data = NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT;
RmReadRegistryDword(nv, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS, &data);
@@ -1949,6 +1891,8 @@ void RmShutdownAdapter(
{
RmDestroyDeferredDynamicPowerManagement(nv);
freeNbsiTable(pGpu);
gpuFreeEventHandle(pGpu);
OBJCL *pCl = SYS_GET_CL(pSys);
View File

@@ -526,7 +526,9 @@ osCreateOsDescriptorFromPhysAddr
NvU64 base = 0;
NvU32 cache_type = NV_MEMORY_CACHED;
NvU64 memdescFlags = MEMDESC_FLAGS_NONE;
NvU64 *pPhys_addrs;
NvU64 num_os_pages;
NvU32 idx;
// Currently only work with contiguous sysmem allocations
if (!FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags))
{
@@ -565,21 +567,34 @@ osCreateOsDescriptorFromPhysAddr
pPteArray = memdescGetPteArray(pMemDesc, AT_CPU);
pPteArray[0] = base;
num_os_pages = NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount);
pPhys_addrs = portMemAllocNonPaged(sizeof(NvU64) * num_os_pages);
if (pPhys_addrs == NULL)
return NV_ERR_NO_MEMORY;
for (idx = 0; idx < num_os_pages; idx++)
{
pPhys_addrs[idx] = base + (idx * os_page_size);
}
*ppPrivate = NULL;
rmStatus = nv_register_phys_pages(NV_GET_NV_STATE(pGpu), pPteArray,
NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount),
rmStatus = nv_register_phys_pages(NV_GET_NV_STATE(pGpu), pPhys_addrs,
num_os_pages,
memdescGetCpuCacheAttrib(pMemDesc),
ppPrivate);
if (rmStatus != NV_OK)
{
memdescDestroy(pMemDesc);
return rmStatus;
goto cleanup;
}
memdescSetMemData(pMemDesc, *ppPrivate,
osDestroyOsDescriptorFromPhysAddr);
return NV_OK;
cleanup:
portMemFree(pPhys_addrs);
return rmStatus;
}
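The hunk above stops passing the single-entry PTE array to nv_register_phys_pages() and instead expands the contiguous range into one address per OS page. The expansion, distilled into a standalone helper; the helper name is hypothetical, while portMemAllocNonPaged, portMemFree, and os_page_size are used as they appear above:
// Hypothetical helper mirroring the expansion above: one physical address
// per OS page, for a contiguous range starting at 'base'.
static NV_STATUS expandContiguousRange(NvU64 base, NvU64 numOsPages, NvU64 **ppPhysAddrs)
{
    NvU64 idx;
    NvU64 *pPhysAddrs = portMemAllocNonPaged(sizeof(NvU64) * numOsPages);
    if (pPhysAddrs == NULL)
        return NV_ERR_NO_MEMORY;
    for (idx = 0; idx < numOsPages; idx++)
    {
        pPhysAddrs[idx] = base + (idx * os_page_size);
    }
    *ppPhysAddrs = pPhysAddrs; // caller releases with portMemFree()
    return NV_OK;
}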
static NV_STATUS
View File

@@ -340,6 +340,7 @@ NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
RmObjExportHandle hDstObject;
NvU32 deviceInstance = NV_MAX_DEVICES;
NvHandle hTmpObject;
NvBool bClientAsDstParent = NV_FALSE;
NV_STATUS status;
RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
@@ -370,9 +371,10 @@ NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
hTmpObject = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0;
} while (hTmpObject != 0);
// If a memory object is not parented by a device, use client as a parent.
if ((hTmpObject == 0) || (deviceInstance >= NV_MAX_DEVICES))
{
return NV_ERR_OBJECT_NOT_FOUND;
bClientAsDstParent = NV_TRUE;
}
status = RmRefObjExportImport();
@@ -382,9 +384,10 @@ NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
return status;
}
if (objExportDevice[deviceInstance].hRmDevice == 0 ||
serverutilValidateNewResourceHandle(hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice))
if (!bClientAsDstParent &&
((objExportDevice[deviceInstance].hRmDevice == 0) ||
serverutilValidateNewResourceHandle(hObjExportRmClient,
objExportDevice[deviceInstance].hRmDevice)))
{
//
// Device object has not been created or it got destroyed in the
@@ -465,6 +468,7 @@ NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
// If duping under device handle fails, try subdevice handle.
status = pRmApi->DupObject(pRmApi,
hObjExportRmClient,
bClientAsDstParent ? hObjExportRmClient :
objExportDevice[deviceInstance].hRmDevice,
&hDstObject,
hSrcClient,
@@ -472,7 +476,7 @@ NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject,
0 /* flags */);
if (status != NV_OK)
{
if (status == NV_ERR_INVALID_OBJECT_PARENT)
if (!bClientAsDstParent && (status == NV_ERR_INVALID_OBJECT_PARENT))
{
NV_PRINTF(LEVEL_INFO,
"pRmApi->DupObject(Dev, failed due to invalid parent in %s."
@@ -584,6 +588,12 @@ NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent,
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC;
break;
#if defined(NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC) && \
defined(NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC_MC)
case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC_MC:
*pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC_MC;
break;
#endif
default:
NV_ASSERT_OK_OR_RETURN(NV_ERR_INVALID_ARGUMENT);
}
View File

@@ -25,17 +25,19 @@
#include <ctrl/ctrl0080/ctrl0080unix.h>
#include <gpu/device/device.h>
#include <gpu/gpu.h>
#include <gpu/mem_mgr/mem_mgr.h>
#include <gpu/mem_mgr/mem_desc.h>
#include <nv-priv.h>
#include <nv.h>
#include <osapi.h>
#include <gpu/mem_mgr/mem_mgr.h>
NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice,
NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams)
NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL
(
Device *pDevice,
NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams
)
{
OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
NvBool bContinue = NV_TRUE;
if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FB) == NV_OK)
{
@@ -45,8 +47,9 @@ NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice,
SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
if (memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager) != NULL)
if ((memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager) != NULL) && bContinue)
{
NvU64 baseAddr;
@@ -61,6 +64,11 @@ NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice,
&pParams->pitch,
nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
if (baseAddr != 0)
{
bContinue = NV_FALSE;
}
}
SLI_LOOP_END
@@ -74,3 +82,37 @@ NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice,
return NV_OK;
}
void
dispdeviceFillVgaSavedDisplayState
(
OBJGPU *pGpu,
NvU64 vgaAddr,
NvU8 vgaMemType,
NvBool vgaValid,
NvU64 workspaceAddr,
NvU8 workspaceMemType,
NvBool workspaceValid,
NvBool baseValid,
NvBool workspaceBaseValid
)
{
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
nv_priv_t *nvp = NV_GET_NV_PRIV(nv);
NvBool use_vbios = NV_PRIMARY_VGA(nv) && RmGpuHasIOSpaceEnabled(nv);
if (use_vbios)
{
nvp->vga.base.addr = vgaAddr;
nvp->vga.base.memTarget = vgaMemType;
nvp->vga.base.valid = vgaValid;
nvp->vga.baseValid = baseValid;
nvp->vga.workspaceBase.addr = workspaceAddr;
nvp->vga.workspaceBase.memTarget = workspaceMemType;
nvp->vga.workspaceBase.valid = workspaceValid;
nvp->vga.workspaceBaseValid = workspaceBaseValid;
}
}
View File

@@ -28,10 +28,13 @@
#include <core/locks.h>
#include <gpu/gpu.h>
#include "kernel/gpu/intr/intr.h"
#include <gpu/bif/kernel_bif.h>
#include "gpu/bif/kernel_bif.h"
#include "gpu/mmu/kern_gmmu.h"
#include "gpu/disp/kern_disp.h"
#include <nv_sriov_defines.h>
#include "objtmr.h"
static NvBool osInterruptPending(
OBJGPU *pGpu,
NvBool *serviced,
@@ -134,7 +137,7 @@ static NvBool osInterruptPending(
}
// LOCK: try to acquire GPUs lock
if (rmDeviceGpuLocksAcquire(pDeviceLockGpu, GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_ISR) == NV_OK)
if (rmDeviceGpuLocksAcquire(pDeviceLockGpu, GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_ISR) == NV_OK)
{
threadStateInitISRAndDeferredIntHandler(&threadState,
pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR);
@@ -569,3 +572,129 @@ void NV_API_CALL rm_isr_bh_unlocked(
NV_EXIT_RM_RUNTIME(sp,fp);
}
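The flag rename in osInterruptPending above (GPUS_LOCK_FLAGS_COND_ACQUIRE -> GPU_LOCK_FLAGS_COND_ACQUIRE) sits inside the standard top-half idiom: try the device GPU lock without blocking, and bail out if it is contended. The idiom reduced to its skeleton; pDeviceLockGpu and threadState are assumed in scope as they are there, and the release path is elided:
// Sketch of the conditional-acquire idiom from osInterruptPending.
if (rmDeviceGpuLocksAcquire(pDeviceLockGpu, GPU_LOCK_FLAGS_COND_ACQUIRE,
                            RM_LOCK_MODULES_ISR) == NV_OK)
{
    threadStateInitISRAndDeferredIntHandler(&threadState,
        pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR);
    /* ... service the pending interrupt, then release the lock ... */
}
else
{
    /* Contended: leave servicing to the lock holder or the unlocked bottom half. */
}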
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
NV_STATUS status = NV_OK;
OBJGPU *pGpu;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (pGpu == NULL || faultsCopied == NULL)
{
status = NV_ERR_OBJECT_NOT_FOUND;
goto done;
}
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
if (IS_GSP_CLIENT(pGpu))
{
status = NV_ERR_NOT_SUPPORTED;
goto done;
}
done:
NV_EXIT_RM_RUNTIME(sp,fp);
return status;
}
//
// Use this call when MMU faults need to be copied
// outside of the RM lock.
//
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
OBJGPU *pGpu;
void *fp;
NV_STATUS status = NV_OK;
NV_ENTER_RM_RUNTIME(sp,fp);
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (pGpu == NULL || faultsCopied == NULL)
{
status = NV_ERR_OBJECT_NOT_FOUND;
goto done;
}
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
if (IS_GSP_CLIENT(pGpu))
{
status = NV_ERR_NOT_SUPPORTED;
goto done;
}
done:
NV_EXIT_RM_RUNTIME(sp,fp);
return status;
}
//
// Wrapper to handle calls to copy mmu faults
//
NV_STATUS rm_gpu_handle_mmu_faults(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
)
{
NV_STATUS status = NV_OK;
OBJGPU *pGpu;
*faultsCopied = 0;
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (pGpu == NULL)
{
return NV_ERR_OBJECT_NOT_FOUND;
}
if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_VIRTUAL(pGpu))
{
THREAD_STATE_NODE threadState;
KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu);
Intr *pIntr = GPU_GET_INTR(pGpu);
NvU32 hw_put = 0;
NvU32 hw_get = 0;
threadStateInitISRLockless(&threadState, pGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER,
&hw_put, &threadState);
kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER,
&hw_get, &threadState);
if (hw_get != hw_put)
{
// We have to clear the top-level interrupt bit here since otherwise
// the bottom half will attempt to service the interrupt on the CPU
// side before GSP receives the notification and services it.
kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, &threadState);
status = intrTriggerPrivDoorbell_HAL(pGpu, pIntr, NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_HANDLE);
}
threadStateFreeISRLockless(&threadState, pGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
}
else
{
status = rm_gpu_copy_mmu_faults_unlocked(sp, nv, faultsCopied);
}
return status;
}