595.44.02

This commit is contained in:
Andy Ritger
2026-03-09 13:13:35 -07:00
parent 2ccbad25e1
commit 2c7bfb4706
1210 changed files with 436452 additions and 437093 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,7 +28,7 @@
*
* cpuopsys.h
*
* Copyright (c) 2001, Nvidia Corporation. All rights reserved.
* Copyright (c) 2001-2025, Nvidia Corporation. All rights reserved.
*/
/*!
@@ -134,7 +134,6 @@
#endif
/* ***** Processor type variations */
/* Note: The prefix NV_CPU_* is taken by Nvcm.h */
#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */
/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -59,8 +59,9 @@
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_DEFAULT_ON_WS_SERVER 0x00000020
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012

View File

@@ -33,6 +33,7 @@ typedef struct {
} pci_info;
NvBool needs_numa_setup;
NvBool is_soc_disp;
/*
* opaque OS-specific pointer; on Linux, this is a pointer to the

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,6 +26,10 @@
#define NV_IOCTL_NUMBERS_H
/* NOTE: using an ioctl() number > 55 will overflow! */
/*
* NOTE: Any new ioctls added here should also be added to nv_validate_ioctls() in nv.c or
* rm_validate_ioctls() in osapi.c
*/
#define NV_IOCTL_MAGIC 'F'
#define NV_IOCTL_BASE 200
#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0)

View File

@@ -129,6 +129,7 @@
#include <asm/pgtable.h> /* pte bit definitions */
#include <asm/bitops.h> /* __set_bit() */
#include <linux/time.h> /* FD_SET() */
#include <linux/memremap.h>
/*
* Use current->cred->euid, instead of calling current_euid().
@@ -503,7 +504,7 @@ static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot)
#define NV_HAVE_MEMORY_ENCRYPT_DECRYPT 0
#if defined(NVCPU_X86_64) && \
#if (defined(NVCPU_X86_64) || defined(NVCPU_AARCH64)) && \
NV_IS_EXPORT_SYMBOL_GPL_set_memory_encrypted && \
NV_IS_EXPORT_SYMBOL_GPL_set_memory_decrypted
#undef NV_HAVE_MEMORY_ENCRYPT_DECRYPT
@@ -553,7 +554,7 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
#endif
}
#define NV_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))
#define NV_GET_PAGE_STRUCT(phys_page) pfn_to_page(phys_page >> PAGE_SHIFT)
#define NV_VMA_PGOFF(vma) ((vma)->vm_pgoff)
#define NV_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start)
#define NV_VMA_OFFSET(vma) (((NvU64)(vma)->vm_pgoff) << PAGE_SHIFT)
@@ -562,6 +563,12 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
#define NV_DEVICE_MINOR_NUMBER(x) minor((x)->i_rdev)
#if defined(NV_GET_DEV_PAGEMAP_HAS_PGMAP_ARG)
#define NV_GET_DEV_PAGEMAP(pfn) get_dev_pagemap(pfn, NULL)
#else
#define NV_GET_DEV_PAGEMAP get_dev_pagemap
#endif
#define NV_PCI_DISABLE_DEVICE(pci_dev) \
{ \
NvU16 __cmd[2]; \
@@ -671,18 +678,6 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
#define PCI_CAP_ID_EXP 0x10
#endif
/*
* If the host OS has page sizes larger than 4KB, we may have a security
* problem. Registers are typically grouped in 4KB pages, but if there are
* larger pages, then the smallest userspace mapping possible (e.g., a page)
* may give more access than intended to the user.
*/
#define NV_4K_PAGE_ISOLATION_REQUIRED(addr, size) \
((PAGE_SIZE > NV_RM_PAGE_SIZE) && \
((size) <= NV_RM_PAGE_SIZE) && \
(((addr) >> NV_RM_PAGE_SHIFT) == \
(((addr) + (size) - 1) >> NV_RM_PAGE_SHIFT)))
static inline int nv_remap_page_range(struct vm_area_struct *vma,
unsigned long virt_addr, NvU64 phys_addr, NvU64 size, pgprot_t prot)
{
@@ -1212,6 +1207,10 @@ typedef struct coherent_link_info_s {
* of virtualized OS environment it is Intermediate Physical Address (IPA) */
NvU64 gpu_mem_pa;
/* Size of the GPU memory mappable through coherent link. It is possible
that less than whole FB is mapped to CPU. */
NvU64 gpu_mem_size;
/* Physical address of the reserved portion of the GPU memory, applicable
* only in Grace Hopper self hosted passthrough virtualization platform. */
NvU64 rsvd_mem_pa;
@@ -1374,6 +1373,13 @@ typedef struct nv_linux_state_s {
/* Lock serializing ISRs for different SOC vectors */
nv_spinlock_t soc_isr_lock;
/*
* Lock serializing access to the soc_isr_info struct across top and
* bottom halves for SOC vectors.
*/
nv_spinlock_t soc_isr_info_lock;
void *soc_bh_mutex;
struct nv_timer snapshot_timer;
@@ -1428,6 +1434,9 @@ typedef struct nv_linux_state_s {
int (*devfreq_enable_boost)(struct device *dev, unsigned int duration);
int (*devfreq_disable_boost)(struct device *dev);
#endif
/* Per-device GPU init on probe setting, initialized from global NVreg_GpuInitOnProbe */
NvBool init_on_probe;
} nv_linux_state_t;
extern nv_linux_state_t *nv_linux_devices;
@@ -1485,9 +1494,11 @@ typedef struct
nv_kthread_q_item_t deferred_close_q_item;
NvU32 *attached_gpus;
size_t num_attached_gpus;
nv_alloc_mapping_context_t mmap_context;
struct address_space mapping;
struct rw_semaphore fileVaLock;
nv_alloc_mapping_list_node_t *file_mapping_list;
nv_kthread_q_item_t open_q_item;
struct completion open_complete;
nv_linux_state_t *deferred_open_nvl;
@@ -1599,6 +1610,8 @@ extern NvU32 NVreg_RegisterPlatformDeviceDriver;
extern NvU32 NVreg_EnableResizableBar;
extern NvU32 NVreg_TegraGpuPgMask;
extern NvU32 NVreg_EnableNonblockingOpen;
extern NvU32 NVreg_UseKernelSuspendNotifiers;
extern NvU32 NVreg_GpuInitOnProbe;
extern NvU32 num_probed_nv_devices;
extern NvU32 num_nv_devices;

View File

@@ -114,8 +114,8 @@ typedef struct {
* specified GPU. This is equivalent to opening and closing a
* /dev/nvidiaN device file from user-space.
*/
int (*open_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp);
void (*close_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp);
int (*open_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp, NvBool reset_aware);
void (*close_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp, NvBool reset_aware);
void (*op)(nvidia_modeset_stack_ptr sp, void *ops_cmd);

View File

@@ -38,4 +38,5 @@ int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct pci_dev *);
NvBool nv_pci_has_common_pci_switch(nv_state_t *nv, struct pci_dev *);
void nv_pci_tegra_boost_clocks(struct device *dev);
#endif

View File

@@ -45,4 +45,16 @@ void nv_soc_free_irqs(nv_state_t *nv);
#define NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE (NV_SUPPORTS_PLATFORM_DEVICE && NV_SUPPORTS_DCE_CLIENT_IPC)
#if defined(CONFIG_OF)
NV_STATUS nv_platform_get_screen_info_dt(
NvU64 *pPhysicalAddress,
NvU32 *pFbWidth,
NvU32 *pFbHeight,
NvU32 *pFbDepth,
NvU32 *pFbPitch,
NvU64 *pFbSize
);
#endif // CONFIG_OF
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,6 +40,8 @@ void nv_procfs_add_warning (const char *, const char *);
int nv_procfs_add_gpu (nv_linux_state_t *);
void nv_procfs_remove_gpu (nv_linux_state_t *);
extern nv_pm_action_depth_t nv_procfs_pm_action_depth;
int nvidia_mmap (struct file *, struct vm_area_struct *);
int nvidia_mmap_helper (nv_state_t *, nv_linux_file_private_t *, nvidia_stack_t *, struct vm_area_struct *, void *);
int nv_encode_caching (pgprot_t *, NvU32, nv_memory_type_t);
@@ -63,8 +65,8 @@ NV_STATUS nv_uvm_resume_P2P (const NvU8 *uuid);
/* Move these to nv.h once implemented by other UNIX platforms */
NvBool nvidia_get_gpuid_list (NvU32 *gpu_ids, NvU32 *gpu_count);
int nvidia_dev_get (NvU32, nvidia_stack_t *);
void nvidia_dev_put (NvU32, nvidia_stack_t *);
int nvidia_dev_get (NvU32, nvidia_stack_t *, NvBool reset_aware);
void nvidia_dev_put (NvU32, nvidia_stack_t *, NvBool reset_aware);
int nvidia_dev_get_uuid (const NvU8 *, nvidia_stack_t *);
void nvidia_dev_put_uuid (const NvU8 *, nvidia_stack_t *);
int nvidia_dev_block_gc6 (const NvU8 *, nvidia_stack_t *);
@@ -87,11 +89,15 @@ void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state
void nv_dev_free_stacks(nv_linux_state_t *);
NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *);
void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *);
int nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_add_device_locked(nv_linux_state_t *);
int nv_linux_assign_minor_locked(nv_linux_state_t *);
void nv_linux_remove_minor_locked(nv_linux_state_t *);
void nv_linux_remove_device_locked(nv_linux_state_t *);
NvBool nv_acpi_power_resource_method_present(struct pci_dev *);
int nv_linux_init_open_q(nv_linux_state_t *);
void nv_linux_stop_open_q(nv_linux_state_t *);
int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp);
void nv_stop_device(nv_state_t *nv, nvidia_stack_t *sp);
#endif /* _NV_PROTO_H_ */

View File

@@ -46,6 +46,12 @@
#include <nvmisc.h>
#include <os/nv_memory_area.h>
#if !defined(NV_KERNEL_INTERFACE_LAYER) && defined(NVRM) && !defined(NV_DRM_FREEBSD_LKPI)
#include <nvport/nvport.h>
#else
#define PORT_ATOMIC
#endif
extern nv_cap_t *nvidia_caps_root;
extern const NvBool nv_is_rm_firmware_supported_os;
@@ -312,6 +318,13 @@ typedef struct
((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
// for dynamic granularity page array feature.
// Notice: dst_granu should be larger than src_granu.
#define NV_SRC_TO_DST_PAGE_SHIFT(src_granu, dst_granu) (GET_PAGE_SHIFT(dst_granu) - GET_PAGE_SHIFT(src_granu))
#define NV_SRC_TO_DST_PAGE_COUNT(src_page_count,src_granu, dst_granu) \
((((NvUPtr)(src_page_count)) >> NV_SRC_TO_DST_PAGE_SHIFT(src_granu, dst_granu)) + \
((((src_page_count) & ((1 << NV_SRC_TO_DST_PAGE_SHIFT(src_granu, dst_granu)) - 1)) != 0) ? 1 : 0))
#if defined(NVCPU_X86_64)
#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3)
#else
@@ -389,6 +402,15 @@ typedef struct nv_alloc_mapping_context_s {
NvU32 caching;
} nv_alloc_mapping_context_t;
struct nv_alloc_mapping_list_node_s;
typedef struct nv_alloc_mapping_list_node_s {
nv_alloc_mapping_context_t context;
MemoryRange fileRange;
struct nv_alloc_mapping_list_node_s *pNext;
struct nv_alloc_mapping_list_node_s *pPrev;
} nv_alloc_mapping_list_node_t;
typedef enum
{
NV_SOC_IRQ_DISPLAY_TYPE = 0x1,
@@ -492,6 +514,7 @@ typedef struct nv_state_t
NvBool supports_tegra_igpu_rg;
NvBool is_tegra_pci_igpu_rg_enabled;
NvU32 tegra_pci_igpu_pg_mask;
NvU32 gpc_fuse_status_offset;
NvBool primary_vga;
@@ -562,9 +585,6 @@ typedef struct nv_state_t
/* Bool to check if dma-buf is supported */
NvBool dma_buf_supported;
/* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
NvBool nvpcf_dsm_in_gpu_scope;
/* Bool to check if the device received a shutdown notification */
NvBool is_shutdown;
@@ -578,6 +598,14 @@ typedef struct nv_state_t
*/
NvBool mem_has_struct_page;
/*
* Coherent GPU Memory Mode, one of:
* NV_COHERENT_GPU_MEM_MODE_NONE
* NV_COHERENT_GPU_MEM_MODE_NUMA
* NV_COHERENT_GPU_MEM_MODE_DRIVER
*/
NvBool coherent_gpu_mem_mode;
/* OS detected GPU has ATS capability */
NvBool ats_support;
/*
@@ -596,9 +624,18 @@ typedef struct nv_state_t
NvU32 dispNisoStreamId;
} iommus;
struct {
NvU32 max_dispclk_rate_using_disppllkhz;
NvU32 max_dispclk_rate_using_sppll0clkoutakhz;
NvU32 max_hubclk_rate_using_sppll0clkoutbkhz;
} clocks;
/* Console is managed by drm drivers or NVKMS */
NvBool client_managed_console;
/* Console is mapped to sysmem instead of GPU PCIe BAR1 or BAR2 */
NvBool sysmem_mapped_console;
/* Struct to cache the gpu info details */
nv_cached_gpu_info_t cached_gpu_info;
@@ -607,9 +644,9 @@ typedef struct nv_state_t
} nv_state_t;
#define NVFP_TYPE_NONE 0x0
#define NVFP_TYPE_REFCOUNTED 0x1
#define NVFP_TYPE_REGISTERED 0x2
#define NVFP_TYPE_NONE ((NvU32)0x0)
#define NVFP_TYPE_REFCOUNTED ((NvU32)0x1)
#define NVFP_TYPE_REGISTERED ((NvU32)0x2)
struct nv_file_private_t
{
@@ -619,9 +656,9 @@ struct nv_file_private_t
NvU32 gpuInstanceId;
NvU8 metadata[64];
nv_file_private_t *ctl_nvfp;
nv_file_private_t * PORT_ATOMIC ctl_nvfp;
void *ctl_nvfp_priv;
NvU32 register_or_refcount;
PORT_ATOMIC NvU32 register_or_refcount;
//
// True if a client or an event was ever allocated on this fd.
@@ -671,7 +708,7 @@ typedef struct UvmGpuAccessBitsBufferAlloc_tag *nvgpuAccessBitBufferAlloc_t
* flags
*/
#define NV_FLAG_OPEN 0x0001
#define NV_FLAG_INITIALIZED 0x0001
#define NV_FLAG_EXCLUDE 0x0002
#define NV_FLAG_CONTROL 0x0004
#define NV_FLAG_PCI_P2P_UNSUPPORTED_CHIPSET 0x0008
@@ -685,7 +722,7 @@ typedef struct UvmGpuAccessBitsBufferAlloc_tag *nvgpuAccessBitBufferAlloc_t
#define NV_FLAG_TRIGGER_FLR 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
#define NV_FLAG_PCI_REMOVE_IN_PROGRESS 0x2000
// Unused 0x2000
#define NV_FLAG_UNBIND_LOCK 0x4000
/* To be set when GPU is not present on the bus, to help device teardown */
#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000
@@ -782,6 +819,13 @@ typedef enum
#define NV_EVAL_ACPI_METHOD_NVIF 0x01
#define NV_EVAL_ACPI_METHOD_WMMX 0x02
/*
* Coherent GPU Memory Mode
*/
#define NV_COHERENT_GPU_MEM_MODE_NONE 0
#define NV_COHERENT_GPU_MEM_MODE_NUMA 1
#define NV_COHERENT_GPU_MEM_MODE_DRIVER 2
typedef enum {
NV_I2C_CMD_READ = 1,
NV_I2C_CMD_WRITE,
@@ -795,7 +839,7 @@ typedef enum {
NV_I2C_CMD_BLOCK_WRITE
} nv_i2c_cmd_t;
// Flags needed by OSAllocPagesNode
// Flags needed by osAllocPagesNode / os_alloc_pages_node
#define NV_ALLOC_PAGES_NODE_NONE 0x0
#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1
@@ -930,13 +974,14 @@ NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU6
NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *);
NV_STATUS NV_API_CALL nv_dma_map_non_pci_peer (nv_dma_device_t *, NvU64, NvU64 *);
void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64);
NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *);
void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64);
void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *);
void* NV_API_CALL nv_dma_get_dev_pagemap (NvU64);
void NV_API_CALL nv_dma_put_dev_pagemap (void *);
NvBool NV_API_CALL nv_grdma_pci_topology_supported(nv_state_t *, nv_dma_device_t *);
NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *);
@@ -974,7 +1019,7 @@ void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void NV_API_CALL nv_put_file_private(void *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
void NV_API_CALL nv_p2p_free_platform_data(void *data);
@@ -985,6 +1030,11 @@ void NV_API_CALL nv_release_mmap_lock (nv_state_t *);
NvBool NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *);
void NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool);
#if !defined(NV_VMWARE)
nv_alloc_mapping_list_node_t** NV_API_CALL nv_acquire_file_va(nv_file_private_t *, NvBool);
void NV_API_CALL nv_release_file_va(nv_file_private_t *, NvBool);
#endif
NV_STATUS NV_API_CALL nv_indicate_idle (nv_state_t *);
NV_STATUS NV_API_CALL nv_indicate_not_idle (nv_state_t *);
void NV_API_CALL nv_idle_holdoff (nv_state_t *);
@@ -1004,6 +1054,7 @@ NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
void NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *);
void NV_API_CALL nv_set_gpu_pg_mask(nv_state_t *);
void NV_API_CALL nv_trigger_gpu_flr(nv_state_t *);
struct dma_buf;
typedef struct nv_dma_buf nv_dma_buf_t;
@@ -1045,6 +1096,7 @@ NV_STATUS NV_API_CALL nv_i2c_transfer(nv_state_t *, NvU32, NvU8, nv_i2c_msg_t *,
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *);
NV_STATUS NV_API_CALL nv_i2c_bus_status(nv_state_t *, NvU32, NvS32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_imp_get_import_data (TEGRA_IMP_IMPORT_DATA *);
NV_STATUS NV_API_CALL nv_imp_get_uefi_data (nv_state_t *nv, NvU32 *iso_bw_kbps, NvU32 *floor_bw_kbps);
NV_STATUS NV_API_CALL nv_imp_enable_disable_rfl (nv_state_t *nv, NvBool bEnable);
NV_STATUS NV_API_CALL nv_imp_icc_set_bw (nv_state_t *nv, NvU32 avg_bw_kbps, NvU32 floor_bw_kbps);
NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances);
@@ -1119,6 +1171,7 @@ NV_STATUS NV_API_CALL rm_release_gpu_lock (nvidia_stack_t *, nv_state_t *
NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock (nvidia_stack_t *);
NV_STATUS NV_API_CALL rm_release_all_gpus_lock (nvidia_stack_t *);
NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
NV_STATUS NV_API_CALL rm_validate_ioctls (NvU32, NvU32);
NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *);
void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
@@ -1197,7 +1250,6 @@ void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
@@ -1241,7 +1293,6 @@ NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *,
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_check_usermap_access_params(nv_state_t*, const nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);
void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size);
@@ -1287,6 +1338,14 @@ static inline NvU64 nv_rdtsc(void)
#endif
static inline NvBool nv_dev_needs_vidmem_preservation(const nv_state_t *nv)
{
    /*
     * Video memory preservation is not required for Tegra iGPUs or for
     * SoC display devices; all other devices need it.
     */
    return !(nv->is_tegra_pci_igpu || NV_IS_SOC_DISPLAY_DEVICE(nv));
}
#endif /* NVRM */
static inline int nv_count_bits(NvU64 word)

View File

@@ -209,6 +209,7 @@ struct NvKmsKapiConnectorInfo {
NvU32 numIncompatibleConnectors;
NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS];
NvBool dynamicDpyIdListValid;
NVDpyIdList dynamicDpyIdList;
};
@@ -518,6 +519,10 @@ struct NvKmsKapiDynamicDisplayParams {
NvU8 buffer[NVKMS_KAPI_EDID_BUFFER_SIZE];
} edid;
/* [OUT] Max resolution allowed for modelist */
NvU32 maxWidthInPixels;
NvU32 maxHeightInPixels;
/* [IN] Set true to override EDID */
NvBool overrideEdid;
@@ -1028,6 +1033,30 @@ struct NvKmsKapiFunctionsTable {
const struct NvKmsKapiMemory *memory
);
/*!
* Increment the GC6 blocker reference count to prevent the GPU from
* entering GC6 power state.
*
* \param [in] device A device allocated using allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*gc6BlockerRefCntInc)(
const struct NvKmsKapiDevice *device
);
/*!
* Decrement the GC6 blocker reference count to allow the GPU to
* enter GC6 power state.
*
* \param [in] device A device allocated using allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*gc6BlockerRefCntDec)(
const struct NvKmsKapiDevice *device
);
/*!
* Create a formatted surface from an NvKmsKapiMemory object.
*

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,6 +27,8 @@
#ifndef __NV_MISC_H
#define __NV_MISC_H
#if !defined(NVRISCV_LIBFSP_BUILD) || !NVRISCV_LIBFSP_BUILD
#ifdef __cplusplus
extern "C" {
#endif //__cplusplus
@@ -991,10 +993,145 @@ static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif
//! 1 if @p x needs more than @p bits to represent
#define NV_BITFIELD_SIZE_ADD_ONE(x, bits) \
((!!(((NvU64)(x)) >> (bits))) ? 1 : 0)
/*!
* Minimum bits required to represent a value between 1 and @p x inclusive.
*
* @param[in] x Highest unsigned value to represent: 1 <= x <= NV_U64_MAX
*
* Example
* @code
* enum Foo {
* E_A,
* E_B,
* E_C,
* E_D,
* E_LAST = E_D
* };
*
* struct Bar {
* NvU32 fooValue : NV_BITFIELD_SIZE_64(E_LAST);
* // note, it is the highest value of the enum that needs to be
* // represented.
* // If you define a _COUNT as a convenient value for declaring arrays
* // indexed by the enum, remember to subtract 1.
* }
* @endcode
*
* @note This function assumes that the enums are defined from 0..x (inclusive,
* not necessarily contiguous) It is technically possible to have negative enum
* values, so to be safe, the underlying type should always be unsigned.
*
* If @p x is larger than what can fit in the underlying type, a
* 'width of <bitfield> exceeds its type'
* compiler warning is expected.
*
* @return -1 if @p x <= 0. This causes a compile time error if used as
* a bitfield size.
*/
#define NV_BITFIELD_SIZE_64(x) \
(((x) <= 0) ? -1 : \
(NV_BITFIELD_SIZE_ADD_ONE((x), 0) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 1) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 2) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 3) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 4) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 5) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 6) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 7) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 8) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 9) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 10) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 11) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 12) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 13) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 14) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 15) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 16) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 17) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 18) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 19) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 20) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 21) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 22) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 23) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 24) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 25) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 26) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 27) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 28) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 29) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 30) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 31) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 32) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 33) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 34) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 35) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 36) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 37) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 38) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 39) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 40) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 41) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 42) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 43) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 44) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 45) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 46) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 47) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 48) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 49) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 50) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 51) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 52) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 53) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 54) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 55) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 56) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 57) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 58) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 59) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 60) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 61) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 62) + \
NV_BITFIELD_SIZE_ADD_ONE((x), 63)))
//
// Bug 4851259: Newly added functions must be hidden from certain HS-signed
// ucode compilers to avoid signature mismatch.
//
#ifndef NVDEC_1_0
/*!
* Find the Greatest Common Denominator of two NvU64s
*
* @param[in] a first number
* @param[in] b second number
*
* @return GCD of a and b
*/
static NV_FORCEINLINE NvU64 nvFindGcdU64(NvU64 a, NvU64 b)
{
    /*
     * Iterative Euclidean algorithm: repeatedly replace (a, b) with
     * (b mod a, a) until a reaches zero; b then holds the GCD.
     * nvFindGcdU64(0, b) yields b, matching the original behavior.
     */
    while (a != 0)
    {
        NvU64 remainder = b % a;
        b = a;
        a = remainder;
    }
    return b;
}
#endif // NVDEC_1_0
#ifdef __cplusplus
}
#endif //__cplusplus
#else
#include <misc/nvmisc_drf.h>
#include <misc/bitops.h>
#endif // !defined(NVRISCV_LIBFSP_BUILD) || !NVRISCV_LIBFSP_BUILD
#endif // __NV_MISC_H

View File

@@ -132,7 +132,7 @@ NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY, 0x00000066, "Call timed o
NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES, 0x00000067, "Too many primaries")
NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE, 0x00000068, "Unified virtual memory requested address already in use")
NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED, 0x00000069, "Maximum number of sessions reached")
NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") //Contained within the RMAPI library
NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") // Contained within the RMAPI library
NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION, 0x0000006B, "Priv security violation")
NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE, 0x0000006C, "GPU currently in debug mode")
NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED, 0x0000006D, "Requested Feature functionality is not enabled")
@@ -165,7 +165,8 @@ NV_STATUS_CODE(NV_ERR_FABRIC_STATE_OUT_OF_SYNC, 0x00000087, "NVLink fabri
NV_STATUS_CODE(NV_ERR_BUFFER_FULL, 0x00000088, "Buffer is full")
NV_STATUS_CODE(NV_ERR_BUFFER_EMPTY, 0x00000089, "Buffer is empty")
NV_STATUS_CODE(NV_ERR_MC_FLA_OFFSET_TABLE_FULL, 0x0000008A, "Multicast FLA offset table has no available slots")
NV_STATUS_CODE(NV_ERR_DMA_XFER_FAILED, 0x0000008B, "DMA transfer failed")
NV_STATUS_CODE(NV_ERR_OPERATION_ABORTED, 0x0000008B, "Operation has aborted")
NV_STATUS_CODE(NV_ERR_DMA_XFER_FAILED, 0x0000008C, "DMA transfer failed")
// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")

View File

@@ -664,6 +664,27 @@ typedef struct
#endif // defined(_MSC_VER)
/***************************************************************************\
|* *|
|* Definitions of binary/bytes representations of floating point values *|
|* *|
|* Intended for use when passing values across kernel contexts in which *|
|* floating point use is forbidden *|
|* *|
\***************************************************************************/
/*!
* A struct containing the 32-bit representation of an (IEEE-754 binary32)
* floating point value, i.e., the raw underlying bytes of an @ref NvF32
*
* This struct must be used in places where a value may be passed across a
* kernel boundary, as use of floating point within some kernels is, in general,
* banned.
*/
typedef struct
{
NvU32 value;
} NvF32Bytes;
#ifdef __cplusplus
}
#endif

View File

@@ -163,6 +163,7 @@ void NV_API_CALL os_release_rwlock_write (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *);
NvBool NV_API_CALL os_is_bif_reset_supported (void *);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
@@ -233,6 +234,7 @@ NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
NvBool NV_API_CALL os_is_init_ns(void);
NV_STATUS NV_API_CALL os_iommu_sva_bind(void *arg, void **handle, NvU32 *pasid);
void NV_API_CALL os_iommu_sva_unbind(void *handle);
NvBool NV_API_CALL os_supports_kernel_suspend_notifiers(void);
extern NvU64 os_page_size;
extern NvU64 os_max_page_size;