570.86.15

This commit is contained in:
Bernhard Stoeckner
2025-01-27 19:36:56 +01:00
parent 9d0b0414a5
commit 54d69484da
1166 changed files with 318863 additions and 182687 deletions

View File

@@ -32,7 +32,10 @@
typedef enum
{
NV_FIRMWARE_TYPE_GSP,
NV_FIRMWARE_TYPE_GSP_LOG
NV_FIRMWARE_TYPE_GSP_LOG,
#if defined(NV_VMWARE)
NV_FIRMWARE_TYPE_BINDATA
#endif
} nv_firmware_type_t;
typedef enum
@@ -45,6 +48,7 @@ typedef enum
NV_FIRMWARE_CHIP_FAMILY_AD10X = 5,
NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
NV_FIRMWARE_CHIP_FAMILY_GB10X = 8,
NV_FIRMWARE_CHIP_FAMILY_GB20X = 9,
NV_FIRMWARE_CHIP_FAMILY_END,
} nv_firmware_chip_family_t;
@@ -54,6 +58,7 @@ static inline const char *nv_firmware_chip_family_to_string(
{
switch (fw_chip_family) {
case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x";
case NV_FIRMWARE_CHIP_FAMILY_GB20X: return "gb20x";
case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x";
@@ -84,6 +89,7 @@ static inline const char *nv_firmware_for_chip_family(
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
@@ -104,6 +110,7 @@ static inline const char *nv_firmware_for_chip_family(
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
@@ -119,7 +126,12 @@ static inline const char *nv_firmware_for_chip_family(
return "";
}
}
#if defined(NV_VMWARE)
else if (fw_type == NV_FIRMWARE_TYPE_BINDATA)
{
return NV_FIRMWARE_FOR_NAME("bindata_image");
}
#endif
return "";
}
#endif // defined(NV_FIRMWARE_FOR_NAME)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -128,6 +128,9 @@ typedef struct nv_ioctl_register_fd
#define NV_DMABUF_EXPORT_MAX_HANDLES 128
#define NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT 0
#define NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE 1
typedef struct nv_ioctl_export_to_dma_buf_fd
{
int fd;
@@ -136,6 +139,7 @@ typedef struct nv_ioctl_export_to_dma_buf_fd
NvU32 numObjects;
NvU32 index;
NvU64 totalSize NV_ALIGN_BYTES(8);
NvU8 mappingType;
NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);

View File

@@ -231,12 +231,6 @@ NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
const char *, va_list);
#endif
#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH)
#include <asm/eeh.h>
#define NV_PCI_ERROR_RECOVERY_ENABLED() eeh_enabled()
#define NV_PCI_ERROR_RECOVERY
#endif
#if defined(NV_ASM_SET_MEMORY_H_PRESENT)
#include <asm/set_memory.h>
#endif
@@ -609,7 +603,7 @@ static NvBool nv_numa_node_has_memory(int node_id)
#define NV_ALLOC_PAGES_NODE(ptr, nid, order, gfp_mask) \
{ \
(ptr) = (unsigned long)page_address(alloc_pages_node(nid, gfp_mask, order)); \
(ptr) = (unsigned long) alloc_pages_node(nid, gfp_mask, order); \
}
#define NV_GET_FREE_PAGES(ptr, order, gfp_mask) \
@@ -881,16 +875,6 @@ typedef void irqreturn_t;
#define PCI_CAP_ID_EXP 0x10
#endif
/*
* On Linux on PPC64LE enable basic support for Linux PCI error recovery (see
* Documentation/PCI/pci-error-recovery.txt). Currently RM only supports error
* notification and data collection, not actual recovery of the device.
*/
#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH)
#include <asm/eeh.h>
#define NV_PCI_ERROR_RECOVERY
#endif
/*
* If the host OS has page sizes larger than 4KB, we may have a security
* problem. Registers are typically grouped in 4KB pages, but if there are
@@ -1419,8 +1403,6 @@ typedef struct nv_dma_map_s {
0 ? NV_OK : NV_ERR_OPERATING_SYSTEM)
#endif
typedef struct nv_ibmnpu_info nv_ibmnpu_info_t;
typedef struct nv_work_s {
struct work_struct task;
void *data;
@@ -1468,7 +1450,6 @@ struct nv_dma_device {
} addressable_range;
struct device *dev;
NvBool nvlink;
};
/* Properties of the coherent link */
@@ -1517,9 +1498,6 @@ typedef struct nv_linux_state_s {
struct device *dev;
struct pci_dev *pci_dev;
/* IBM-NPU info associated with this GPU */
nv_ibmnpu_info_t *npu;
/* coherent link information */
coherent_link_info_t coherent_link_info;
@@ -1835,7 +1813,7 @@ static inline int nv_is_control_device(struct inode *inode)
return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
}
#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD)
#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD) || defined(NV_DEVICE_VM_BUILD)
#define NV_VGX_HYPER
#if defined(NV_XEN_IOEMU_INJECT_MSI)
#include <xen/ioemu.h>
@@ -1872,59 +1850,6 @@ static inline NvBool nv_alloc_release(nv_linux_file_private_t *nvlfp, nv_alloc_t
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
#endif
/*
* Starting on Power9 systems, DMA addresses for NVLink are no longer
* the same as used over PCIe.
*
* Power9 supports a 56-bit Real Address. This address range is compressed
* when accessed over NVLink to allow the GPU to access all of memory using
* its 47-bit Physical address.
*
* If there is an NPU device present on the system, it implies that NVLink
* sysmem links are present and we need to apply the required address
* conversion for NVLink within the driver.
*
* See Bug 1920398 for further background and details.
*
* Note, a deviation from the documented compression scheme is that the
* upper address bits (i.e. bit 56-63) instead of being set to zero are
* preserved during NVLink address compression so the original PCIe DMA
* address can be reconstructed on expansion. These bits can be safely
* ignored on NVLink since they are truncated by the GPU.
*
* Bug 1968345: As a performance enhancement it is the responsibility of
* the caller on PowerPC platforms to check for presence of an NPU device
* before the address transformation is applied.
*/
static inline NvU64 nv_compress_nvlink_addr(NvU64 addr)
{
    // Compress a PCIe DMA address into the GPU's 47-bit NVLink physical
    // address space (see the comment block above for background). On
    // non-PPC64LE builds this is a pass-through.
    NvU64 addr47 = addr;
#if defined(NVCPU_PPC64LE)
    // Low 43 bits are carried over unchanged.
    addr47 = addr & ((1ULL << 43) - 1);
    // Fold real-address bits 45:46 down into bits 43:44.
    addr47 |= (addr & (0x3ULL << 45)) >> 2;
    // Warn if bit 44 ended up set here (i.e. original bit 46 was set).
    WARN_ON(addr47 & (1ULL << 44));
    // Fold real-address bits 49:50 down into bits 45:46.
    addr47 |= (addr & (0x3ULL << 49)) >> 4;
    // Preserve the upper bits (56-63) so the original PCIe address can be
    // reconstructed by nv_expand_nvlink_addr(); the GPU truncates them.
    addr47 |= addr & ~((1ULL << 56) - 1);
#endif
    return addr47;
}
static inline NvU64 nv_expand_nvlink_addr(NvU64 addr47)
{
    // Inverse of nv_compress_nvlink_addr(): reconstruct the original PCIe
    // DMA address from a compressed 47-bit NVLink address. On
    // non-PPC64LE builds this is a pass-through.
    NvU64 addr = addr47;
#if defined(NVCPU_PPC64LE)
    // Low 43 bits are carried over unchanged.
    addr = addr47 & ((1ULL << 43) - 1);
    // Move compressed bits 43:44 back up to bits 45:46.
    addr |= (addr47 & (3ULL << 43)) << 2;
    // Move compressed bits 45:46 back up to bits 49:50.
    addr |= (addr47 & (3ULL << 45)) << 4;
    // Restore the preserved upper bits (56-63).
    addr |= addr47 & ~((1ULL << 56) - 1);
#endif
    return addr;
}
// Default flags for ISRs
static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
{

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,5 +36,6 @@ int nv_pci_count_devices(void);
NvU8 nv_find_pci_capability(struct pci_dev *, NvU8);
int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct device *);
#endif

View File

@@ -368,6 +368,8 @@ typedef struct nv_state_t
{
NvBool valid;
NvU8 uuid[GPU_UUID_LEN];
NvBool pci_uuid_read_attempted;
NV_STATUS pci_uuid_status;
} nv_uuid_cache;
void *handle;
@@ -479,6 +481,8 @@ typedef struct nv_state_t
/* Bool to check if the GPU has a coherent sysmem link */
NvBool coherent;
/* OS detected GPU has ATS capability */
NvBool ats_support;
/*
* NUMA node ID of the CPU to which the GPU is attached.
* Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
@@ -570,7 +574,8 @@ typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemor
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
#define NV_FLAG_SOC_IGPU 0x0200
// Unused 0x0400
/* To be set when an FLR needs to be triggered after device shut down. */
#define NV_FLAG_TRIGGER_FLR 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
// Unused 0x2000
@@ -613,6 +618,7 @@ typedef struct
const char *gc6_support;
const char *gcoff_support;
const char *s0ix_status;
const char *db_support;
} nv_power_info_t;
#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga)
@@ -758,6 +764,7 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
#endif
/*
* driver internal interfaces
*/
@@ -813,7 +820,6 @@ NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU6
void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64);
void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *);
void NV_API_CALL nv_dma_enable_nvlink (nv_dma_device_t *);
NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *);
NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *);
@@ -840,9 +846,7 @@ NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32,
NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list);
NvU64 NV_API_CALL nv_get_dma_start_address (nv_state_t *);
NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *);
NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *);
NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *);
NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *);
@@ -855,19 +859,8 @@ void NV_API_CALL nv_put_file_private(void *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv);
void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64);
void NV_API_CALL nv_p2p_free_platform_data(void *data);
#if defined(NVCPU_PPC64LE)
NV_STATUS NV_API_CALL nv_get_nvlink_line_rate (nv_state_t *, NvU32 *);
#endif
NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *);
void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *);
void NV_API_CALL nv_release_mmap_lock (nv_state_t *);
@@ -998,18 +991,24 @@ NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU6
NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64);
NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *, void **);
NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **);
NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *, void *);
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, MemoryRange, void *, NvBool, MemoryArea *);
void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, void *, NvBool, MemoryArea);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *,
NvHandle, NvHandle, MemoryRange,
NvU8, void *, NvBool, MemoryArea *);
void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *,
NvHandle, NvHandle, NvU8, void *,
NvBool, MemoryArea);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *,
nv_state_t *, NvHandle, NvHandle,
NvU8, NvHandle *, NvHandle *,
NvHandle *, void **, NvBool *);
void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
@@ -1026,7 +1025,6 @@ NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_disable_iomap_wc(void);
void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
@@ -1043,12 +1041,14 @@ void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
NvBool NV_API_CALL rm_is_altstack_in_use(void);
/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *,
NvU32 *, NvU32 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);

View File

@@ -592,6 +592,14 @@ void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel);
Error codes:
NV_ERR_GENERIC
NV_ERR_NO_MEMORY
NV_ERR_INVALID_STATE
NV_ERR_NOT_SUPPORTED
NV_ERR_NOT_READY
NV_ERR_INVALID_LOCK_STATE
NV_ERR_INVALID_STATE
NV_ERR_NVLINK_FABRIC_NOT_READY
NV_ERR_NVLINK_FABRIC_FAILURE
NV_ERR_GPU_MEMORY_ONLINING_FAILURE
*/
NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device,
UvmGpuCaps *caps);

View File

@@ -620,19 +620,12 @@ typedef struct UvmGpuClientInfo_tag
NvHandle hSmcPartRef;
} UvmGpuClientInfo;
typedef enum
{
UVM_GPU_CONF_COMPUTE_MODE_NONE,
UVM_GPU_CONF_COMPUTE_MODE_APM,
UVM_GPU_CONF_COMPUTE_MODE_HCC,
UVM_GPU_CONF_COMPUTE_MODE_COUNT
} UvmGpuConfComputeMode;
typedef struct UvmGpuConfComputeCaps_tag
{
// Out: GPU's confidential compute mode
UvmGpuConfComputeMode mode;
// Is key rotation enabled for UVM keys
// Out: true if Confidential Computing is enabled on the GPU
NvBool bConfComputingEnabled;
// Out: true if key rotation is enabled (for UVM keys) on the GPU
NvBool bKeyRotationEnabled;
} UvmGpuConfComputeCaps;
@@ -746,6 +739,8 @@ typedef struct UvmGpuInfo_tag
// to NVSwitch peers.
NvU64 nvswitchEgmMemoryWindowStart;
// GPU supports ATS capability
NvBool atsSupport;
} UvmGpuInfo;
typedef struct UvmGpuFbInfo_tag
@@ -759,6 +754,7 @@ typedef struct UvmGpuFbInfo_tag
NvBool bZeroFb; // Zero FB mode enabled.
NvU64 maxVidmemPageSize; // Largest GPU page size to access vidmem.
NvBool bStaticBar1Enabled; // Static BAR1 mode is enabled
NvU64 staticBar1StartOffset; // The start offset of the static mapping
NvU64 staticBar1Size; // The size of the static mapping
} UvmGpuFbInfo;

View File

@@ -544,6 +544,9 @@ struct NvKmsKapiCreateSurfaceParams {
* explicit_layout is NV_TRUE and layout is
* NvKmsSurfaceMemoryLayoutBlockLinear */
NvU8 log2GobsPerBlockY;
/* [IN] Whether a surface can be updated directly on the screen */
NvBool noDisplayCaching;
};
enum NvKmsKapiAllocationType {
@@ -1011,6 +1014,17 @@ struct NvKmsKapiFunctionsTable {
const void *pLinearAddress
);
/*!
* Check if memory object allocated is video memory.
*
* \param [in] memory Memory allocated using allocateMemory()
*
* \return NV_TRUE if memory is vidmem, NV_FALSE otherwise.
*/
NvBool (*isVidmem)(
const struct NvKmsKapiMemory *memory
);
/*!
* Create a formatted surface from an NvKmsKapiMemory object.
*

View File

@@ -33,38 +33,18 @@ extern "C" {
#include "nvtypes.h"
#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Miscellaneous macros useful for bit field manipulations
//
// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a.
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif
//
// It is recommended to use the following bit macros to avoid macro name
// collisions with other src code bases.
//
// Miscellaneous macros useful for bit field manipulations.
#ifndef NVBIT
#define NVBIT(b) (1U<<(b))
#define NVBIT(b) (1U<<(b))
#endif
#ifndef NVBIT_TYPE
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#endif
#ifndef NVBIT32
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#endif
#ifndef NVBIT64
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#endif
//Concatenate 2 32bit values to a 64bit value
@@ -72,7 +52,7 @@ extern "C" {
// Helper macro's for 32 bit bitmasks
#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
#define NV_BITMASK32_SET(pChannelMask, chId) \
(pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
@@ -990,6 +970,22 @@ static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
// Get the number of elements the specified fixed-size array
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Deprecated macros whose definition can be removed once the code base no longer references them.
// Use the NVBIT* macros instead of these macros.
//
// Each #ifndef guard avoids a redefinition clash when another header has
// already defined the same name. NOTE(review): presumably this collides
// with e.g. the Linux kernel's BIT() macro — confirm which headers clash.
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif
#ifdef __cplusplus
}
#endif //__cplusplus

View File

@@ -155,6 +155,10 @@ NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation no
NV_STATUS_CODE(NV_ERR_TEST_ONLY_CODE_NOT_ENABLED, 0x0000007E, "Test-only code path not enabled")
NV_STATUS_CODE(NV_ERR_SECURE_BOOT_FAILED, 0x0000007F, "GFW secure boot failed")
NV_STATUS_CODE(NV_ERR_INSUFFICIENT_ZBC_ENTRY, 0x00000080, "No more ZBC entry for the client")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_NOT_READY, 0x00000081, "Nvlink Fabric Status or Fabric Probe is not yet complete, caller needs to retry")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_FAILURE, 0x00000082, "Nvlink Fabric Probe failed")
NV_STATUS_CODE(NV_ERR_GPU_MEMORY_ONLINING_FAILURE, 0x00000083, "GPU Memory Onlining failed")
NV_STATUS_CODE(NV_ERR_REDUCTION_MANAGER_NOT_AVAILABLE, 0x00000084, "Reduction Manager is not available")
// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")

View File

@@ -170,7 +170,7 @@ NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
@@ -178,6 +178,7 @@ NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_device_vm_present (void);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
@@ -213,6 +214,7 @@ enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
void NV_API_CALL os_pci_trigger_flr(void *handle);
NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
@@ -220,6 +222,7 @@ NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
NvBool NV_API_CALL os_is_init_ns(void);
extern NvU32 os_page_size;
extern NvU64 os_page_mask;