520.61.05

commit 90eb10774f (parent fe0728787f)
Author: Andy Ritger
Date:   2022-10-10 14:59:24 -07:00

758 changed files with 88,383 additions and 26,493 deletions

View File

@@ -101,13 +101,6 @@
# define NV_ANDROID
#endif
#if defined(DceCore) && !defined(NV_DCECORE)
# define NV_DCECORE
#endif
@@ -355,15 +348,6 @@
#define NVOS_IS_INTEGRITY 0
#endif
#if defined(NVCPU_X86)
#define NVCPU_IS_X86 1
#else

View File

@@ -1037,6 +1037,32 @@ static inline vm_fault_t nv_insert_pfn(struct vm_area_struct *vma,
return VM_FAULT_SIGBUS;
}
/* Converts an NV BAR index to the Linux-specific PCI BAR index */
static inline NvU8 nv_bar_index_to_os_bar_index
(
struct pci_dev *dev,
NvU8 nv_bar_index
)
{
NvU8 bar_index = 0;
NvU8 i;
BUG_ON(nv_bar_index >= NV_GPU_NUM_BARS);
for (i = 0; i < nv_bar_index; i++)
{
if (NV_PCI_RESOURCE_FLAGS(dev, bar_index) & PCI_BASE_ADDRESS_MEM_TYPE_64)
{
bar_index += 2;
}
else
{
bar_index++;
}
}
return bar_index;
}
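Worth noting why this translation walks every preceding slot: a 64-bit memory BAR occupies two consecutive PCI BAR registers, so the OS-visible index drifts ahead of NVIDIA's logical index. Below is a minimal user-space sketch of the same walk; the mock flags array and BAR layout are hypothetical stand-ins for NV_PCI_RESOURCE_FLAGS() on a real pci_dev.

#include <stdio.h>

/* Value as in <linux/pci_regs.h>. */
#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04

/* Hypothetical layout: OS BAR 0 is 32-bit; OS BARs 1-2 and 3-4 are 64-bit. */
static const unsigned mock_bar_flags[6] = {
    0,
    PCI_BASE_ADDRESS_MEM_TYPE_64, 0,
    PCI_BASE_ADDRESS_MEM_TYPE_64, 0,
    0
};

static unsigned char mock_bar_index_to_os_bar_index(unsigned char nv_bar_index)
{
    unsigned char bar_index = 0;
    unsigned char i;

    for (i = 0; i < nv_bar_index; i++)
    {
        /* A 64-bit BAR consumes two slots, so skip an extra one. */
        if (mock_bar_flags[bar_index] & PCI_BASE_ADDRESS_MEM_TYPE_64)
            bar_index += 2;
        else
            bar_index++;
    }
    return bar_index;
}

int main(void)
{
    unsigned char nv;

    /* With the layout above, NV BARs 0, 1, 2 map to OS BARs 0, 1, 3. */
    for (nv = 0; nv < 3; nv++)
        printf("NV BAR %u -> OS BAR %u\n",
               (unsigned)nv, (unsigned)mock_bar_index_to_os_bar_index(nv));
    return 0;
}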
#define NV_PAGE_MASK (NvU64)(long)PAGE_MASK
@@ -1161,16 +1187,6 @@ typedef struct nvidia_pte_s {
unsigned int page_count;
} nvidia_pte_t;
typedef struct nv_alloc_s {
struct nv_alloc_s *next;
struct device *dev;
@@ -1413,34 +1429,6 @@ struct os_wait_queue {
struct completion q;
};
/*
 * To report an error in MSI/MSI-X when the unhandled count reaches a threshold
 */
@@ -1464,19 +1452,6 @@ struct nv_dma_device {
NvBool nvlink;
};
/* linux-specific version of old nv_state_t */
/* this is a general os-specific state structure. the first element *must* be
the general state structure, for the generic unix-based code */
@@ -1492,11 +1467,6 @@ typedef struct nv_linux_state_s {
/* IBM-NPU info associated with this GPU */
nv_ibmnpu_info_t *npu;
/* NUMA node information for the platforms where GPU memory is presented
* as a NUMA node to the kernel */
struct {
@@ -1576,23 +1546,6 @@ typedef struct nv_linux_state_s {
/* Per-device notifier block for ACPI events */
struct notifier_block acpi_nb;
/* Lock serializing ISRs for different SOC vectors */
nv_spinlock_t soc_isr_lock;
@@ -1760,12 +1713,10 @@ static inline struct kmem_cache *nv_kmem_cache_create(const char *name, unsigned
return cache;
}
#if defined(CONFIG_PCI_IOV)
#define NV_PCI_SRIOV_SUPPORT
#endif /* CONFIG_PCI_IOV */
#define NV_PCIE_CFG_MAX_OFFSET 0x1000
#include "nv-proto.h"
@@ -1959,11 +1910,6 @@ static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
NvS32 nv_request_soc_irq(nv_linux_state_t *, NvU32, nv_soc_irq_type_t, NvU32, NvU32);
static inline void nv_mutex_destroy(struct mutex *lock)
{
mutex_destroy(lock);

View File

@@ -47,55 +47,37 @@ typedef int vm_fault_t;
*
*/
-#if defined(NV_GET_USER_PAGES_HAS_TASK_STRUCT)
-#if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
-#define NV_GET_USER_PAGES(start, nr_pages, write, force, pages, vmas) \
-    get_user_pages(current, current->mm, start, nr_pages, write, force, pages, vmas)
-#else
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-static inline long NV_GET_USER_PAGES(unsigned long start,
-                                     unsigned long nr_pages,
-                                     int write,
-                                     int force,
-                                     struct page **pages,
-                                     struct vm_area_struct **vmas)
-{
-    unsigned int flags = 0;
-
-    if (write)
-        flags |= FOLL_WRITE;
-    if (force)
-        flags |= FOLL_FORCE;
-
-    return get_user_pages(current, current->mm, start, nr_pages, flags,
-                          pages, vmas);
-}
-#endif
-#else
-#if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
-#define NV_GET_USER_PAGES get_user_pages
-#else
-#include <linux/mm.h>
-
-static inline long NV_GET_USER_PAGES(unsigned long start,
-                                     unsigned long nr_pages,
-                                     int write,
-                                     int force,
-                                     struct page **pages,
-                                     struct vm_area_struct **vmas)
-{
-    unsigned int flags = 0;
-
-    if (write)
-        flags |= FOLL_WRITE;
-    if (force)
-        flags |= FOLL_FORCE;
-
-    return get_user_pages(start, nr_pages, flags, pages, vmas);
-}
-#endif
-#endif
+#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE)
+#define NV_GET_USER_PAGES get_user_pages
+#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE)
+#define NV_GET_USER_PAGES(start, nr_pages, write, force, pages, vmas) \
+    get_user_pages(current, current->mm, start, nr_pages, write, force, pages, vmas)
+#else
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+static inline long NV_GET_USER_PAGES(unsigned long start,
+                                     unsigned long nr_pages,
+                                     int write,
+                                     int force,
+                                     struct page **pages,
+                                     struct vm_area_struct **vmas)
+{
+    unsigned int flags = 0;
+
+    if (write)
+        flags |= FOLL_WRITE;
+    if (force)
+        flags |= FOLL_FORCE;
+
+#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS)
+    return get_user_pages(current, current->mm, start, nr_pages, flags,
+                          pages, vmas);
+#else
+    // remaining definition (NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
+    return get_user_pages(start, nr_pages, flags, pages, vmas);
+#endif
+}
+#endif
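The conftest-selected shims above all reduce to the same move: once get_user_pages() stopped taking separate write/force booleans, the caller must fold them into a single gup_flags word. A standalone sketch of that folding, with FOLL_* values copied from older <linux/mm.h> for illustration only:

#include <assert.h>

#define FOLL_WRITE 0x01 /* page must be writable */
#define FOLL_FORCE 0x10 /* access even without permission */

static unsigned int nv_compute_gup_flags(int write, int force)
{
    unsigned int flags = 0;

    if (write)
        flags |= FOLL_WRITE;
    if (force)
        flags |= FOLL_FORCE;
    return flags;
}

int main(void)
{
    assert(nv_compute_gup_flags(1, 0) == FOLL_WRITE);
    assert(nv_compute_gup_flags(1, 1) == (FOLL_WRITE | FOLL_FORCE));
    assert(nv_compute_gup_flags(0, 0) == 0);
    return 0;
}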
/*
@@ -131,7 +113,7 @@ typedef int vm_fault_t;
*/
#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
-#if defined(NV_GET_USER_PAGES_REMOTE_HAS_WRITE_AND_FORCE_ARGS)
+#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
#else
static inline long NV_GET_USER_PAGES_REMOTE(struct task_struct *tsk,
@@ -150,26 +132,21 @@ typedef int vm_fault_t;
if (force)
flags |= FOLL_FORCE;
-#if defined(NV_GET_USER_PAGES_REMOTE_HAS_LOCKED_ARG)
-#if defined (NV_GET_USER_PAGES_REMOTE_HAS_TSK_ARG)
-    return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
-                                 pages, vmas, NULL);
-#else
-    return get_user_pages_remote(mm, start, nr_pages, flags,
-                                 pages, vmas, NULL);
-#endif
-#else
-    return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
-                                 pages, vmas);
+#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS)
+    return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+                                 pages, vmas);
+#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED)
+    return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+                                 pages, vmas, NULL);
+#else
+    // remaining defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
+    return get_user_pages_remote(mm, start, nr_pages, flags,
+                                 pages, vmas, NULL);
#endif
}
#endif
#else
-#if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
+#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE)
#define NV_GET_USER_PAGES_REMOTE get_user_pages
#else
#include <linux/mm.h>

View File

@@ -27,9 +27,6 @@
#include "nv-pci.h"
#include "nv-register-module.h"
extern const char *nv_device_name;
extern nvidia_module_t nv_fops;

View File

@@ -47,11 +47,6 @@ extern nv_cap_t *nvidia_caps_root;
extern const NvBool nv_is_rm_firmware_supported_os;
#include <nv-kernel-interface-api.h>
/* NVIDIA's reserved major character device number (Linux). */
@@ -286,6 +281,7 @@ typedef struct nv_usermap_access_params_s
NvU64 access_size;
NvU64 remap_prot_extra;
NvBool contig;
NvU32 caching;
} nv_usermap_access_params_t;
/*
@@ -303,6 +299,7 @@ typedef struct nv_alloc_mapping_context_s {
NvU64 remap_prot_extra;
NvU32 prot;
NvBool valid;
NvU32 caching;
} nv_alloc_mapping_context_t;
typedef enum
@@ -331,6 +328,9 @@ typedef struct nv_soc_irq_info_s {
#define NV_MAX_DPAUX_NUM_DEVICES 4
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING
#define NV_IGPU_LEGACY_STALL_IRQ 70
#define NV_IGPU_MAX_STALL_IRQS 3
#define NV_IGPU_MAX_NONSTALL_IRQS 1
/*
* per device state
*/
@@ -367,6 +367,7 @@ typedef struct nv_state_t
nv_aperture_t *hdacodec_regs;
nv_aperture_t *mipical_regs;
nv_aperture_t *fb, ud;
nv_aperture_t *simregs;
NvU32 num_dpaux_instance;
NvU32 interrupt_line;
@@ -379,6 +380,11 @@ typedef struct nv_state_t
NvU32 soc_dcb_size;
NvU32 disp_sw_soc_chip_id;
NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
NvU32 igpu_nonstall_irq;
NvU32 num_stall_irqs;
NvU64 dma_mask;
NvBool primary_vga;
NvU32 sim_env;
@@ -456,6 +462,9 @@ typedef struct nv_state_t
NvBool printed_openrm_enable_unsupported_gpus_error;
/* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
NvBool nvpcf_dsm_in_gpu_scope;
} nv_state_t;
// These defines need to be in sync with the defines in system.h
@@ -520,7 +529,7 @@ typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64);
#define NV_FLAG_USES_MSIX 0x0040
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
-// Unused 0x0200
+#define NV_FLAG_SOC_IGPU 0x0200
// Unused 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
@@ -569,6 +578,9 @@ typedef enum
#define NV_IS_SOC_DISPLAY_DEVICE(nv) \
((nv)->flags & NV_FLAG_SOC_DISPLAY)
#define NV_IS_SOC_IGPU_DEVICE(nv) \
((nv)->flags & NV_FLAG_SOC_IGPU)
#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
(((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
@@ -627,18 +639,12 @@ typedef enum
static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((offset >= nv->regs->cpu_address) &&
((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1))));
}
static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->fb) && (offset >= nv->fb->cpu_address) &&
((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}
@@ -646,9 +652,6 @@ static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) &&
(offset >= nv->ud.cpu_address) &&
((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1))));
}
@@ -657,9 +660,6 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) &&
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) &&
(offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) &&
((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address +
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))));
}
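All of these IS_*_OFFSET() helpers share one subtlety: the upper bound is computed as base + (size - 1) and compared against offset + (length - 1), which cannot wrap for a window that ends at the top of the 64-bit space (callers pass length >= 1). A self-contained sketch of the pattern:

#include <assert.h>
#include <stdint.h>

/* Overflow-safe containment test for [base, base + size); assumes
 * size >= 1 and length >= 1, as the IS_*_OFFSET() callers do. */
static int in_range(uint64_t base, uint64_t size,
                    uint64_t offset, uint64_t length)
{
    return (offset >= base) &&
           ((offset + (length - 1)) <= (base + (size - 1)));
}

int main(void)
{
    /* A 4KB window ending at the very top of the 64-bit address space. */
    uint64_t base = UINT64_MAX - 0xFFF;

    assert(in_range(base, 0x1000, base, 0x1000));  /* whole window fits  */
    assert(!in_range(base, 0x1000, base - 1, 2));  /* starts below base  */
    /* The naive form (offset + length <= base + size) would wrap to 0. */
    return 0;
}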
@@ -799,7 +799,7 @@ void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void NV_API_CALL nv_put_file_private(void *);
-NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvU32 *, NvS32 *);
+NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
@@ -850,11 +850,9 @@ void NV_API_CALL nv_dma_release_dma_buf (void *, nv_dma_buf_t *);
void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *);
NvBool NV_API_CALL nv_platform_supports_s0ix (void);
NvBool NV_API_CALL nv_s2idle_pm_configured (void);
NvBool NV_API_CALL nv_is_chassis_notebook (void);
void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv);
void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv);
@@ -864,45 +862,6 @@ typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *)
NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *);
NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *);
/*
* ---------------------------------------------------------------------------
*
@@ -1019,7 +978,7 @@ void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *);
/* vGPU VFIO specific functions */
-NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
+NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 **, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
@@ -1045,18 +1004,6 @@ static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv)
return nv->nv_uuid_cache.valid ? nv->nv_uuid_cache.uuid : NULL;
}
#if defined(NVCPU_X86_64)
static inline NvU64 nv_rdtsc(void)

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -201,7 +201,8 @@ void nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace);
and will return a unique GPU virtual address.
The default page size will be the small page size (as returned by query
-   caps). The Alignment will also be enforced to small page size(64K/128K).
+   caps). The physical alignment will also be enforced to small page
+   size(64K/128K).
Arguments:
vaSpace[IN] - Pointer to vaSpace object
@@ -211,15 +212,15 @@ void nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace);
contains below given fields
allocInfo Members:
rangeBegin[IN] - Allocation will be made between rangeBegin
rangeEnd[IN] and rangeEnd (both inclusive). Default is
no range limitation.
gpuPhysOffset[OUT] - Physical offset of allocation returned only
if contiguous allocation is requested.
pageSize[IN] - Override the default page size (see above).
alignment[IN] - gpuPointer GPU VA alignment. 0 means 4KB
alignment.
bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default
will follow the vidHeapControl default policy.
bHandleProvided [IN] - Flag to signify that the client has provided
the handle for phys allocation.
bMemGrowsDown[IN]
bPersistentVidmem[IN] - Allocate persistent vidmem.
hPhysHandle[IN/OUT] - The handle will be used in allocation if provided.
If not provided, the allocator will return the
handle it eventually used.
@@ -247,7 +248,6 @@ NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace,
and will return a unique GPU virtual address.
The default page size will be the small page size (as returned by query caps).
The alignment will also be enforced to small page size.
Arguments:
vaSpace[IN] - Pointer to vaSpace object
@@ -257,15 +257,15 @@ NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace,
contains below given fields
allocInfo Members:
rangeBegin[IN] - Allocation will be made between rangeBegin
rangeEnd[IN] and rangeEnd (both inclusive). Default is
no range limitation.
gpuPhysOffset[OUT] - Physical offset of allocation returned only
if contiguous allocation is requested.
pageSize[IN] - Override the default page size (see above).
alignment[IN] - gpuPointer GPU VA alignment. 0 means 4KB
alignment.
bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default
will follow the vidHeapControl default policy.
bHandleProvided [IN] - Flag to signify that the client has provided
the handle for phys allocation.
bMemGrowsDown[IN]
bPersistentVidmem[IN] - Allocate persistent vidmem.
hPhysHandle[IN/OUT] - The handle will be used in allocation if provided.
If not provided, the allocator will return the
handle it eventually used.
@@ -671,14 +671,16 @@ NV_STATUS nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace);
For duplication of physical memory use nvUvmInterfaceDupMemory.
Arguments:
-    srcVaSpace[IN] - Source VA space.
-    srcAddress[IN] - GPU VA in the source VA space. The provided address
-                     should match one previously returned by
-                     nvUvmInterfaceMemoryAllocFB or
-                     nvUvmInterfaceMemoryAllocSys.
-    dstVaSpace[IN] - Destination VA space where the new mapping will be
-                     created.
-    dstAddress[OUT] - Pointer to the GPU VA in the destination VA space.
+    srcVaSpace[IN] - Source VA space.
+    srcAddress[IN] - GPU VA in the source VA space. The provided address
+                     should match one previously returned by
+                     nvUvmInterfaceMemoryAllocFB or
+                     nvUvmInterfaceMemoryAllocSys.
+    dstVaSpace[IN] - Destination VA space where the new mapping will be
+                     created.
+    dstVaAlignment[IN] - Alignment of the GPU VA in the destination VA
+                         space. 0 means 4KB alignment.
+    dstAddress[OUT] - Pointer to the GPU VA in the destination VA space.
Error codes:
NV_ERR_INVALID_ARGUMENT - If any of the inputs is invalid, or the source
@@ -692,6 +694,7 @@ NV_STATUS nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace);
NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
uvmGpuAddressSpaceHandle dstVaSpace,
NvU64 dstVaAlignment,
NvU64 *dstAddress);
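A hedged usage sketch of the widened prototype: a thin wrapper that makes the new dstVaAlignment parameter explicit. Only the argument order shown in the prototype above is assumed; the 64KB value is an arbitrary example (0 requests the default 4KB alignment).

static NV_STATUS dup_with_64k_alignment(uvmGpuAddressSpaceHandle srcVaSpace,
                                        NvU64 srcAddress,
                                        uvmGpuAddressSpaceHandle dstVaSpace,
                                        NvU64 *dstAddress)
{
    /* 0x10000 = 64KB alignment for the duplicated GPU VA; 0 means 4KB. */
    return nvUvmInterfaceDupAllocation(srcVaSpace, srcAddress,
                                       dstVaSpace, 0x10000, dstAddress);
}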
/*******************************************************************************
@@ -1068,10 +1071,6 @@ void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session,
NV_ERR_NOT_READY - Returned when querying the PTEs requires a deferred setup
which has not yet completed. It is expected that the caller
will reattempt the call until a different code is returned.
*/
NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
NvHandle hMemory,
@@ -1260,7 +1259,7 @@ void nvUvmInterfacePagingChannelDestroy(UvmGpuPagingChannelHandle channel);
device[IN] - device under which paging channels were allocated
dstAddress[OUT] - a virtual address that is valid (i.e. is mapped) in
all the paging channels allocated under the given vaSpace.
Error codes:
NV_ERR_INVALID_ARGUMENT - An invalid parameter was passed.
NV_ERR_NOT_SUPPORTED - SR-IOV heavy mode is disabled.
@@ -1373,7 +1372,7 @@ void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace,
methodStreamSize[IN] - Size of methodStream, in bytes. The maximum push
size is 128KB.
Error codes:
NV_ERR_INVALID_ARGUMENT - An invalid parameter was passed.
NV_ERR_NOT_SUPPORTED - SR-IOV heavy mode is disabled.
@@ -1382,136 +1381,4 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
char *methodStream,
NvU32 methodStreamSize);
#endif // _NV_UVM_INTERFACE_H_

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -217,12 +217,6 @@ typedef struct UvmGpuChannelInstanceInfo_tag
// Out: Type of the engine the channel is bound to
NvU32 channelEngineType;
// Out: Channel handle required to ring the doorbell
NvU32 workSubmissionToken;
// Out: Address of the doorbell
volatile NvU32 *workSubmissionOffset;
// Out: Channel handle to be used in the CLEAR_FAULTED method
NvU32 clearFaultedToken;
@@ -231,6 +225,10 @@ typedef struct UvmGpuChannelInstanceInfo_tag
// Ampere+ GPUs
volatile NvU32 *pChramChannelRegister;
// Out: Address of the Runlist PRI Base Register required to ring the
// doorbell after clearing the faulted bit.
volatile NvU32 *pRunlistPRIBaseRegister;
// Out: SMC engine id to which the GR channel is bound, or zero if the GPU
// does not support SMC or it is a CE channel
NvU32 smcEngineId;
@@ -372,10 +370,8 @@ typedef enum
UVM_LINK_TYPE_NVLINK_1,
UVM_LINK_TYPE_NVLINK_2,
UVM_LINK_TYPE_NVLINK_3,
UVM_LINK_TYPE_NVLINK_4,
UVM_LINK_TYPE_C2C,
} UVM_LINK_TYPE;
typedef struct UvmGpuCaps_tag
@@ -433,19 +429,13 @@ typedef struct UvmGpuAddressSpaceInfo_tag
typedef struct UvmGpuAllocInfo_tag
{
NvU64 rangeBegin; // Allocation will be made between
NvU64 rangeEnd; // rangeBegin & rangeEnd both included
NvU64 gpuPhysOffset; // Returns gpuPhysOffset if contiguous requested
NvU32 pageSize; // default is RM big page size - 64K or 128K; else use 4K or 2M
-NvU64 alignment; // Alignment of allocation
+NvU64 alignment; // Virtual alignment
NvBool bContiguousPhysAlloc; // Flag to request contiguous physical allocation
NvBool bMemGrowsDown; // Causes RM to reserve physical heap from top of FB
NvBool bPersistentVidmem; // Causes RM to allocate persistent video memory
NvHandle hPhysHandle; // Handle for phys allocation either provided or retrieved
} UvmGpuAllocInfo;
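To see how the fields combine, here is a hedged sketch requesting a physically contiguous, 2MB-page vidmem allocation. The nvUvmInterfaceMemoryAllocFB() parameter order is an assumption based on its documentation earlier in this change; gpuPhysOffset is read back only because bContiguousPhysAlloc was set.

static NV_STATUS alloc_contig_2m_fb(uvmGpuAddressSpaceHandle vaSpace,
                                    NvLength length,
                                    UvmGpuPointer *gpuPointer,
                                    NvU64 *physOffset)
{
    UvmGpuAllocInfo allocInfo = { 0 }; /* zeroed: default range, no handle */
    NV_STATUS status;

    allocInfo.pageSize             = 2 * 1024 * 1024; /* override default big page */
    allocInfo.alignment            = 2 * 1024 * 1024; /* virtual alignment */
    allocInfo.bContiguousPhysAlloc = NV_TRUE;

    status = nvUvmInterfaceMemoryAllocFB(vaSpace, length, gpuPointer, &allocInfo);
    if (status == NV_OK)
        *physOffset = allocInfo.gpuPhysOffset; /* valid: contiguous requested */
    return status;
}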
typedef enum
@@ -576,10 +566,8 @@ typedef struct UvmPlatformInfo_tag
// Out: ATS (Address Translation Services) is supported
NvBool atsSupported;
// Out: AMD SEV (Secure Encrypted Virtualization) is enabled
NvBool sevEnabled;
} UvmPlatformInfo;
typedef struct UvmGpuClientInfo_tag
@@ -589,24 +577,6 @@ typedef struct UvmGpuClientInfo_tag
NvHandle hSmcPartRef;
} UvmGpuClientInfo;
#define UVM_GPU_NAME_LENGTH 0x40
typedef struct UvmGpuInfo_tag
@@ -671,10 +641,6 @@ typedef struct UvmGpuInfo_tag
UvmGpuClientInfo smcUserClientInfo;
} UvmGpuInfo;
typedef struct UvmGpuFbInfo_tag
@@ -717,11 +683,6 @@ typedef struct UvmPmaStatistics_tag
volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions
volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions
volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions
} UvmPmaStatistics;
/*******************************************************************************
@@ -961,10 +922,4 @@ typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
typedef UvmPmaAllocationOptions gpuPmaAllocationOptions;
#endif // _NV_UVM_TYPES_H_

View File

@@ -416,6 +416,12 @@ struct NvKmsKapiCreateSurfaceParams {
NvU8 log2GobsPerBlockY;
};
enum NvKmsKapiAllocationType {
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT = 0,
NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER = 1,
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
};
struct NvKmsKapiFunctionsTable {
/*!
@@ -609,6 +615,8 @@ struct NvKmsKapiFunctionsTable {
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] layout BlockLinear or Pitch.
*
* \param [in] type Allocation type.
*
* \param [in] size Size, in bytes, of the memory to allocate.
*
@@ -624,6 +632,7 @@ struct NvKmsKapiFunctionsTable {
(
struct NvKmsKapiDevice *device,
enum NvKmsSurfaceMemoryLayout layout,
enum NvKmsKapiAllocationType type,
NvU64 size,
NvU8 *compressible
);
@@ -637,6 +646,8 @@ struct NvKmsKapiFunctionsTable {
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] layout BlockLinear or Pitch.
*
* \param [in] type Allocation type.
*
* \param [in] size Size, in bytes, of the memory to allocate.
*
@@ -652,6 +663,7 @@ struct NvKmsKapiFunctionsTable {
(
struct NvKmsKapiDevice *device,
enum NvKmsSurfaceMemoryLayout layout,
enum NvKmsKapiAllocationType type,
NvU64 size,
NvU8 *compressible
);
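A sketch of how a caller might pass the new type argument through the functions table. The member name allocateVideoMemory and the struct NvKmsKapiMemory return type are assumptions inferred from the parameter lists above, and NvKmsSurfaceMemoryLayoutPitch stands in for whichever layout the caller needs.

static struct NvKmsKapiMemory *alloc_scanout_memory(
    const struct NvKmsKapiFunctionsTable *nvKms,
    struct NvKmsKapiDevice *device,
    NvU64 size)
{
    NvU8 compressible = 0;

    /* Hypothetical member name; the allocation type is the new parameter. */
    return nvKms->allocateVideoMemory(device,
                                      NvKmsSurfaceMemoryLayoutPitch,
                                      NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
                                      size,
                                      &compressible);
}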

View File

@@ -31,13 +31,6 @@
/*
* This is the maximum number of GPUs supported in a single system.
*/

View File

@@ -125,6 +125,7 @@ NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NvU64 NV_API_CALL os_get_max_user_va (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
@@ -193,19 +194,12 @@ void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_32BIT,
OS_INTF_PCIE_REQ_ATOMICS_64BIT,
OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
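For context, a minimal sketch of how a probe path might use this interface: ask for the widest atomic completer width first and treat failure as non-fatal. The void * handle is opaque at this layer, and the fallback ladder is an illustrative assumption, not the driver's documented policy.

static void enable_pcie_req_atomics(void *pci_handle)
{
    /* Try 128-bit CAS first, then narrower widths; missing PCIe
     * requester-atomics support does not block device init. */
    if (os_enable_pci_req_atomics(pci_handle, OS_INTF_PCIE_REQ_ATOMICS_128BIT) != NV_OK &&
        os_enable_pci_req_atomics(pci_handle, OS_INTF_PCIE_REQ_ATOMICS_64BIT)  != NV_OK &&
        os_enable_pci_req_atomics(pci_handle, OS_INTF_PCIE_REQ_ATOMICS_32BIT)  != NV_OK)
    {
        /* Fall through: requester atomics remain disabled. */
    }
}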
extern NvU32 os_page_size;
extern NvU64 os_page_mask;
@@ -245,11 +239,4 @@ int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001
#endif /* OS_INTERFACE_H */

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -63,7 +63,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *, nvgpuDeviceHandle
NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp, nvgpuDeviceHandle_t, nvgpuCesCaps_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *, const NvProcessorUuid *pUuid, const nvgpuClientInfo_t *, nvgpuInfo_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *, nvgpuDeviceHandle_t);
-NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuAddressSpaceHandle_t, NvU64 *);
+NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuAddressSpaceHandle_t, NvU64, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, NvHandle *, nvgpuMemoryInfo_t);
@@ -98,13 +98,4 @@ NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAdd
void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32);
#endif