Mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git (synced 2026-01-27 03:29:47 +00:00)
Commit: 580.65.06
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -49,6 +49,8 @@ typedef enum
NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
NV_FIRMWARE_CHIP_FAMILY_GB10X = 8,
NV_FIRMWARE_CHIP_FAMILY_GB20X = 9,
NV_FIRMWARE_CHIP_FAMILY_GB10Y = 11,
NV_FIRMWARE_CHIP_FAMILY_GB20Y = 12,
NV_FIRMWARE_CHIP_FAMILY_END,
} nv_firmware_chip_family_t;

@@ -58,6 +60,8 @@ static inline const char *nv_firmware_chip_family_to_string(
{
switch (fw_chip_family) {
case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x";
case NV_FIRMWARE_CHIP_FAMILY_GB10Y: return "gb10y";
case NV_FIRMWARE_CHIP_FAMILY_GB20Y: return "gb20y";
case NV_FIRMWARE_CHIP_FAMILY_GB20X: return "gb20x";
case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
@@ -68,9 +72,9 @@ static inline const char *nv_firmware_chip_family_to_string(

case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
return NULL;
return "";
}
return NULL;
return "";
}

// The includer may optionally define
@@ -89,6 +93,8 @@ static inline const char *nv_firmware_for_chip_family(
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB10Y: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20Y: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
@@ -110,6 +116,8 @@ static inline const char *nv_firmware_for_chip_family(
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB10Y: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20Y: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through

@@ -29,17 +29,9 @@
#include <linux/kernel.h>
#include <linux/hash.h>

#if defined(NV_LINUX_STRINGHASH_H_PRESENT)
#include <linux/stringhash.h> /* full_name_hash() */
#else
#include <linux/dcache.h>
#endif

#if (NV_FULL_NAME_HASH_ARGUMENT_COUNT == 3)
#define nv_string_hash(_str) full_name_hash(NULL, _str, strlen(_str))
#else
#define nv_string_hash(_str) full_name_hash(_str, strlen(_str))
#endif

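Editor's note: for context on the nv_string_hash() shim above, here is a minimal sketch (not part of this commit) of how such a macro can be used once the correct full_name_hash() arity has been chosen; the bucket count and helper name are illustrative assumptions, and the surrounding header's includes (linux/hash.h) are assumed to be in scope.

/* Illustrative only: map a name string to one of 1 << 8 buckets, assuming
 * nv_string_hash() expands to the appropriate full_name_hash() call. */
#define NV_EXAMPLE_HASH_BITS 8
static inline unsigned int nv_example_bucket(const char *name)
{
    return hash_32(nv_string_hash(name), NV_EXAMPLE_HASH_BITS);
}
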
/**
* This naive hashtable was introduced by commit d9b482c8ba19 (v3.7, 2012-10-31).

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -140,6 +140,7 @@ typedef struct nv_ioctl_export_to_dma_buf_fd
NvU32 index;
NvU64 totalSize NV_ALIGN_BYTES(8);
NvU8 mappingType;
NvBool bAllowMmap;
NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);

@@ -57,9 +57,7 @@
#include <linux/version.h>
#include <linux/utsname.h>

#if LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 0)
// Version 4.4 is allowed, temporarily, although not officially supported.
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
#error "This driver does not support kernels older than Linux 4.15!"
#endif

@@ -78,16 +76,6 @@

#include <linux/mm.h>

#if !defined(VM_RESERVED)
#define VM_RESERVED 0x00000000
#endif
#if !defined(VM_DONTEXPAND)
#define VM_DONTEXPAND 0x00000000
#endif
#if !defined(VM_DONTDUMP)
#define VM_DONTDUMP 0x00000000
#endif

#include <linux/init.h> /* module_init, module_exit */
#include <linux/types.h> /* pic_t, size_t, __u32, etc */
#include <linux/errno.h> /* error codes */
@@ -115,38 +103,20 @@
#endif

#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
#include <drm/drm_device.h>
#endif

#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#endif /* NV_DRM_AVAILABLE */

/*
* sched.h was refactored with this commit (as part of Linux 4.11)
* 2017-03-03 1827adb11ad26b2290dc9fe2aaf54976b2439865
*/
#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT)
#include <linux/sched/signal.h> /* task_lock(), task_unlock() */
#endif

#if defined(NV_LINUX_SCHED_TASK_H_PRESENT)
#include <linux/sched/task.h> /* task_lock(), task_unlock() */
#endif

/* task and signal-related items, for kernels < 4.11: */
#include <linux/sched.h> /* task_lock(), task_unlock() */

/* task and signal-related items */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched.h>
#include <linux/moduleparam.h> /* module_param() */
#include <asm/tlbflush.h> /* flush_tlb(), flush_tlb_all() */

@@ -169,10 +139,7 @@
#include <asm/page.h> /* PAGE_OFFSET */
#include <asm/pgtable.h> /* pte bit definitions */
#include <asm/bitops.h> /* __set_bit() */

#if defined(NV_LINUX_TIME_H_PRESENT)
#include <linux/time.h> /* FD_SET() */
#endif

#include "nv-list-helpers.h"

@@ -211,11 +178,7 @@

#include <linux/workqueue.h> /* workqueue */
#include "nv-kthread-q.h" /* kthread based queue */

#if defined(NV_LINUX_EFI_H_PRESENT)
#include <linux/efi.h> /* efi_enabled */
#endif

#include <linux/fb.h> /* fb_info struct */
#include <linux/screen_info.h> /* screen_info */

@@ -315,65 +278,11 @@ extern int nv_pat_mode;
#define NV_CONFIG_PREEMPT_RT 1
#endif

#if defined(NV_WRITE_CR4_PRESENT)
#define NV_READ_CR4() read_cr4()
#define NV_WRITE_CR4(cr4) write_cr4(cr4)
#else
#define NV_READ_CR4() __read_cr4()
#define NV_WRITE_CR4(cr4) __write_cr4(cr4)
#endif

#ifndef get_cpu
#define get_cpu() smp_processor_id()
#define put_cpu()
#endif

#if !defined(unregister_hotcpu_notifier)
#define unregister_hotcpu_notifier unregister_cpu_notifier
#endif
#if !defined(register_hotcpu_notifier)
#define register_hotcpu_notifier register_cpu_notifier
#endif

#if defined(NVCPU_X86_64)
#if !defined(pmd_large)
#define pmd_large(_pmd) \
((pmd_val(_pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
#endif
#endif /* defined(NVCPU_X86_64) */

#define NV_PAGE_COUNT(page) \
((unsigned int)page_count(page))
#define NV_GET_PAGE_FLAGS(page_ptr) \
(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)->flags)

/*
* Before the introduction of VM_PFNMAP, there was an VM_UNPAGED flag.
* Drivers which wanted to call remap_pfn_range on normal pages had to use this
* VM_UNPAGED flag *and* set PageReserved. With the introduction of VM_PFNMAP,
* that restriction went away. This is described in commit
*
* 2005-10-28 6aab341e0a28aff100a09831c5300a2994b8b986
* ("mm: re-architect the VM_UNPAGED logic")
*
* , which added VM_PFNMAP and vm_normal_page. Therefore, if VM_PFNMAP is
* defined, then we do *not* need to mark a page as reserved, in order to
* call remap_pfn_range().
*/
#if !defined(VM_PFNMAP)
#define NV_MAYBE_RESERVE_PAGE(ptr_ptr) \
SetPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr))
#define NV_MAYBE_UNRESERVE_PAGE(page_ptr) \
ClearPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr))
#else
#define NV_MAYBE_RESERVE_PAGE(ptr_ptr)
#define NV_MAYBE_UNRESERVE_PAGE(page_ptr)
#endif /* defined(VM_PFNMAP) */

#if !defined(__GFP_COMP)
#define __GFP_COMP 0
#endif

#if !defined(DEBUG) && defined(__GFP_NOWARN)
#define NV_GFP_KERNEL (GFP_KERNEL | __GFP_NOWARN)
#define NV_GFP_ATOMIC (GFP_ATOMIC | __GFP_NOWARN)
@@ -394,14 +303,6 @@ extern int nv_pat_mode;
#define NV_GFP_DMA32 (NV_GFP_KERNEL)
#endif

typedef enum
{
NV_MEMORY_TYPE_SYSTEM, /* Memory mapped for ROM, SBIOS and physical RAM. */
NV_MEMORY_TYPE_REGISTERS,
NV_MEMORY_TYPE_FRAMEBUFFER,
NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */
} nv_memory_type_t;

#if defined(NVCPU_AARCH64) || defined(NVCPU_RISCV64)
#define NV_ALLOW_WRITE_COMBINING(mt) 1
#elif defined(NVCPU_X86_64)
@@ -414,10 +315,6 @@ typedef enum
#endif
#endif

#if !defined(IRQF_SHARED)
#define IRQF_SHARED SA_SHIRQ
#endif

#define NV_MAX_RECURRING_WARNING_MESSAGES 10

/* various memory tracking/debugging techniques
@@ -432,6 +329,25 @@ typedef enum
#define NV_DBG_MEMINFO NV_DBG_INFO
#endif

// Provides a consistent way for the driver to obtain the maximum page order
// Starting with Linux kernel 6.8, MAX_ORDER is renamed to MAX_PAGE_ORDER.
#if defined(MAX_PAGE_ORDER)
#define NV_MAX_PAGE_ORDER MAX_PAGE_ORDER
#else
// Linux kernel 6.4.0 changed the meaning of the MAX_ORDER define.
// Prior to 6.4.0, MAX_ORDER was defined as the number of orders available -
// By default defined at 11, it signals that values between 0 and 10 (inclusive)
// are valid order values that the Linux buddy allocator supports.
//
// Starting with 6.4.0, MAX_ORDER is redefined as the maximum valid order value.
// By default defined at 10, it signals that order == 10 is the maximum valid
// order value that the Linux buddy allocator supports.
//
// To smooth interfacing, define NV_MAX_PAGE_ORDER in a safe way even though it might cause
// RM to report a smaller than max order value.
#define NV_MAX_PAGE_ORDER (MAX_ORDER - 1)
#endif // defined(MAX_PAGE_ORDER)

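Editor's note: a minimal sketch (not part of this commit) of how an NV_MAX_PAGE_ORDER shim like the one above is typically consumed; the helper name is a hypothetical example.

/* Illustrative only: clamp a requested allocation order to the largest order
 * the buddy allocator supports on this kernel, via NV_MAX_PAGE_ORDER. */
static inline unsigned int nv_example_clamp_order(unsigned int requested_order)
{
    return (requested_order > NV_MAX_PAGE_ORDER) ? NV_MAX_PAGE_ORDER
                                                 : requested_order;
}
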
#define NV_MEM_TRACKING_PAD_SIZE(size) \
(size) = NV_ALIGN_UP((size + sizeof(void *)), sizeof(void *))

@@ -597,11 +513,7 @@ static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot)
{
pgprot_t prot = __pgprot(pgprot_val(vm_prot));

#if defined(pgprot_decrypted)
return pgprot_decrypted(prot);
#else
return nv_sme_clr(prot);
#endif // pgprot_decrypted
}

#if defined(PAGE_KERNEL_NOENC)
@@ -616,20 +528,12 @@ static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot)
#endif
#endif

#if defined(NV_GET_NUM_PHYSPAGES_PRESENT)
#define NV_NUM_PHYSPAGES get_num_physpages()
#else
#define NV_NUM_PHYSPAGES num_physpages
#endif
#define NV_GET_CURRENT_PROCESS() current->tgid
#define NV_IN_ATOMIC() in_atomic()
#define NV_LOCAL_BH_DISABLE() local_bh_disable()
#define NV_LOCAL_BH_ENABLE() local_bh_enable()
#define NV_COPY_TO_USER(to, from, n) copy_to_user(to, from, n)
#define NV_COPY_FROM_USER(to, from, n) copy_from_user(to, from, n)

#define NV_IS_SUSER() capable(CAP_SYS_ADMIN)
#define NV_PCI_DEVICE_NAME(pci_dev) ((pci_dev)->pretty_name)
#define NV_CLI() local_irq_disable()
#define NV_SAVE_FLAGS(eflags) local_save_flags(eflags)
#define NV_RESTORE_FLAGS(eflags) local_irq_restore(eflags)
@@ -781,29 +685,9 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
} \
__dev; \
})
#elif defined(NV_PCI_GET_DOMAIN_BUS_AND_SLOT_PRESENT)
#else
#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus, devfn) \
pci_get_domain_bus_and_slot(domain, bus, devfn)
#else
#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus,devfn) \
({ \
struct pci_dev *__dev = NULL; \
while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, \
__dev)) != NULL) \
{ \
if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \
(NV_PCI_BUS_NUMBER(__dev) == bus) && \
(NV_PCI_DEVFN(__dev) == devfn)) \
{ \
break; \
} \
} \
__dev; \
})
#endif

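Editor's note: a minimal sketch (not part of this commit) of how the NV_GET_DOMAIN_BUS_AND_SLOT() compatibility macro above can be exercised; the helper name is hypothetical, and the surrounding header's includes and NvBool types are assumed to be in scope.

/* Illustrative only: look up a PCI function by domain/bus/devfn through the
 * compatibility macro and drop the device reference both code paths return. */
static inline NvBool nv_example_device_present(int domain, unsigned int bus,
                                               unsigned int devfn)
{
    struct pci_dev *dev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus, devfn);

    if (dev == NULL)
        return NV_FALSE;
    pci_dev_put(dev);  /* release the reference taken by the lookup */
    return NV_TRUE;
}
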
#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE_PRESENT) // introduced in 3.18-rc1 for aarch64
#define NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(pci_dev) pci_stop_and_remove_bus_device(pci_dev)
#endif

#define NV_PRINT_AT(nv_debug_level,at) \
@@ -827,17 +711,6 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
# define minor(x) MINOR(x)
#endif

#if defined(cpu_relax)
#define NV_CPU_RELAX() cpu_relax()
#else
#define NV_CPU_RELAX() barrier()
#endif

#ifndef IRQ_RETVAL
typedef void irqreturn_t;
#define IRQ_RETVAL(a)
#endif

#if !defined(PCI_COMMAND_SERR)
#define PCI_COMMAND_SERR 0x100
#endif
@@ -892,13 +765,8 @@ static inline vm_fault_t nv_insert_pfn(struct vm_area_struct *vma,
return vmf_insert_pfn_prot(vma, virt_addr, pfn,
__pgprot(pgprot_val(vma->vm_page_prot)));
#else
int ret = -EINVAL;
#if defined(NV_VM_INSERT_PFN_PROT_PRESENT)
ret = vm_insert_pfn_prot(vma, virt_addr, pfn,
int ret = vm_insert_pfn_prot(vma, virt_addr, pfn,
__pgprot(pgprot_val(vma->vm_page_prot)));
#else
ret = vm_insert_pfn(vma, virt_addr, pfn);
#endif
switch (ret)
{
case 0:
@@ -913,8 +781,8 @@ static inline vm_fault_t nv_insert_pfn(struct vm_area_struct *vma,
default:
break;
}
#endif /* defined(NV_VMF_INSERT_PFN_PROT_PRESENT) */
return VM_FAULT_SIGBUS;
#endif /* defined(NV_VMF_INSERT_PFN_PROT_PRESENT) */
}

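Editor's note: for context on the nv_insert_pfn() hunk above, a minimal sketch (not part of this commit) of the call shape the helper reduces to on kernels where NV_VMF_INSERT_PFN_PROT_PRESENT is set; nv_example_lookup_pfn() is a hypothetical placeholder.

/* Illustrative only: a .fault-style handler inserting a PFN with the VMA's
 * own protection bits, mirroring the vmf_insert_pfn_prot() branch above. */
static vm_fault_t nv_example_fault(struct vm_fault *vmf)
{
    unsigned long pfn = nv_example_lookup_pfn(vmf);  /* hypothetical lookup */

    return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
                               __pgprot(pgprot_val(vmf->vma->vm_page_prot)));
}
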
/* Converts BAR index to Linux specific PCI BAR index */
@@ -970,7 +838,7 @@ extern void *nvidia_stack_t_cache;
* wait for the timestamp to increment by at least one to ensure that we do
* not hit a name conflict in cache create -> destroy (async) -> create cycle.
*/
#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT)
#if !defined(NV_SYSFS_SLAB_UNLINK_PRESENT)
static inline void nv_kmem_ctor_dummy(void *arg)
{
(void)arg;
@@ -998,7 +866,7 @@ static inline void nv_kmem_ctor_dummy(void *arg)

static inline void *nv_kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT)
#if !defined(NV_SYSFS_SLAB_UNLINK_PRESENT)
/*
* We cannot call kmem_cache_zalloc directly as it adds the __GFP_ZERO
* flag. This flag together with the presence of a slab constructor is
@@ -1091,6 +959,7 @@ struct nv_dma_buf
struct dma_buf *dma_buf;
struct dma_buf_attachment *dma_attach;
struct sg_table *sgt;
enum dma_data_direction direction;
};
#endif // CONFIG_DMA_SHARED_BUFFER

@@ -1115,7 +984,7 @@ typedef struct nv_alloc_s {
unsigned int num_pages;
unsigned int order;
unsigned int size;
nvidia_pte_t **page_table; /* list of physical pages allocated */
nvidia_pte_t *page_table; /* array of physical pages allocated */
unsigned int pid;
struct page **user_pages;
NvU64 guest_id; /* id of guest VM */
@@ -1160,14 +1029,6 @@ nv_dma_maps_swiotlb(struct device *dev)
{
NvBool swiotlb_in_use = NV_FALSE;
#if defined(CONFIG_SWIOTLB)
#if defined(NV_DMA_OPS_PRESENT) || defined(NV_GET_DMA_OPS_PRESENT) || \
defined(NV_SWIOTLB_DMA_OPS_PRESENT)
/*
* We only use the 'dma_ops' symbol on older x86_64 kernels; later kernels,
* including those for other architectures, have converged on the
* get_dma_ops() interface.
*/
#if defined(NV_GET_DMA_OPS_PRESENT)
/*
* The __attribute__ ((unused)) is necessary because in at least one
* case, *none* of the preprocessor branches below are taken, and
@@ -1176,57 +1037,47 @@ nv_dma_maps_swiotlb(struct device *dev)
* case.
*/
const struct dma_map_ops *ops __attribute__ ((unused)) = get_dma_ops(dev);
#else
const struct dma_mapping_ops *ops __attribute__ ((unused)) = dma_ops;
#endif

/*
* The switch from dma_mapping_ops -> dma_map_ops coincided with the
* switch from swiotlb_map_sg -> swiotlb_map_sg_attrs.
*/
#if defined(NVCPU_AARCH64) && \
defined(NV_NONCOHERENT_SWIOTLB_DMA_OPS_PRESENT)
/* AArch64 exports these symbols directly */
swiotlb_in_use = ((ops == &noncoherent_swiotlb_dma_ops) ||
(ops == &coherent_swiotlb_dma_ops));
#elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs != 0
#if NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs != 0
swiotlb_in_use = (ops->map_sg == swiotlb_map_sg_attrs);
#elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops != 0
#elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops != 0
swiotlb_in_use = (ops == &swiotlb_dma_ops);
#endif
/*
* The "else" case that is not shown
* (for NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 0 ||
* NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops == 0) does
* nothing, and ends up dropping us out to the last line of this function,
* effectively returning false. The nearly-human-readable version of that
* case is "struct swiotlb_dma_ops is present (NV_SWIOTLB_DMA_OPS_PRESENT
* is defined) but neither swiotlb_map_sg_attrs nor swiotlb_dma_ops is
* present".
*
* That can happen on kernels that fall within below range:
*
* 2017-12-24 4bd89ed39b2ab8dc4ac4b6c59b07d420b0213bec
* ("swiotlb: remove various exports")
* 2018-06-28 210d0797c97d0e8f3b1a932a0dc143f4c57008a3
* ("swiotlb: export swiotlb_dma_ops")
*
* Related to this: Between above two commits, this driver has no way of
* detecting whether or not the SWIOTLB is in use. Furthermore, the
* driver cannot support DMA remapping. That leads to the following
* point: "swiotlb=force" is not supported for kernels falling in above
* range.
*
* The other "else" case that is not shown:
* Starting with the 5.0 kernel, swiotlb is integrated into dma_direct,
* which is used when there's no IOMMU. In these kernels, ops == NULL,
* swiotlb_dma_ops no longer exists, and we do not support swiotlb=force
* (doing so would require detecting when swiotlb=force is enabled and
* then returning NV_TRUE even when dma_direct is in use). So for now,
* we just return NV_FALSE and in nv_compute_gfp_mask() we check for
* whether swiotlb could possibly be used (outside of swiotlb=force).
*/
#endif
#endif
/*
* The "else" case that is not shown
* (for NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 0 ||
* NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops == 0) does
* nothing, and ends up dropping us out to the last line of this function,
* effectively returning false. The nearly-human-readable version of that
* case is "get_dma_ops() is defined, but neither swiotlb_map_sg_attrs
* nor swiotlb_dma_ops is present".
*
* That can happen on kernels that fall within below range:
*
* 2017-12-24 4bd89ed39b2ab8dc4ac4b6c59b07d420b0213bec
* ("swiotlb: remove various exports")
* 2018-06-28 210d0797c97d0e8f3b1a932a0dc143f4c57008a3
* ("swiotlb: export swiotlb_dma_ops")
*
* Related to this: Between above two commits, this driver has no way of
* detecting whether or not the SWIOTLB is in use. Furthermore, the
* driver cannot support DMA remapping. That leads to the following
* point: "swiotlb=force" is not supported for kernels falling in above
* range.
*
* The other "else" case that is not shown:
* Starting with the 5.0 kernel, swiotlb is integrated into dma_direct,
* which is used when there's no IOMMU. In these kernels, ops == NULL,
* swiotlb_dma_ops no longer exists, and we do not support swiotlb=force
* (doing so would require detecting when swiotlb=force is enabled and
* then returning NV_TRUE even when dma_direct is in use). So for now,
* we just return NV_FALSE and in nv_compute_gfp_mask() we check for
* whether swiotlb could possibly be used (outside of swiotlb=force).
*/

/*
* Commit 2017-11-07 d7b417fa08d ("x86/mm: Add DMA support for
@@ -1348,6 +1199,15 @@ struct os_wait_queue {
struct completion q;
};

#define MAX_CLIENTS_PER_ADAPTER 127
#define MAX_TEGRA_I2C_PORTS 16

typedef struct nv_i2c_client_entry_s
{
NvU32 port;
void *pOsClient[MAX_CLIENTS_PER_ADAPTER];
} nv_i2c_client_entry_t;

/*!
* @brief Mapping between clock names and clock handles.
*
@@ -1421,6 +1281,9 @@ typedef struct
} nv_acpi_t;
#endif

struct nv_pci_tegra_devfreq_data;
struct nv_pci_tegra_devfreq_dev;

/* linux-specific version of old nv_state_t */
/* this is a general os-specific state structure. the first element *must* be
the general state structure, for the generic unix-based code */
@@ -1524,6 +1387,30 @@ typedef struct nv_linux_state_s {
nv_acpi_t* nv_acpi_object;
#endif

nv_i2c_client_entry_t i2c_clients[MAX_TEGRA_I2C_PORTS];

struct reset_control *dpaux0_reset;
struct reset_control *nvdisplay_reset;
struct reset_control *dsi_core_reset;
struct reset_control *mipi_cal_reset;
struct reset_control *hdacodec_reset;

/*
* nv_imp_icc_path represents the interconnect path across which display
* data must travel.
*/
struct icc_path *nv_imp_icc_path;

#if defined(NV_DEVM_ICC_GET_PRESENT)
/*
* is_upstream_icc_path tracks whether we are using upstream ICC. This
* is required till we fully migrate to use upstream ICC when it is
* available. Right now, even if upstream ICC is available we are still
* using downstream ICC mechanisms for T23x.
*/
NvBool is_upstream_icc_path;
#endif

nvsoc_clks_t soc_clk_handles;

/* Lock serializing ISRs for different SOC vectors */
@@ -1568,6 +1455,18 @@ typedef struct nv_linux_state_s {
wait_queue_head_t wait;
NvS32 return_status;
#endif

#if defined(CONFIG_PM_DEVFREQ)
const struct nv_pci_tegra_devfreq_data *devfreq_table;
unsigned int devfreq_table_size;
struct nv_pci_tegra_devfreq_dev *gpc_devfreq_dev;
struct nv_pci_tegra_devfreq_dev *nvd_devfreq_dev;
struct nv_pci_tegra_devfreq_dev *sys_devfreq_dev;
struct nv_pci_tegra_devfreq_dev *pwr_devfreq_dev;

int (*devfreq_suspend)(struct device *dev);
int (*devfreq_resume)(struct device *dev);
#endif
} nv_linux_state_t;

extern nv_linux_state_t *nv_linux_devices;
@@ -1677,8 +1576,8 @@ static inline struct kmem_cache *nv_kmem_cache_create(const char *name, unsigned
{
char *name_unique;
struct kmem_cache *cache;

#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT)

#if !defined(NV_SYSFS_SLAB_UNLINK_PRESENT)
size_t len;
NvU64 tm_ns = nv_ktime_get_raw_ns();

@@ -1735,6 +1634,7 @@ static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv)

extern NvU32 NVreg_EnableUserNUMAManagement;
extern NvU32 NVreg_RegisterPCIDriver;
extern NvU32 NVreg_RegisterPlatformDeviceDriver;
extern NvU32 NVreg_EnableResizableBar;
extern NvU32 NVreg_EnableNonblockingOpen;

@@ -1777,32 +1677,11 @@ static inline NvBool nv_alloc_release(nv_linux_file_private_t *nvlfp, nv_alloc_t
return NV_FALSE;
}

/*
* RB_EMPTY_ROOT was added in 2.6.18 by this commit:
* 2006-06-21 dd67d051529387f6e44d22d1d5540ef281965fdd
*/
#if !defined(RB_EMPTY_ROOT)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
#endif

// Default flags for ISRs
static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
{
NvU32 flags = 0;

/*
* Request IRQs to be disabled in our ISRs to keep consistency across the
* supported kernel versions.
*
* IRQF_DISABLED has been made the default in 2.6.35 with commit e58aa3d2d0cc
* from March 2010. And it has been later completely removed in 4.1 with commit
* d8bf368d0631 from March 2015. Add it to our flags if it's defined to get the
* same behaviour on pre-2.6.35 kernels as on recent ones.
*/
#if defined(IRQF_DISABLED)
flags |= IRQF_DISABLED;
#endif

/*
* For legacy interrupts, also allow sharing. Sharing doesn't make sense
* for MSI(-X) as on Linux they are never shared across different devices
@@ -1814,29 +1693,14 @@ static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
return flags;
}

/*
* From v3.7-rc1 kernel have stopped exporting get_unused_fd() and started
* exporting get_unused_fd_flags(), as of this commit:
* 2012-09-26 1a7bd2265fc ("make get_unused_fd_flags() a function")
*/
#if NV_IS_EXPORT_SYMBOL_PRESENT_get_unused_fd
#define NV_GET_UNUSED_FD() get_unused_fd()
#else
#define NV_GET_UNUSED_FD() get_unused_fd_flags(0)
#endif

#if NV_IS_EXPORT_SYMBOL_PRESENT_get_unused_fd_flags
#define NV_GET_UNUSED_FD_FLAGS(flags) get_unused_fd_flags(flags)
#else
#define NV_GET_UNUSED_FD_FLAGS(flags) (-1)
#endif

#define MODULE_BASE_NAME "nvidia"
#define MODULE_INSTANCE_NUMBER 0
#define MODULE_INSTANCE_STRING ""
#define MODULE_NAME MODULE_BASE_NAME MODULE_INSTANCE_STRING

NvS32 nv_request_soc_irq(nv_linux_state_t *, NvU32, nv_soc_irq_type_t, NvU32, NvU32, const char*);
NV_STATUS nv_imp_icc_get(nv_state_t *nv);
void nv_imp_icc_put(nv_state_t *nv);

static inline void nv_mutex_destroy(struct mutex *lock)
{
@@ -1886,53 +1750,22 @@ typedef enum
NV_NUMA_STATUS_COUNT
} nv_numa_status_t;

#if defined(NV_LINUX_PLATFORM_DEVICE_H_PRESENT)
#include <linux/platform_device.h>
#endif

#if defined(NV_LINUX_MUTEX_H_PRESENT)
#include <linux/mutex.h>
#endif

#if defined(NV_LINUX_RESET_H_PRESENT)
#include <linux/reset.h>
#endif

#if defined(NV_LINUX_DMA_BUF_H_PRESENT)
#include <linux/dma-buf.h>
#endif

#if defined(NV_LINUX_GPIO_H_PRESENT)
#include <linux/gpio.h>
#endif

#if defined(NV_LINUX_OF_GPIO_H_PRESENT)
#include <linux/of_gpio.h>
#endif

#if defined(NV_LINUX_OF_DEVICE_H_PRESENT)
#include <linux/of_device.h>
#endif

#if defined(NV_LINUX_OF_PLATFORM_H_PRESENT)
#include <linux/of_platform.h>
#endif

#if defined(NV_LINUX_INTERCONNECT_H_PRESENT)
#include <linux/interconnect.h>
#endif

#if defined(NV_LINUX_PM_RUNTIME_H_PRESENT)
#include <linux/pm_runtime.h>
#endif

#if defined(NV_LINUX_CLK_H_PRESENT)
#include <linux/clk.h>
#endif

#if defined(NV_LINUX_CLK_PROVIDER_H_PRESENT)
#include <linux/clk-provider.h>
#endif

#define NV_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL_GPL(symbol)
#define NV_CHECK_EXPORT_SYMBOL(symbol) NV_IS_EXPORT_SYMBOL_PRESENT_##symbol

@@ -28,12 +28,9 @@

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/sched.h> /* signal_pending, cond_resched */
#include <linux/sched.h> /* cond_resched */
#include <linux/semaphore.h>

#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT)
#include <linux/sched/signal.h> /* signal_pending for kernels >= 4.11 */
#endif
#include <linux/sched/signal.h> /* signal_pending */

#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
typedef raw_spinlock_t nv_spinlock_t;

@@ -66,54 +66,17 @@ typedef int vm_fault_t;
/*
* get_user_pages()
*
* The 8-argument version of get_user_pages() was deprecated by commit
* cde70140fed8 ("mm/gup: Overload get_user_pages() functions") in v4.6-rc1.
* (calling get_user_pages with current and current->mm).
*
* Completely moved to the 6 argument version of get_user_pages() by
* commit c12d2da56d0e ("mm/gup: Remove the macro overload API migration
* helpers from the get_user*() APIs") in v4.6-rc4.
*
* write and force parameters were replaced with gup_flags by
* commit 768ae309a961 ("mm: replace get_user_pages() write/force parameters
* with gup_flags") in v4.9.
*
* A 7-argument version of get_user_pages was introduced into linux-4.4.y by
* commit 8e50b8b07f462 ("mm: replace get_user_pages() write/force parameters
* with gup_flags") which cherry-picked the replacement of the write and
* force parameters with gup_flags.
*
* Removed vmas parameter from get_user_pages() by commit 54d020692b34
* ("mm/gup: remove unused vmas parameter from get_user_pages()") in v6.5.
*
*/

#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#if !defined(NV_GET_USER_PAGES_HAS_VMAS_ARG)
#define NV_GET_USER_PAGES get_user_pages
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#else
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
get_user_pages(start, nr_pages, flags, pages, NULL)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
get_user_pages(current, current->mm, start, nr_pages, flags, pages, NULL)
#else
static inline long NV_GET_USER_PAGES(unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;

#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
return get_user_pages(start, nr_pages, write, force, pages, NULL);
#else
// NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
return get_user_pages(current, current->mm, start, nr_pages, write,
force, pages, NULL);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
#endif

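Editor's note: a minimal sketch (not part of this commit) of how the NV_GET_USER_PAGES() wrapper above is typically called; the helper name is hypothetical, and the caller is assumed to hold the mmap read lock of current->mm, as the underlying get_user_pages() variants require.

/* Illustrative only: pin nr_pages of a user buffer for write access through
 * the version-independent NV_GET_USER_PAGES() wrapper. */
static inline long nv_example_pin_user_buffer(unsigned long start,
                                              unsigned long nr_pages,
                                              struct page **pages)
{
    return NV_GET_USER_PAGES(start, nr_pages, FOLL_WRITE, pages);
}
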
/*
* pin_user_pages_remote()
@@ -146,22 +109,12 @@ typedef int vm_fault_t;
#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT

/*
* get_user_pages_remote() was added by commit 1e9877902dc7
* ("mm/gup: Introduce get_user_pages_remote()") in v4.6.
*
* Note that get_user_pages_remote() requires the caller to hold a reference on
* the task_struct (if non-NULL and if this API has tsk argument) and the mm_struct.
* the mm_struct.
* This will always be true when using current and current->mm. If the kernel passes
* the driver a vma via driver callback, the kernel holds a reference on vma->vm_mm
* over that callback.
*
* get_user_pages_remote() write/force parameters were replaced
* with gup_flags by commit 9beae1ea8930 ("mm: replace get_user_pages_remote()
* write/force parameters with gup_flags") in v4.9.
*
* get_user_pages_remote() added 'locked' parameter by commit 5b56d49fc31d
* ("mm: add locked parameter to get_user_pages_remote()") in v4.10.
*
* get_user_pages_remote() removed 'tsk' parameter by
* commit 64019a2e467a ("mm/gup: remove task_struct pointer for
* all gup code") in v5.9.
@@ -171,77 +124,16 @@ typedef int vm_fault_t;
*
*/

#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL)

#else
// NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
int *locked)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;

return get_user_pages_remote(NULL, mm, start, nr_pages, write, force,
pages, NULL);
}
#endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS)
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
int *locked)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;

return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, NULL);
}

#else
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages(NULL, mm, start, nr_pages, flags, pages, NULL)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT

/*
* The .virtual_address field was effectively renamed to .address, by these
* two commits:
*
* struct vm_fault: .address was added by:
* 2016-12-14 82b0f8c39a3869b6fd2a10e180a862248736ec6f
*
* struct vm_fault: .virtual_address was removed by:
* 2016-12-14 1a29d85eb0f19b7d8271923d8917d7b4f5540b3e
*/
static inline unsigned long nv_page_fault_va(struct vm_fault *vmf)
{
#if defined(NV_VM_FAULT_HAS_ADDRESS)
return vmf->address;
#else
return (unsigned long)(vmf->virtual_address);
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
#endif
}

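Editor's note: a minimal sketch (not part of this commit) showing how the nv_page_fault_va() accessor above lets a fault path read the faulting user address uniformly across the .virtual_address/.address rename; the logging helper is an illustrative example.

/* Illustrative only: log the faulting user VA regardless of which
 * struct vm_fault layout the running kernel provides. */
static inline void nv_example_log_fault(struct vm_fault *vmf)
{
    unsigned long va = nv_page_fault_va(vmf);

    printk(KERN_DEBUG "nvidia: fault at user VA 0x%lx\n", va);
}
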
static inline void nv_mmap_read_lock(struct mm_struct *mm)
{

@@ -86,12 +86,6 @@ static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec)
{
int rc = 0;

/*
* pci_enable_msix_range() replaced pci_enable_msix() in 3.14-rc1:
* 2014-01-03 302a2523c277bea0bbe8340312b09507905849ed
*/

#if defined(NV_PCI_ENABLE_MSIX_RANGE_PRESENT)
// We require all the vectors we are requesting so use the same min and max
rc = pci_enable_msix_range(nvl->pci_dev, nvl->msix_entries, nvec, nvec);
if (rc < 0)
@@ -99,13 +93,6 @@ static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec)
return NV_ERR_OPERATING_SYSTEM;
}
WARN_ON(nvec != rc);
#else
rc = pci_enable_msix(nvl->pci_dev, nvl->msix_entries, nvec);
if (rc != 0)
{
return NV_ERR_OPERATING_SYSTEM;
}
#endif

nvl->num_intr = nvec;
return NV_OK;

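Editor's note: a minimal sketch (not part of this commit) of how a caller might use nv_pci_enable_msix() as defined above; passing the same min and max to pci_enable_msix_range() means the request is all-or-nothing, which is what the error handling below assumes. The helper name and errno choice are illustrative, and nvl->msix_entries is assumed to be sized for nvec.

/* Illustrative only: request exactly nvec MSI-X vectors and translate the
 * driver-internal status into a conventional errno for a probe path. */
static inline int nv_example_setup_msix(nv_linux_state_t *nvl, int nvec)
{
    if (nv_pci_enable_msix(nvl, nvec) != NV_OK)
        return -ENOSPC;  /* fewer vectors than requested, or none at all */
    return 0;
}
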
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,6 +36,6 @@ int nv_pci_count_devices(void);
NvU8 nv_find_pci_capability(struct pci_dev *, NvU8);
int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct device *);

NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct pci_dev *);
NvBool nv_pci_has_common_pci_switch(nv_state_t *nv, struct pci_dev *);
#endif

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -63,16 +63,10 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
extern NvBool nvos_is_chipset_io_coherent(void);
/*
* Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit
* ARM that's not for system memory, but device memory instead. For I/O cache
* coherent systems, use cached mappings instead of uncached.
* ARM that's not for system memory, but device memory instead.
*/
#define NV_PGPROT_UNCACHED(old_prot) \
((nvos_is_chipset_io_coherent()) ? \
(old_prot) : \
__pgprot_modify((old_prot), PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)))
#elif defined(NVCPU_PPC64LE)
/* Don't attempt to mark sysmem pages as uncached on ppc64le */
#define NV_PGPROT_UNCACHED(old_prot) old_prot
__pgprot_modify((old_prot), PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#else
#define NV_PGPROT_UNCACHED(old_prot) pgprot_noncached(old_prot)
#endif
@@ -94,32 +88,6 @@ extern NvBool nvos_is_chipset_io_coherent(void);
NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot)
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot(pgprot_val((old_prot)) & ~_PAGE_RW)
#elif defined(NVCPU_PPC64LE)
/*
* Some kernels use H_PAGE instead of _PAGE
*/
#if defined(_PAGE_RW)
#define NV_PAGE_RW _PAGE_RW
#elif defined(H_PAGE_RW)
#define NV_PAGE_RW H_PAGE_RW
#else
#warning "The kernel does not provide page protection defines!"
#endif

#if defined(_PAGE_4K_PFN)
#define NV_PAGE_4K_PFN _PAGE_4K_PFN
#elif defined(H_PAGE_4K_PFN)
#define NV_PAGE_4K_PFN H_PAGE_4K_PFN
#else
#undef NV_PAGE_4K_PFN
#endif

#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
pgprot_writecombine(old_prot)
/* Don't attempt to mark sysmem pages as write combined on ppc64le */
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot(pgprot_val((old_prot)) & ~NV_PAGE_RW)
#elif defined(NVCPU_RISCV64)
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
pgprot_writecombine(old_prot)

@@ -29,8 +29,20 @@
irqreturn_t nvidia_isr (int, void *);
irqreturn_t nvidia_isr_kthread_bh (int, void *);

#define NV_SUPPORTS_PLATFORM_DEVICE 0
int nv_platform_register_driver(void);
void nv_platform_unregister_driver(void);
int nv_platform_count_devices(void);
int nv_soc_register_irqs(nv_state_t *nv);
void nv_soc_free_irqs(nv_state_t *nv);

#define NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE 0
#define NV_SUPPORTS_PLATFORM_DEVICE NV_IS_EXPORT_SYMBOL_PRESENT___platform_driver_register

#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
#define NV_SUPPORTS_DCE_CLIENT_IPC 1
#else
#define NV_SUPPORTS_DCE_CLIENT_IPC 0
#endif

#define NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE (NV_SUPPORTS_PLATFORM_DEVICE && NV_SUPPORTS_DCE_CLIENT_IPC)

#endif

@@ -25,6 +25,7 @@
#define _NV_PROTO_H_

#include "nv-pci.h"
#include "nv-platform.h"

extern const char *nv_device_name;

@@ -36,13 +36,6 @@
#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000)
#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000)

#if !defined(NV_TIMESPEC64_PRESENT)
struct timespec64 {
__s64 tv_sec;
long tv_nsec;
};
#endif

#if !defined(NV_KTIME_GET_RAW_TS64_PRESENT)
static inline void ktime_get_raw_ts64(struct timespec64 *ts64)
{
@@ -53,16 +46,6 @@ static inline void ktime_get_raw_ts64(struct timespec64 *ts64)
}
#endif

#if !defined(NV_KTIME_GET_REAL_TS64_PRESENT)
static inline void ktime_get_real_ts64(struct timespec64 *ts64)
{
struct timeval tv;
do_gettimeofday(&tv);
ts64->tv_sec = tv.tv_sec;
ts64->tv_nsec = tv.tv_usec * (NvU64) NSEC_PER_USEC;
}
#endif

static NvBool nv_timer_less_than
(
const struct timespec64 *a,
@@ -73,49 +56,6 @@ static NvBool nv_timer_less_than
: (a->tv_sec < b->tv_sec);
}

#if !defined(NV_TIMESPEC64_PRESENT)
static inline struct timespec64 timespec64_add
(
const struct timespec64 a,
const struct timespec64 b
)
{
struct timespec64 result;

result.tv_sec = a.tv_sec + b.tv_sec;
result.tv_nsec = a.tv_nsec + b.tv_nsec;
while (result.tv_nsec >= NSEC_PER_SEC)
{
++result.tv_sec;
result.tv_nsec -= NSEC_PER_SEC;
}
return result;
}

static inline struct timespec64 timespec64_sub
(
const struct timespec64 a,
const struct timespec64 b
)
{
struct timespec64 result;

result.tv_sec = a.tv_sec - b.tv_sec;
result.tv_nsec = a.tv_nsec - b.tv_nsec;
while (result.tv_nsec < 0)
{
--(result.tv_sec);
result.tv_nsec += NSEC_PER_SEC;
}
return result;
}

static inline s64 timespec64_to_ns(struct timespec64 *ts)
{
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
#endif

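Editor's note: a minimal sketch (not part of this commit) of how the timespec64 helpers involved in the hunk above are typically combined, whether the fallback definitions or the kernel's own apply; the helper name is an illustrative assumption.

/* Illustrative only: compute the elapsed time between two timestamps in
 * nanoseconds using timespec64_sub() and timespec64_to_ns(). */
static inline s64 nv_example_elapsed_ns(struct timespec64 start,
                                        struct timespec64 end)
{
    struct timespec64 delta = timespec64_sub(end, start);

    return timespec64_to_ns(&delta);
}
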
static inline NvU64 nv_ktime_get_raw_ns(void)
{
struct timespec64 ts;

@@ -42,25 +42,12 @@ static inline void nv_timer_callback_typed_data(struct timer_list *timer)
nv_timer->nv_timer_callback(nv_timer);
}

static inline void nv_timer_callback_anon_data(unsigned long arg)
{
struct nv_timer *nv_timer = (struct nv_timer *)arg;

nv_timer->nv_timer_callback(nv_timer);
}

static inline void nv_timer_setup(struct nv_timer *nv_timer,
void (*callback)(struct nv_timer *nv_timer))
{
nv_timer->nv_timer_callback = callback;

#if defined(NV_TIMER_SETUP_PRESENT)
timer_setup(&nv_timer->kernel_timer, nv_timer_callback_typed_data, 0);
#else
init_timer(&nv_timer->kernel_timer);
nv_timer->kernel_timer.function = nv_timer_callback_anon_data;
nv_timer->kernel_timer.data = (unsigned long)nv_timer;
#endif
}

static inline void nv_timer_delete_sync(struct timer_list *timer)

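Editor's note: a minimal sketch (not part of this commit) of driver code arming a timer through the nv_timer_setup() shim above; the callback, helper names, and one-second expiry are illustrative assumptions.

/* Illustrative only: declare an nv_timer, wire up its callback through the
 * version-independent shim, and arm it for one second from now. */
static struct nv_timer nv_example_timer;

static void nv_example_timer_fn(struct nv_timer *t)
{
    /* periodic work would go here */
}

static void nv_example_start_timer(void)
{
    nv_timer_setup(&nv_example_timer, nv_example_timer_fn);
    mod_timer(&nv_example_timer.kernel_timer, jiffies + HZ);
}
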
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -50,6 +50,9 @@ extern nv_cap_t *nvidia_caps_root;

extern const NvBool nv_is_rm_firmware_supported_os;

#include <nvi2c.h>
#include <nvimpshared.h>

#include <nv-kernel-interface-api.h>

#define GPU_UUID_LEN (16)
@@ -83,6 +86,18 @@ extern const NvBool nv_is_rm_firmware_supported_os;

#define NV_RM_DEVICE_INTR_ADDRESS 0x100

/*
* Clock domain identifier, which is used for fetching the engine
* load backed by the specified clock domain for Tegra platforms
* conforming linux devfreq framework to realize dynamic frequency
* scaling.
*/
typedef enum _TEGRASOC_DEVFREQ_CLK
{
TEGRASOC_DEVFREQ_CLK_GPC,
TEGRASOC_DEVFREQ_CLK_NVD,
} TEGRASOC_DEVFREQ_CLK;

/*!
* @brief The order of the display clocks in the below defined enum
* should be synced with below mapping array and macro.
@@ -105,6 +120,12 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_NVDISPLAY_DISP,
TEGRASOC_WHICH_CLK_NVDISPLAY_P0,
TEGRASOC_WHICH_CLK_NVDISPLAY_P1,
TEGRASOC_WHICH_CLK_NVDISPLAY_P2,
TEGRASOC_WHICH_CLK_NVDISPLAY_P3,
TEGRASOC_WHICH_CLK_NVDISPLAY_P4,
TEGRASOC_WHICH_CLK_NVDISPLAY_P5,
TEGRASOC_WHICH_CLK_NVDISPLAY_P6,
TEGRASOC_WHICH_CLK_NVDISPLAY_P7,
TEGRASOC_WHICH_CLK_DPAUX0,
TEGRASOC_WHICH_CLK_FUSE,
TEGRASOC_WHICH_CLK_DSIPLL_VCO,
@@ -123,9 +144,21 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_VPLL0_REF,
TEGRASOC_WHICH_CLK_VPLL0,
TEGRASOC_WHICH_CLK_VPLL1,
TEGRASOC_WHICH_CLK_VPLL2,
TEGRASOC_WHICH_CLK_VPLL3,
TEGRASOC_WHICH_CLK_VPLL4,
TEGRASOC_WHICH_CLK_VPLL5,
TEGRASOC_WHICH_CLK_VPLL6,
TEGRASOC_WHICH_CLK_VPLL7,
TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF,
TEGRASOC_WHICH_CLK_RG0,
TEGRASOC_WHICH_CLK_RG1,
TEGRASOC_WHICH_CLK_RG2,
TEGRASOC_WHICH_CLK_RG3,
TEGRASOC_WHICH_CLK_RG4,
TEGRASOC_WHICH_CLK_RG5,
TEGRASOC_WHICH_CLK_RG6,
TEGRASOC_WHICH_CLK_RG7,
TEGRASOC_WHICH_CLK_DISPPLL,
TEGRASOC_WHICH_CLK_DISPHUBPLL,
TEGRASOC_WHICH_CLK_DSI_LP,
@@ -133,9 +166,20 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_DSI_PIXEL,
TEGRASOC_WHICH_CLK_PRE_SOR0,
TEGRASOC_WHICH_CLK_PRE_SOR1,
TEGRASOC_WHICH_CLK_PRE_SOR2,
TEGRASOC_WHICH_CLK_PRE_SOR3,
TEGRASOC_WHICH_CLK_DP_LINKA_REF,
TEGRASOC_WHICH_CLK_DP_LINKB_REF,
TEGRASOC_WHICH_CLK_DP_LINKC_REF,
TEGRASOC_WHICH_CLK_DP_LINKD_REF,
TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKB_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKC_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKD_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKB_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKC_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKD_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M,
TEGRASOC_WHICH_CLK_RG0_M,
TEGRASOC_WHICH_CLK_RG1_M,
@@ -144,17 +188,36 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_PLLHUB,
TEGRASOC_WHICH_CLK_SOR0,
TEGRASOC_WHICH_CLK_SOR1,
TEGRASOC_WHICH_CLK_SOR2,
TEGRASOC_WHICH_CLK_SOR3,
TEGRASOC_WHICH_CLK_SOR_PADA_INPUT,
TEGRASOC_WHICH_CLK_SOR_PADB_INPUT,
TEGRASOC_WHICH_CLK_SOR_PADC_INPUT,
TEGRASOC_WHICH_CLK_SOR_PADD_INPUT,
TEGRASOC_WHICH_CLK_SOR0_PAD,
TEGRASOC_WHICH_CLK_SOR1_PAD,
TEGRASOC_WHICH_CLK_SOR2_PAD,
TEGRASOC_WHICH_CLK_SOR3_PAD,
TEGRASOC_WHICH_CLK_PRE_SF0,
TEGRASOC_WHICH_CLK_SF0,
TEGRASOC_WHICH_CLK_SF1,
TEGRASOC_WHICH_CLK_SF2,
TEGRASOC_WHICH_CLK_SF3,
TEGRASOC_WHICH_CLK_SF4,
TEGRASOC_WHICH_CLK_SF5,
TEGRASOC_WHICH_CLK_SF6,
TEGRASOC_WHICH_CLK_SF7,
TEGRASOC_WHICH_CLK_DSI_PAD_INPUT,
TEGRASOC_WHICH_CLK_PRE_SOR0_REF,
TEGRASOC_WHICH_CLK_PRE_SOR1_REF,
TEGRASOC_WHICH_CLK_SOR0_PLL_REF,
TEGRASOC_WHICH_CLK_SOR1_PLL_REF,
TEGRASOC_WHICH_CLK_SOR2_PLL_REF,
TEGRASOC_WHICH_CLK_SOR3_PLL_REF,
TEGRASOC_WHICH_CLK_SOR0_REF,
TEGRASOC_WHICH_CLK_SOR1_REF,
TEGRASOC_WHICH_CLK_SOR2_REF,
TEGRASOC_WHICH_CLK_SOR3_REF,
TEGRASOC_WHICH_CLK_OSC,
TEGRASOC_WHICH_CLK_DSC,
TEGRASOC_WHICH_CLK_MAUD,
@@ -168,6 +231,18 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_PLLA_DISP,
TEGRASOC_WHICH_CLK_PLLA_DISPHUB,
TEGRASOC_WHICH_CLK_PLLA,
TEGRASOC_WHICH_CLK_VPLLX_SOR0_MUXED,
TEGRASOC_WHICH_CLK_VPLLX_SOR1_MUXED,
TEGRASOC_WHICH_CLK_VPLLX_SOR2_MUXED,
TEGRASOC_WHICH_CLK_VPLLX_SOR3_MUXED,
TEGRASOC_WHICH_CLK_SF0_SOR,
TEGRASOC_WHICH_CLK_SF1_SOR,
TEGRASOC_WHICH_CLK_SF2_SOR,
TEGRASOC_WHICH_CLK_SF3_SOR,
TEGRASOC_WHICH_CLK_SF4_SOR,
TEGRASOC_WHICH_CLK_SF5_SOR,
TEGRASOC_WHICH_CLK_SF6_SOR,
TEGRASOC_WHICH_CLK_SF7_SOR,
TEGRASOC_WHICH_CLK_EMC,
TEGRASOC_WHICH_CLK_GPU_FIRST,
TEGRASOC_WHICH_CLK_GPU_SYS = TEGRASOC_WHICH_CLK_GPU_FIRST,
@@ -339,12 +414,8 @@ typedef struct nv_soc_irq_info_s {
#define NV_MAX_SOC_IRQS 10
#define NV_MAX_DPAUX_NUM_DEVICES 4

#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 4

#define NV_IGPU_LEGACY_STALL_IRQ 70
#define NV_IGPU_MAX_STALL_IRQS 3
#define NV_IGPU_MAX_NONSTALL_IRQS 1
/*
* per device state
*/
@@ -390,7 +461,6 @@ typedef struct nv_state_t
nv_aperture_t *mipical_regs;
nv_aperture_t *fb, ud;
nv_aperture_t *simregs;
nv_aperture_t *emc_regs;

NvU32 num_dpaux_instance;
NvU32 interrupt_line;
@@ -404,12 +474,14 @@ typedef struct nv_state_t
NvU32 soc_dcb_size;
NvU32 disp_sw_soc_chip_id;
NvBool soc_is_dpalt_mode_supported;
NvBool soc_is_hfrp_supported;

NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
NvU32 igpu_nonstall_irq;
NvU32 num_stall_irqs;
NvU64 dma_mask;

NvBool is_tegra_pci_igpu;
NvBool supports_tegra_igpu_rg;
NvBool is_tegra_pci_igpu_rg_enabled;

NvBool primary_vga;

NvU32 sim_env;
@@ -488,6 +560,13 @@ typedef struct nv_state_t
/* Bool to check if the GPU has a coherent sysmem link */
NvBool coherent;

/*
* Bool to check if GPU memory is backed by struct page.
* False for non-coherent platforms. May also be false
* on coherent platforms if GPU memory is not onlined to the kernel.
*/
NvBool mem_has_struct_page;

/* OS detected GPU has ATS capability */
NvBool ats_support;
/*
@@ -508,6 +587,9 @@ typedef struct nv_state_t

/* Console is managed by drm drivers or NVKMS */
NvBool client_managed_console;

/* Bool to check if power management is supported */
NvBool is_pm_supported;
} nv_state_t;

#define NVFP_TYPE_NONE 0x0
@@ -574,24 +656,23 @@ typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemor
* flags
*/

#define NV_FLAG_OPEN 0x0001
#define NV_FLAG_EXCLUDE 0x0002
#define NV_FLAG_CONTROL 0x0004
// Unused 0x0008
#define NV_FLAG_SOC_DISPLAY 0x0010
#define NV_FLAG_USES_MSI 0x0020
#define NV_FLAG_USES_MSIX 0x0040
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
#define NV_FLAG_SOC_IGPU 0x0200
#define NV_FLAG_OPEN 0x0001
#define NV_FLAG_EXCLUDE 0x0002
#define NV_FLAG_CONTROL 0x0004
#define NV_FLAG_PCI_P2P_UNSUPPORTED_CHIPSET 0x0008
#define NV_FLAG_SOC_DISPLAY 0x0010
#define NV_FLAG_USES_MSI 0x0020
#define NV_FLAG_USES_MSIX 0x0040
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
/* To be set when an FLR needs to be triggered after device shut down. */
#define NV_FLAG_TRIGGER_FLR 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
#define NV_FLAG_PCI_REMOVE_IN_PROGRESS 0x2000
#define NV_FLAG_UNBIND_LOCK 0x4000
#define NV_FLAG_TRIGGER_FLR 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
#define NV_FLAG_PCI_REMOVE_IN_PROGRESS 0x2000
#define NV_FLAG_UNBIND_LOCK 0x4000
/* To be set when GPU is not present on the bus, to help device teardown */
#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000
#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000

typedef enum
{
@@ -631,15 +712,20 @@ typedef struct
const char *db_support;
} nv_power_info_t;

typedef enum
{
NV_MEMORY_TYPE_SYSTEM, /* Memory mapped for ROM, SBIOS and physical RAM. */
NV_MEMORY_TYPE_REGISTERS,
NV_MEMORY_TYPE_FRAMEBUFFER,
NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */
} nv_memory_type_t;

#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga)

#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL)
#define NV_IS_SOC_DISPLAY_DEVICE(nv) \
((nv)->flags & NV_FLAG_SOC_DISPLAY)

#define NV_IS_SOC_IGPU_DEVICE(nv) \
((nv)->flags & NV_FLAG_SOC_IGPU)

#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
(((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)

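Editor's note: a minimal sketch (not part of this commit) of how the NV_FLAG_* bits and the NV_IS_* predicates above are meant to be tested; the helper name and the particular teardown policy are illustrative assumptions.

/* Illustrative only: decide whether device teardown must skip hardware
 * accesses, based on the flag bits defined above. */
static inline NvBool nv_example_needs_gentle_teardown(nv_state_t *nv)
{
    if (NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv))
        return NV_TRUE;  /* GPU is already gone from the bus */
    return ((nv->flags & NV_FLAG_PCI_REMOVE_IN_PROGRESS) != 0);
}
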
@@ -789,9 +875,9 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)

NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *);
void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **);
NV_STATUS NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *);
void NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *);
NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *);
void NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *);
NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32);

NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64);
@@ -813,7 +899,8 @@ void NV_API_CALL nv_unregister_peer_io_mem(nv_state_t *, void *);

struct sg_table;

NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *);
NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **,
struct sg_table *, void *, NvBool);
void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *);
NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **);
void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *);
@@ -824,12 +911,14 @@ NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU6
NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **);

NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *);
NV_STATUS NV_API_CALL nv_dma_map_non_pci_peer (nv_dma_device_t *, NvU64, NvU64 *);
void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64);

NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *);
void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64);

void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *);
NvBool NV_API_CALL nv_grdma_pci_topology_supported(nv_state_t *, nv_dma_device_t *);

NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *);
NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *);
@@ -902,8 +991,8 @@ struct drm_gem_object;

NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **);
void NV_API_CALL nv_dma_release_dma_buf (nv_dma_buf_t *);

void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *);
@@ -914,6 +1003,10 @@ void NV_API_CALL nv_schedule_uvm_resume_p2p (NvU8 *);
NvBool NV_API_CALL nv_platform_supports_s0ix (void);
NvBool NV_API_CALL nv_s2idle_pm_configured (void);

NvBool NV_API_CALL nv_pci_tegra_register_power_domain (nv_state_t *, NvBool);
NvBool NV_API_CALL nv_pci_tegra_pm_init (nv_state_t *);
void NV_API_CALL nv_pci_tegra_pm_deinit (nv_state_t *);

NvBool NV_API_CALL nv_is_chassis_notebook (void);
void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv);
void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv);
@@ -922,9 +1015,58 @@ typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *)

NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *);
NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *);

void NV_API_CALL nv_get_disp_smmu_stream_ids (nv_state_t *, NvU32 *, NvU32 *);

typedef struct TEGRA_IMP_IMPORT_DATA TEGRA_IMP_IMPORT_DATA;
typedef struct nv_i2c_msg_s nv_i2c_msg_t;

NV_STATUS NV_API_CALL nv_bpmp_send_mrq (nv_state_t *, NvU32, const void *, NvU32, void *, NvU32, NvS32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_i2c_transfer(nv_state_t *, NvU32, NvU8, nv_i2c_msg_t *, int);
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *);
NV_STATUS NV_API_CALL nv_i2c_bus_status(nv_state_t *, NvU32, NvS32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_imp_get_import_data (TEGRA_IMP_IMPORT_DATA *);
NV_STATUS NV_API_CALL nv_imp_enable_disable_rfl (nv_state_t *nv, NvBool bEnable);
NV_STATUS NV_API_CALL nv_imp_icc_set_bw (nv_state_t *nv, NvU32 avg_bw_kbps, NvU32 floor_bw_kbps);
NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances);
NV_STATUS NV_API_CALL nv_get_tegra_brightness_level(nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL nv_set_tegra_brightness_level(nv_state_t *, NvU32);

NV_STATUS NV_API_CALL nv_soc_device_reset (nv_state_t *);
NV_STATUS NV_API_CALL nv_soc_pm_powergate (nv_state_t *);
NV_STATUS NV_API_CALL nv_soc_pm_unpowergate (nv_state_t *);
NV_STATUS NV_API_CALL nv_gpio_get_pin_state(nv_state_t *, NvU32, NvU32 *);
void NV_API_CALL nv_gpio_set_pin_state(nv_state_t *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_gpio_set_pin_direction(nv_state_t *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_gpio_get_pin_direction(nv_state_t *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL nv_gpio_get_pin_number(nv_state_t *, NvU32, NvU32 *);
NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status(nv_state_t *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt(nv_state_t *, NvU32, NvU32);
NvU32 NV_API_CALL nv_tegra_get_rm_interface_type(NvU32);
NV_STATUS NV_API_CALL nv_tegra_dce_register_ipc_client(NvU32, void *, nvTegraDceClientIpcCallback, NvU32 *);
NV_STATUS NV_API_CALL nv_tegra_dce_client_ipc_send_recv(NvU32, void *, NvU32);
NV_STATUS NV_API_CALL nv_tegra_dce_unregister_ipc_client(NvU32);
NV_STATUS NV_API_CALL nv_dsi_parse_panel_props(nv_state_t *, void *);
NvBool NV_API_CALL nv_dsi_is_panel_connected(nv_state_t *);
NV_STATUS NV_API_CALL nv_dsi_panel_enable(nv_state_t *, void *);
NV_STATUS NV_API_CALL nv_dsi_panel_reset(nv_state_t *, void *);
void NV_API_CALL nv_dsi_panel_disable(nv_state_t *, void *);
void NV_API_CALL nv_dsi_panel_cleanup(nv_state_t *, void *);
NV_STATUS NV_API_CALL nv_soc_mipi_cal_reset(nv_state_t *);
NvU32 NV_API_CALL nv_soc_fuse_register_read (NvU32 addr);
NvBool NV_API_CALL nv_get_hdcp_enabled(nv_state_t *nv);
NV_STATUS NV_API_CALL nv_get_valid_window_head_mask(nv_state_t *nv, NvU64 *);
NV_STATUS NV_API_CALL nv_dp_uphy_pll_init(nv_state_t *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_dp_uphy_pll_deinit(nv_state_t *);
NV_STATUS NV_API_CALL nv_soc_i2c_hsp_semaphore_acquire(NvU32 ownerId, NvBool bAcquire, NvU64 timeout);
typedef void (*nv_soc_tsec_cb_func_t)(void*, void*);
NvU32 NV_API_CALL nv_soc_tsec_send_cmd(void* cmd, nv_soc_tsec_cb_func_t cb_func, void* cb_context);
NvU32 NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func, void* cb_context, NvBool is_init_event);
NvU32 NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event);
void* NV_API_CALL nv_soc_tsec_alloc_mem_desc(NvU32 num_bytes, NvU32 *flcn_addr);
void NV_API_CALL nv_soc_tsec_free_mem_desc(void *mem_desc);
NvBool NV_API_CALL nv_is_clk_enabled (nv_state_t *, TEGRASOC_WHICH_CLK);
NV_STATUS NV_API_CALL nv_set_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK);
NV_STATUS NV_API_CALL nv_get_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK*);
NV_STATUS NV_API_CALL nv_clk_get_handles (nv_state_t *);
void NV_API_CALL nv_clk_clear_handles (nv_state_t *);
NV_STATUS NV_API_CALL nv_enable_clk (nv_state_t *, TEGRASOC_WHICH_CLK);
@@ -961,6 +1103,7 @@ NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *
void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_pmu_perfmon_get_load (nvidia_stack_t *, nv_state_t *, NvU32 *, TEGRASOC_DEVFREQ_CLK);
NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);
@@ -1008,14 +1151,16 @@ void NV_API_CALL rm_request_dnotifier_state (nvidia_stack_t *, n
void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *);
NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64);
NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *, NvBool *);
NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **);
NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **, NvBool *);
NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *, void *);
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle,
NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **,
NvBool *, NvU32 *, NvBool *, nv_memory_type_t *);
void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *,
NvHandle, NvHandle, MemoryRange,
@@ -1026,7 +1171,7 @@ void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *,
nv_state_t *, NvHandle, NvHandle,
NvU8, NvHandle *, NvHandle *,
NvHandle *, void **, NvBool *);
NvHandle *, void **, NvBool *, NvBool *);
void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);

void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
@@ -1046,6 +1191,7 @@ NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_disable_iomap_wc(void);

void NV_API_CALL rm_init_tegra_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
@@ -1070,7 +1216,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);

@@ -46,6 +46,7 @@ struct UvmOpsUvmEvents;
#include "nvgputypes.h"
#include "nvstatus.h"
#include "nv_uvm_types.h"
#include "nv_uvm_user_types.h"


// Define the type here as it's Linux specific, used only by the Linux specific

@@ -22,7 +22,8 @@
*/

//
// This file provides common types for both UVM driver and RM's UVM interface.
// This file provides common types for both the UVM kernel driver and RM's UVM
// interface.
//

#ifndef _NV_UVM_TYPES_H_
@@ -32,21 +33,9 @@
#include "nvstatus.h"
#include "nvgputypes.h"
#include "nvCpuUuid.h"
#include "nv_uvm_user_types.h" // For UvmGpuCachingType, UvmGpuMappingType, etc


//
// Default Page Size if left "0" because in RM BIG page size is default & there
// are multiple BIG page sizes in RM. These defines are used as flags to "0"
// should be OK when user is not sure which pagesize allocation it wants
//
#define UVM_PAGE_SIZE_DEFAULT 0x0ULL
#define UVM_PAGE_SIZE_4K 0x1000ULL
#define UVM_PAGE_SIZE_64K 0x10000ULL
#define UVM_PAGE_SIZE_128K 0x20000ULL
#define UVM_PAGE_SIZE_2M 0x200000ULL
#define UVM_PAGE_SIZE_512M 0x20000000ULL
#define UVM_PAGE_SIZE_256G 0x4000000000ULL

//
// When modifying flags, make sure they are compatible with the mirrored
// PMA_* flags in phys_mem_allocator.h.
@@ -81,9 +70,6 @@
//
#define UVM_PMA_CALLED_FROM_PMA_EVICTION 16384

#define UVM_UUID_LEN 16
#define UVM_SW_OBJ_SUBCHANNEL 5

typedef unsigned long long UvmGpuPointer;

//
@@ -447,80 +433,22 @@ typedef struct UvmGpuAllocInfo_tag
// SEV or GPU CC modes are enabled. Ignored otherwise
} UvmGpuAllocInfo;

typedef enum
{
UVM_VIRT_MODE_NONE = 0, // Baremetal or passthrough virtualization
UVM_VIRT_MODE_LEGACY = 1, // Virtualization without SRIOV support
UVM_VIRT_MODE_SRIOV_HEAVY = 2, // Virtualization with SRIOV Heavy configured
UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured
UVM_VIRT_MODE_COUNT = 4,
} UVM_VIRT_MODE;

// !!! The following enums (with UvmRm prefix) are defined and documented in
// mm/uvm/interface/uvm_types.h and must be mirrored. Please refer to that file
// for more details.

// UVM GPU mapping types
typedef enum
{
UvmRmGpuMappingTypeDefault = 0,
UvmRmGpuMappingTypeReadWriteAtomic = 1,
UvmRmGpuMappingTypeReadWrite = 2,
UvmRmGpuMappingTypeReadOnly = 3,
UvmRmGpuMappingTypeCount = 4
} UvmRmGpuMappingType;

// UVM GPU caching types
typedef enum
{
UvmRmGpuCachingTypeDefault = 0,
UvmRmGpuCachingTypeForceUncached = 1,
UvmRmGpuCachingTypeForceCached = 2,
UvmRmGpuCachingTypeCount = 3
} UvmRmGpuCachingType;

// UVM GPU format types
typedef enum {
UvmRmGpuFormatTypeDefault = 0,
UvmRmGpuFormatTypeBlockLinear = 1,
UvmRmGpuFormatTypeCount = 2
} UvmRmGpuFormatType;

// UVM GPU Element bits types
typedef enum {
UvmRmGpuFormatElementBitsDefault = 0,
UvmRmGpuFormatElementBits8 = 1,
UvmRmGpuFormatElementBits16 = 2,
// Cuda does not support 24-bit width
UvmRmGpuFormatElementBits32 = 4,
UvmRmGpuFormatElementBits64 = 5,
UvmRmGpuFormatElementBits128 = 6,
UvmRmGpuFormatElementBitsCount = 7
} UvmRmGpuFormatElementBits;

// UVM GPU Compression types
typedef enum {
UvmRmGpuCompressionTypeDefault = 0,
UvmRmGpuCompressionTypeEnabledNoPlc = 1,
UvmRmGpuCompressionTypeCount = 2
} UvmRmGpuCompressionType;

typedef struct UvmGpuExternalMappingInfo_tag
{
// In: GPU caching ability.
UvmRmGpuCachingType cachingType;
UvmGpuCachingType cachingType;

// In: Virtual permissions.
UvmRmGpuMappingType mappingType;
UvmGpuMappingType mappingType;

// In: RM virtual mapping memory format
UvmRmGpuFormatType formatType;
UvmGpuFormatType formatType;

// In: RM virtual mapping element bits
UvmRmGpuFormatElementBits elementBits;
UvmGpuFormatElementBits elementBits;

// In: RM virtual compression type
UvmRmGpuCompressionType compressionType;
UvmGpuCompressionType compressionType;

// In: Size of the buffer to store PTEs (in bytes).
NvU64 pteBufferSize;
@@ -546,6 +474,9 @@ typedef struct UvmGpuExternalMappingInfo_tag

// Out: PTE size (in bytes)
NvU32 pteSize;

// Out: UVM needs to invalidate L2 at unmap
NvBool bNeedL2InvalidateAtUnmap;
} UvmGpuExternalMappingInfo;

typedef struct UvmGpuExternalPhysAddrInfo_tag
@@ -553,7 +484,7 @@ typedef struct UvmGpuExternalPhysAddrInfo_tag
// In: Virtual permissions. Returns
// NV_ERR_INVALID_ACCESS_TYPE if input is
// inaccurate
UvmRmGpuMappingType mappingType;
UvmGpuMappingType mappingType;

// In: Size of the buffer to store PhysAddrs (in bytes).
NvU64 physAddrBufferSize;
@@ -603,6 +534,11 @@ typedef struct UvmGpuP2PCapsParams_tag
// second, not taking into account the protocols overhead. The reported
// bandwidth for indirect peers is zero.
NvU32 totalLinkLineRateMBps;

// Out: IOMMU/DMA mappings of bar1 of the respective peer vidmem.
// Size is 0 if bar1 p2p is not supported.
NvU64 bar1DmaAddress[2];
NvU64 bar1DmaSize[2];
} UvmGpuP2PCapsParams;

// Platform-wide information
@@ -746,6 +682,9 @@ typedef struct UvmGpuInfo_tag

// GPU supports ATS capability
NvBool atsSupport;

// GPU supports Non-PASID ATS capability
NvBool nonPasidAtsSupport;
} UvmGpuInfo;

typedef struct UvmGpuFbInfo_tag
@@ -759,6 +698,7 @@ typedef struct UvmGpuFbInfo_tag
NvBool bZeroFb; // Zero FB mode enabled.
NvU64 maxVidmemPageSize; // Largest GPU page size to access vidmem.
NvBool bStaticBar1Enabled; // Static BAR1 mode is enabled
NvBool bStaticBar1WriteCombined; // Write combined is enabled
NvU64 staticBar1StartOffset; // The start offset of the static mapping
NvU64 staticBar1Size; // The size of the static mapping
NvU32 heapStart; // The start offset of heap in KB, helpful for MIG
@@ -795,19 +735,6 @@ typedef struct UvmPmaAllocationOptions_tag
NvU32 resultFlags; // valid if the allocation function returns NV_OK
} UvmPmaAllocationOptions;

//
// Mirrored in PMA (PMA_STATS)
//
typedef struct UvmPmaStatistics_tag
{
volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions
volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions
volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions
volatile NvU64 numPages2mProtected; // PMA-wide 2MB pages count in protected memory
volatile NvU64 numFreePages64kProtected; // PMA-wide free 64KB page count in protected memory
volatile NvU64 numFreePages2mProtected; // PMA-wide free 2MB pages count in protected memory
} UvmPmaStatistics;

/*******************************************************************************
uvmEventSuspend
This function will be called by the GPU driver to signal to UVM that the
@@ -1100,14 +1027,6 @@ typedef struct UvmGpuAccessCntrInfo_tag
NvHandle accessCntrBufferHandle;
} UvmGpuAccessCntrInfo;

typedef enum
{
UVM_ACCESS_COUNTER_GRANULARITY_64K = 1,
UVM_ACCESS_COUNTER_GRANULARITY_2M = 2,
UVM_ACCESS_COUNTER_GRANULARITY_16M = 3,
UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
} UVM_ACCESS_COUNTER_GRANULARITY;

typedef struct UvmGpuAccessCntrConfig_tag
{
NvU32 granularity;

kernel-open/common/inc/nv_uvm_user_types.h (new file, 166 lines)
@@ -0,0 +1,166 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/

//
// This file provides common types for the UVM kernel driver, UVM user layer,
// and RM's UVM interface.
//

#ifndef _NV_UVM_USER_TYPES_H_
#define _NV_UVM_USER_TYPES_H_

#include "nvtypes.h"

//
// Default Page Size if left "0" because in RM BIG page size is default & there
// are multiple BIG page sizes in RM. These defines are used as flags to "0"
// should be OK when user is not sure which pagesize allocation it wants
//
#define UVM_PAGE_SIZE_DEFAULT 0x0ULL
#define UVM_PAGE_SIZE_4K 0x1000ULL
#define UVM_PAGE_SIZE_64K 0x10000ULL
#define UVM_PAGE_SIZE_128K 0x20000ULL
#define UVM_PAGE_SIZE_2M 0x200000ULL
#define UVM_PAGE_SIZE_512M 0x20000000ULL
#define UVM_PAGE_SIZE_256G 0x4000000000ULL

typedef enum
{
UVM_VIRT_MODE_NONE = 0, // Baremetal or passthrough virtualization
UVM_VIRT_MODE_LEGACY = 1, // Virtualization without SRIOV support
UVM_VIRT_MODE_SRIOV_HEAVY = 2, // Virtualization with SRIOV Heavy configured
UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured
UVM_VIRT_MODE_COUNT = 4,
} UVM_VIRT_MODE;

//------------------------------------------------------------------------------
// UVM GPU mapping types
//
// These types indicate the kinds of accesses allowed from a given GPU at the
// specified virtual address range. There are 3 basic kinds of accesses: read,
// write and atomics. Each type indicates what kinds of accesses are allowed.
// Accesses of any disallowed kind are fatal. The "Default" type specifies that
// the UVM driver should decide on the types of accesses allowed.
//------------------------------------------------------------------------------
typedef enum
{
UvmGpuMappingTypeDefault = 0,
UvmGpuMappingTypeReadWriteAtomic = 1,
UvmGpuMappingTypeReadWrite = 2,
UvmGpuMappingTypeReadOnly = 3,
UvmGpuMappingTypeCount = 4
} UvmGpuMappingType;

//------------------------------------------------------------------------------
// UVM GPU caching types
//
// These types indicate the cacheability of the specified virtual address range
// from a given GPU. The "Default" type specifies that the UVM driver should
// set caching on or off as required to follow the UVM coherence model. The
// "ForceUncached" and "ForceCached" types will always turn caching off or on
// respectively. These two types override the cacheability specified by the UVM
// coherence model.
//------------------------------------------------------------------------------
typedef enum
{
UvmGpuCachingTypeDefault = 0,
UvmGpuCachingTypeForceUncached = 1,
UvmGpuCachingTypeForceCached = 2,
UvmGpuCachingTypeCount = 3
} UvmGpuCachingType;

//------------------------------------------------------------------------------
// UVM GPU format types
//
// These types indicate the memory format of the specified virtual address
// range for a given GPU. The "Default" type specifies that the UVM driver will
// detect the format based on the allocation and is mutually inclusive with
// UvmGpuFormatElementBitsDefault.
//------------------------------------------------------------------------------
typedef enum {
UvmGpuFormatTypeDefault = 0,
UvmGpuFormatTypeBlockLinear = 1,
UvmGpuFormatTypeCount = 2
} UvmGpuFormatType;

//------------------------------------------------------------------------------
// UVM GPU Element bits types
//
// These types indicate the element size of the specified virtual address range
// for a given GPU. The "Default" type specifies that the UVM driver will
// detect the element size based on the allocation and is mutually inclusive
// with UvmGpuFormatTypeDefault. The element size is specified in bits:
// UvmGpuFormatElementBits8 uses the 8-bits format.
//------------------------------------------------------------------------------
typedef enum {
UvmGpuFormatElementBitsDefault = 0,
UvmGpuFormatElementBits8 = 1,
UvmGpuFormatElementBits16 = 2,
// Cuda does not support 24-bit width
UvmGpuFormatElementBits32 = 4,
UvmGpuFormatElementBits64 = 5,
UvmGpuFormatElementBits128 = 6,
UvmGpuFormatElementBitsCount = 7
} UvmGpuFormatElementBits;

//------------------------------------------------------------------------------
// UVM GPU Compression types
//
// These types indicate the compression type of the specified virtual address
// range for a given GPU. The "Default" type specifies that the UVM driver will
// detect the compression attributes based on the allocation. Any type other
// than the default will override the compression behavior of the physical
// allocation. UvmGpuCompressionTypeEnabledNoPlc will disable PLC but enables
// generic compression. UvmGpuCompressionTypeEnabledNoPlc type is only supported
// on Turing plus GPUs. Since UvmGpuCompressionTypeEnabledNoPlc type enables
// generic compression, it can only be used when the compression attribute of
// the underlying physical allocation is enabled.
//------------------------------------------------------------------------------
typedef enum {
UvmGpuCompressionTypeDefault = 0,
UvmGpuCompressionTypeEnabledNoPlc = 1,
UvmGpuCompressionTypeCount = 2
} UvmGpuCompressionType;
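
A minimal usage sketch, not part of the diff: after this rename, callers fill UvmGpuExternalMappingInfo with the user-visible Uvm* enums from nv_uvm_user_types.h instead of the old UvmRm* spellings. The helper name and the caller-supplied pteBufferSize are hypothetical.

// Hypothetical helper: populate the input fields of UvmGpuExternalMappingInfo
// using the renamed (UvmRm* -> Uvm*) enum types introduced by this change.
static void fill_mapping_info(UvmGpuExternalMappingInfo *info, NvU64 pteBufferSize)
{
    info->cachingType     = UvmGpuCachingTypeDefault;         // let UVM pick cacheability
    info->mappingType     = UvmGpuMappingTypeReadWriteAtomic; // allow read/write/atomics
    info->formatType      = UvmGpuFormatTypeDefault;          // detect format from the allocation
    info->elementBits     = UvmGpuFormatElementBitsDefault;   // detect element size as well
    info->compressionType = UvmGpuCompressionTypeDefault;     // keep the allocation's compression
    info->pteBufferSize   = pteBufferSize;                    // bytes available for returned PTEs
}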

//
// Mirrored in PMA (PMA_STATS)
//
typedef struct UvmPmaStatistics_tag
{
volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions
volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions
volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions
volatile NvU64 numPages2mProtected; // PMA-wide 2MB pages count in protected memory
volatile NvU64 numFreePages64kProtected; // PMA-wide free 64KB page count in protected memory
volatile NvU64 numFreePages2mProtected; // PMA-wide free 2MB pages count in protected memory
} UvmPmaStatistics;

typedef enum
{
UVM_ACCESS_COUNTER_GRANULARITY_64K = 1,
UVM_ACCESS_COUNTER_GRANULARITY_2M = 2,
UVM_ACCESS_COUNTER_GRANULARITY_16M = 3,
UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
} UVM_ACCESS_COUNTER_GRANULARITY;

#endif // _NV_UVM_USER_TYPES_H_
@@ -562,9 +562,6 @@ struct NvKmsKapiCreateSurfaceParams {
* explicit_layout is NV_TRUE and layout is
* NvKmsSurfaceMemoryLayoutBlockLinear */
NvU8 log2GobsPerBlockY;

/* [IN] Whether a surface can be updated directly on the screen */
NvBool noDisplayCaching;
};

enum NvKmsKapiAllocationType {
@@ -573,6 +570,28 @@ enum NvKmsKapiAllocationType {
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
};

struct NvKmsKapiAllocateMemoryParams {
/* [IN] BlockLinear or Pitch */
enum NvKmsSurfaceMemoryLayout layout;

/* [IN] Allocation type */
enum NvKmsKapiAllocationType type;

/* [IN] Size, in bytes, of the memory to allocate */
NvU64 size;

/* [IN] Whether memory can be updated directly on the screen */
NvBool noDisplayCaching;

/* [IN] Whether to allocate memory from video memory or system memory */
NvBool useVideoMemory;

/* [IN/OUT] For input, non-zero if compression backing store should be
* allocated for the memory, for output, non-zero if compression backing
* store was allocated for the memory */
NvU8 *compressible;
};

typedef enum NvKmsKapiRegisterWaiterResultRec {
NVKMS_KAPI_REG_WAITER_FAILED,
NVKMS_KAPI_REG_WAITER_SUCCESS,
@@ -602,14 +621,19 @@ struct NvKmsKapiFunctionsTable {
} systemInfo;

/*!
* Enumerate the available physical GPUs that can be used with NVKMS.
* Enumerate the available GPUs that can be used with NVKMS.
*
* \param [out] gpuInfo The information of the enumerated GPUs.
* It is an array of NVIDIA_MAX_GPUS elements.
* The gpuCallback will be called with a NvKmsKapiGpuInfo for each
* physical and MIG GPU currently available in the system.
*
* \param [in] gpuCallback Client function to handle each GPU.
*
* \return Count of enumerated gpus.
*/
NvU32 (*enumerateGpus)(struct NvKmsKapiGpuInfo *kapiGpuInfo);
NvU32 (*enumerateGpus)
(
void (*gpuCallback)(const struct NvKmsKapiGpuInfo *info)
);
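
A hedged client-side sketch, not from the diff: with the new signature, NVKMS invokes the supplied callback once per physical or MIG GPU and returns the count, instead of filling a fixed NVIDIA_MAX_GPUS array. The nvKms table pointer and both function names below are assumptions.

// Hypothetical callback invoked once per enumerated GPU.
static void on_gpu(const struct NvKmsKapiGpuInfo *info)
{
    (void)info;   // record or log whatever NVKMS reports for this GPU
}

// Hypothetical wrapper: drive the callback-based enumeration and return the count.
static NvU32 count_gpus(const struct NvKmsKapiFunctionsTable *nvKms)
{
    return nvKms->enumerateGpus(on_gpu);
}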

/*!
* Allocate an NVK device using which you can query/allocate resources on
@@ -839,66 +863,22 @@ struct NvKmsKapiFunctionsTable {
);

/*!
* Allocate some unformatted video memory of the specified size.
* Allocate some unformatted video or system memory of the specified size.
*
* This function allocates video memory on the specified GPU.
* It should be suitable for mapping on the CPU as a pitch
* linear or block-linear surface.
* This function allocates video or system memory on the specified GPU. It
* should be suitable for mapping on the CPU as a pitch linear or
* block-linear surface.
*
* \param [in] device A device allocated using allocateDevice().
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] layout BlockLinear or Pitch.
*
* \param [in] type Allocation type.
*
* \param [in] size Size, in bytes, of the memory to allocate.
*
* \param [in/out] compressible For input, non-zero if compression
* backing store should be allocated for
* the memory, for output, non-zero if
* compression backing store was
* allocated for the memory.
* \param [in/out] params Parameters required for memory allocation.
*
* \return A valid memory handle on success, NULL on failure.
*/
struct NvKmsKapiMemory* (*allocateVideoMemory)
struct NvKmsKapiMemory* (*allocateMemory)
(
struct NvKmsKapiDevice *device,
enum NvKmsSurfaceMemoryLayout layout,
enum NvKmsKapiAllocationType type,
NvU64 size,
NvU8 *compressible
);

/*!
* Allocate some unformatted system memory of the specified size.
*
* This function allocates system memory . It should be suitable
* for mapping on the CPU as a pitch linear or block-linear surface.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] layout BlockLinear or Pitch.
*
* \param [in] type Allocation type.
*
* \param [in] size Size, in bytes, of the memory to allocate.
*
* \param [in/out] compressible For input, non-zero if compression
* backing store should be allocated for
* the memory, for output, non-zero if
* compression backing store was
* allocated for the memory.
*
* \return A valid memory handle on success, NULL on failure.
*/
struct NvKmsKapiMemory* (*allocateSystemMemory)
(
struct NvKmsKapiDevice *device,
enum NvKmsSurfaceMemoryLayout layout,
enum NvKmsKapiAllocationType type,
NvU64 size,
NvU8 *compressible
struct NvKmsKapiAllocateMemoryParams *params
);
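
A hypothetical caller sketch, not part of the diff: the separate allocateVideoMemory/allocateSystemMemory hooks collapse into a single allocateMemory entry point driven by NvKmsKapiAllocateMemoryParams, with useVideoMemory selecting video versus system memory. The wrapper name and the particular layout/type values chosen here are illustrative only.

// Hypothetical wrapper: allocate block-linear offscreen memory from vidmem
// through the merged allocateMemory entry point.
static struct NvKmsKapiMemory *alloc_offscreen_vidmem(
    const struct NvKmsKapiFunctionsTable *nvKms,
    struct NvKmsKapiDevice *device,
    NvU64 size)
{
    NvU8 compressible = 0;   // in: no compression requested; out: whether it was granted

    struct NvKmsKapiAllocateMemoryParams params = {
        .layout           = NvKmsSurfaceMemoryLayoutBlockLinear,
        .type             = NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN,
        .size             = size,
        .noDisplayCaching = NV_FALSE,
        .useVideoMemory   = NV_TRUE,          // pick video memory rather than system memory
        .compressible     = &compressible,
    };

    return nvKms->allocateMemory(device, &params);   // NULL on failure
}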

/*!

@@ -580,7 +580,6 @@ nvMaskPos32(const NvU32 mask, const NvU32 bitIdx)
n64 = BIT_IDX_64(LOWESTBIT(n64));\
}


// Destructive operation on n32
#define HIGHESTBITIDX_32(n32) \
{ \
@@ -592,6 +591,17 @@ nvMaskPos32(const NvU32 mask, const NvU32 bitIdx)
n32 = count; \
}

// Destructive operation on n64
#define HIGHESTBITIDX_64(n64) \
{ \
NvU64 count = 0; \
while (n64 >>= 1) \
{ \
count++; \
} \
n64 = count; \
}
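
A small usage sketch, not part of the diff: like its 32-bit counterpart, HIGHESTBITIDX_64 is destructive and overwrites its argument with the index of the highest set bit, so the hypothetical helper below operates on a copy.

// Hypothetical helper: return the index of the highest set bit of 'value'
// (0 when no bit above bit 0 is set), leaving 'value' itself untouched.
static NvU64 highest_bit_index(NvU64 value)
{
    NvU64 scratch = value;       // the macro clobbers its argument
    HIGHESTBITIDX_64(scratch);   // e.g. value = 1ULL << 44  ->  scratch == 44
    return scratch;
}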

// Destructive operation on n32
#define ROUNDUP_POW2(n32) \
{ \

@@ -164,6 +164,7 @@ NV_STATUS_CODE(NV_ERR_RESOURCE_RETIREMENT_ERROR, 0x00000086, "An error occ
NV_STATUS_CODE(NV_ERR_FABRIC_STATE_OUT_OF_SYNC, 0x00000087, "NVLink fabric state cached by the driver is out of sync")
NV_STATUS_CODE(NV_ERR_BUFFER_FULL, 0x00000088, "Buffer is full")
NV_STATUS_CODE(NV_ERR_BUFFER_EMPTY, 0x00000089, "Buffer is empty")
NV_STATUS_CODE(NV_ERR_MC_FLA_OFFSET_TABLE_FULL, 0x0000008A, "Multicast FLA offset table has no available slots")

// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,6 @@
#ifndef NVTYPES_INCLUDED
#define NVTYPES_INCLUDED

#ifdef __cplusplus
extern "C" {
#endif

#include "cpuopsys.h"

#ifndef NVTYPES_USE_STDINT
@@ -55,6 +51,10 @@ extern "C" {
#endif
#endif // __cplusplus

#ifdef __cplusplus
extern "C" {
#endif

#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__)
// ensure or force 8-bytes alignment of NV 64-bit types
#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8)))

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -70,144 +70,145 @@ typedef struct os_wait_queue os_wait_queue;
* ---------------------------------------------------------------------------
*/

NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_current_tick (void);
NvU64 NV_API_CALL os_get_current_tick_hr (void);
NvU64 NV_API_CALL os_get_tick_resolution (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_system_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_monotonic_time_ns (void);
NvU64 NV_API_CALL os_get_monotonic_time_ns_hr (void);
NvU64 NV_API_CALL os_get_monotonic_tick_resolution_ns (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
#if defined(NV_VMWARE)
void* NV_API_CALL os_map_user_space (MemoryArea *, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
void* NV_API_CALL os_map_user_space (MemoryArea *, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
#endif
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_check_access (RsAccessRight accessRight);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NvU64 NV_API_CALL os_get_max_user_va (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *, NvBool);
NvBool NV_API_CALL os_is_queue_flush_ongoing (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
void* NV_API_CALL os_alloc_rwlock (void);
void NV_API_CALL os_free_rwlock (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_write (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *);
void NV_API_CALL os_release_rwlock_read (void *);
void NV_API_CALL os_release_rwlock_write (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_device_vm_present (void);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
NvS32 NV_API_CALL os_imex_channel_get (NvU64);
NvS32 NV_API_CALL os_imex_channel_count (void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_check_access (RsAccessRight accessRight);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NvU64 NV_API_CALL os_get_max_user_va (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *, NvBool);
NvBool NV_API_CALL os_is_queue_flush_ongoing (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
void* NV_API_CALL os_alloc_rwlock (void);
void NV_API_CALL os_free_rwlock (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_write (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write (void *);
void NV_API_CALL os_release_rwlock_read (void *);
void NV_API_CALL os_release_rwlock_write (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_device_vm_present (void);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd (const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
NvS32 NV_API_CALL os_imex_channel_get (NvU64);
NvS32 NV_API_CALL os_imex_channel_count (void);

NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *);
enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_32BIT,
OS_INTF_PCIE_REQ_ATOMICS_64BIT,
@@ -223,8 +224,11 @@ void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
NvBool NV_API_CALL os_is_init_ns(void);
NV_STATUS NV_API_CALL os_iommu_sva_bind(void *arg, void **handle, NvU32 *pasid);
void NV_API_CALL os_iommu_sva_unbind(void *handle);

extern NvU32 os_page_size;
extern NvU64 os_page_size;
extern NvU64 os_max_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvBool os_cc_enabled;
@@ -266,4 +270,9 @@ int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001

// NV OS Tegra platform type defines
#define NV_OS_TEGRA_PLATFORM_SIM 0
#define NV_OS_TEGRA_PLATFORM_FPGA 1
#define NV_OS_TEGRA_PLATFORM_SILICON 2

#endif /* OS_INTERFACE_H */

@@ -27,6 +27,8 @@ typedef enum
{
NV_OS_GPIO_FUNC_HOTPLUG_A,
NV_OS_GPIO_FUNC_HOTPLUG_B,
NV_OS_GPIO_FUNC_HOTPLUG_C,
NV_OS_GPIO_FUNC_HOTPLUG_D,
} NV_OS_GPIO_FUNC_NAMES;

#endif