550.40.07

Bernhard Stoeckner
2024-01-24 17:51:53 +01:00
parent bb2dac1f20
commit 91676d6628
1411 changed files with 261367 additions and 145959 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,5 +39,6 @@
#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
#define NV_ESC_WAIT_OPEN_COMPLETE (NV_IOCTL_BASE + 18)
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,4 +142,10 @@ typedef struct nv_ioctl_export_to_dma_buf_fd
NvU32 status;
} nv_ioctl_export_to_dma_buf_fd_t;
typedef struct nv_ioctl_wait_open_complete
{
int rc;
NvU32 adapterStatus;
} nv_ioctl_wait_open_complete_t;
#endif
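For illustration only, a userspace caller could retrieve the result of a deferred (O_NONBLOCK) open roughly as follows. The _IOWR encoding and the NV_IOCTL_MAGIC constant are assumptions about how these escape numbers are consumed, not something shown in this hunk:

/* Hypothetical userspace sketch; the ioctl encoding is an assumption. */
#include <sys/ioctl.h>

static int wait_open_complete(int fd)
{
    nv_ioctl_wait_open_complete_t params = { 0 };

    if (ioctl(fd, _IOWR(NV_IOCTL_MAGIC, NV_ESC_WAIT_OPEN_COMPLETE,
                        nv_ioctl_wait_open_complete_t), &params) != 0)
        return -1;

    /* rc carries the deferred open's result; adapterStatus the RM status. */
    return params.rc;
}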

View File

@@ -35,6 +35,7 @@
#include "os-interface.h"
#include "nv-timer.h"
#include "nv-time.h"
#include "nv-chardev-numbers.h"
#define NV_KERNEL_NAME "Linux"
@@ -406,37 +407,6 @@ extern int nv_pat_mode;
#define NV_GFP_DMA32 (NV_GFP_KERNEL)
#endif
extern NvBool nvos_is_chipset_io_coherent(void);
#if defined(NVCPU_X86_64)
#define CACHE_FLUSH() asm volatile("wbinvd":::"memory")
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_AARCH64)
static inline void nv_flush_cache_cpu(void *info)
{
if (!nvos_is_chipset_io_coherent())
{
#if defined(NV_FLUSH_CACHE_ALL_PRESENT)
flush_cache_all();
#else
WARN_ONCE(0, "NVRM: kernel does not support flush_cache_all()\n");
#endif
}
}
#define CACHE_FLUSH() nv_flush_cache_cpu(NULL)
#define CACHE_FLUSH_ALL() on_each_cpu(nv_flush_cache_cpu, NULL, 1)
#define WRITE_COMBINE_FLUSH() mb()
#elif defined(NVCPU_PPC64LE)
#define CACHE_FLUSH() asm volatile("sync; \n" \
"isync; \n" ::: "memory")
#define WRITE_COMBINE_FLUSH() CACHE_FLUSH()
#elif defined(NVCPU_RISCV64)
#define CACHE_FLUSH() mb()
#define WRITE_COMBINE_FLUSH() CACHE_FLUSH()
#else
#error "CACHE_FLUSH() and WRITE_COMBINE_FLUSH() need to be defined for this architecture."
#endif
typedef enum
{
NV_MEMORY_TYPE_SYSTEM, /* Memory mapped for ROM, SBIOS and physical RAM. */
@@ -1380,7 +1350,19 @@ typedef struct nv_dma_map_s {
i < dm->mapping.discontig.submap_count; \
i++, sm = &dm->mapping.discontig.submaps[i])
/*
* On 4K ARM kernels, make the max submap size a multiple of 64K to keep nv-p2p happy.
* Despite the 4K OS pages, 64K P2P pages are still used because dependent modules still assume 64K.
* Instead of (4G-4K), use (4G-64K) as the max submap size, since the mapped IOVA range
* must be aligned to a 64K boundary.
*/
#if defined(CONFIG_ARM64_4K_PAGES)
#define NV_DMA_U32_MAX_4K_PAGES ((NvU32)((NV_U32_MAX >> PAGE_SHIFT) + 1))
#define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_DMA_U32_MAX_4K_PAGES - 16))
#else
#define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_U32_MAX >> PAGE_SHIFT))
#endif
#define NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(s) (s * NV_DMA_SUBMAP_MAX_PAGES)
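As a worked example of the arithmetic above, assuming PAGE_SHIFT == 12 (4K pages):

/*
 * Worked example, assuming PAGE_SHIFT == 12 (4K pages):
 *   NV_U32_MAX >> PAGE_SHIFT  == 0x000FFFFF pages  (4G - 4K)
 *   NV_DMA_U32_MAX_4K_PAGES   == 0x00100000 pages  (exactly 4G)
 *   NV_DMA_SUBMAP_MAX_PAGES   == 0x00100000 - 16   (4G - 64K)
 * Dropping 16 4K pages (64K) keeps each submap's IOVA range 64K-aligned.
 */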
/*
@@ -1460,6 +1442,11 @@ typedef struct coherent_link_info_s {
* baremetal OS environment it is the System Physical Address (SPA) and in the case
* of a virtualized OS environment it is the Intermediate Physical Address (IPA) */
NvU64 gpu_mem_pa;
/* Physical address of the reserved portion of the GPU memory, applicable
* only on the Grace Hopper self-hosted passthrough virtualization platform. */
NvU64 rsvd_mem_pa;
/* Bitmap of NUMA node ids, corresponding to the reserved PXMs,
* available for adding GPU memory to the kernel as system RAM */
DECLARE_BITMAP(free_node_bitmap, MAX_NUMNODES);
@@ -1607,6 +1594,26 @@ typedef struct nv_linux_state_s {
struct nv_dma_device dma_dev;
struct nv_dma_device niso_dma_dev;
/*
* Background kthread for handling deferred open operations
* (e.g. from O_NONBLOCK).
*
* Adding to open_q and reading/writing is_accepting_opens
* are protected by nvl->open_q_lock (not nvl->ldata_lock).
* This allows new deferred open operations to be enqueued without
* blocking behind previous ones (which hold nvl->ldata_lock).
*
* Adding to open_q is only safe if is_accepting_opens is true.
* This prevents open operations from racing with device removal.
*
* Stopping open_q is only safe after setting is_accepting_opens to false.
* This ensures that the open_q (and the larger nvl structure) will
* outlive any of the open operations enqueued.
*/
nv_kthread_q_t open_q;
NvBool is_accepting_opens;
struct semaphore open_q_lock;
} nv_linux_state_t;
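A minimal sketch of the enqueue rule described in the comment above, assuming the nv_kthread_q_schedule_q_item() helper from nv-kthread-q.h; the wrapper function name is hypothetical:

/* Hypothetical illustration of the locking discipline documented above. */
static NvBool nv_example_enqueue_deferred_open(nv_linux_state_t *nvl,
                                               nv_linux_file_private_t *nvlfp)
{
    NvBool queued = NV_FALSE;

    down(&nvl->open_q_lock);
    if (nvl->is_accepting_opens)
    {
        /* Device removal flips is_accepting_opens under the same lock,
         * so enqueueing here cannot race with stopping open_q. */
        nv_kthread_q_schedule_q_item(&nvl->open_q, &nvlfp->open_q_item);
        queued = NV_TRUE;
    }
    up(&nvl->open_q_lock);

    return queued;
}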
extern nv_linux_state_t *nv_linux_devices;
@@ -1656,7 +1663,7 @@ typedef struct
nvidia_stack_t *sp;
nv_alloc_t *free_list;
void *nvptr;
nv_linux_state_t *nvptr;
nvidia_event_t *event_data_head, *event_data_tail;
NvBool dataless_event_pending;
nv_spinlock_t fp_lock;
@@ -1667,6 +1674,12 @@ typedef struct
nv_alloc_mapping_context_t mmap_context;
struct address_space mapping;
nv_kthread_q_item_t open_q_item;
struct completion open_complete;
nv_linux_state_t *deferred_open_nvl;
int open_rc;
NV_STATUS adapter_status;
struct list_head entry;
} nv_linux_file_private_t;
@@ -1675,6 +1688,21 @@ static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t
return container_of(nvfp, nv_linux_file_private_t, nvfp);
}
static inline int nv_wait_open_complete_interruptible(nv_linux_file_private_t *nvlfp)
{
return wait_for_completion_interruptible(&nvlfp->open_complete);
}
static inline void nv_wait_open_complete(nv_linux_file_private_t *nvlfp)
{
wait_for_completion(&nvlfp->open_complete);
}
static inline NvBool nv_is_open_complete(nv_linux_file_private_t *nvlfp)
{
return completion_done(&nvlfp->open_complete);
}
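For illustration (hypothetical helper), a caller servicing NV_ESC_WAIT_OPEN_COMPLETE could combine these helpers with the open_rc/adapter_status fields added above, assuming the deferred open worker fills them before completing open_complete:

/* Hypothetical illustration only. */
static int nv_example_wait_and_report_open(nv_linux_file_private_t *nvlfp,
                                           nv_ioctl_wait_open_complete_t *params)
{
    if (!nv_is_open_complete(nvlfp))
    {
        int ret = nv_wait_open_complete_interruptible(nvlfp);
        if (ret != 0)
            return ret; /* interrupted by a signal */
    }

    params->rc = nvlfp->open_rc;
    params->adapterStatus = nvlfp->adapter_status;
    return 0;
}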
#define NV_SET_FILE_PRIVATE(filep,data) ((filep)->private_data = (data))
#define NV_GET_LINUX_FILE_PRIVATE(filep) ((nv_linux_file_private_t *)(filep)->private_data)
@@ -1756,12 +1784,18 @@ static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv)
extern NvU32 NVreg_EnableUserNUMAManagement;
extern NvU32 NVreg_RegisterPCIDriver;
extern NvU32 NVreg_EnableResizableBar;
extern NvU32 NVreg_EnableNonblockingOpen;
extern NvU32 num_probed_nv_devices;
extern NvU32 num_nv_devices;
#define NV_FILE_INODE(file) (file)->f_inode
static inline int nv_is_control_device(struct inode *inode)
{
return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
}
#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD)
#define NV_VGX_HYPER
#if defined(NV_XEN_IOEMU_INJECT_MSI)
@@ -2040,4 +2074,7 @@ typedef enum
#include <linux/clk-provider.h>
#endif
#define NV_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL_GPL(symbol)
#define NV_CHECK_EXPORT_SYMBOL(symbol) NV_IS_EXPORT_SYMBOL_PRESENT_##symbol
#endif /* _NV_LINUX_H_ */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -37,6 +37,7 @@
#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
typedef raw_spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock) DEFINE_RAW_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) raw_spin_unlock_irq(lock)
@@ -47,6 +48,7 @@ typedef raw_spinlock_t nv_spinlock_t;
#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock)
#else
typedef spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock) DEFINE_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock)

View File

@@ -44,12 +44,18 @@ typedef int vm_fault_t;
#include <linux/mm.h>
#include <linux/sched.h>
#if defined(NV_PIN_USER_PAGES_PRESENT)
/*
* The conftest for pin_user_pages breaks on FreeBSD because pin_user_pages is an
* inline function there. Since it simply maps to get_user_pages, NV_PIN_USER_PAGES
* can be replaced with NV_GET_USER_PAGES on FreeBSD.
*/
#if defined(NV_PIN_USER_PAGES_PRESENT) && !defined(NV_BSD)
#if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES pin_user_pages
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages) \
pin_user_pages(start, nr_pages, gup_flags, pages, NULL)
#else
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages, vmas) \
pin_user_pages(start, nr_pages, gup_flags, pages)
#define NV_PIN_USER_PAGES pin_user_pages
#endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS
#define NV_UNPIN_USER_PAGE unpin_user_page
#else
@@ -80,29 +86,28 @@ typedef int vm_fault_t;
*/
#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(start, nr_pages, flags, pages)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#define NV_GET_USER_PAGES get_user_pages
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
get_user_pages(start, nr_pages, flags, pages, NULL)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
get_user_pages(current, current->mm, start, nr_pages, flags, pages, NULL)
#else
static inline long NV_GET_USER_PAGES(unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
struct vm_area_struct **vmas)
struct page **pages)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
return get_user_pages(start, nr_pages, write, force, pages, vmas);
return get_user_pages(start, nr_pages, write, force, pages, NULL);
#else
// NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
return get_user_pages(current, current->mm, start, nr_pages, write,
force, pages, vmas);
force, pages, NULL);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
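Note that every variant above now takes the same four arguments, so callers no longer pass a vmas array. A minimal usage sketch (hypothetical function name):

/* Illustrative only: fault in and return one writable user page. */
static long nv_example_get_one_page(unsigned long user_addr, struct page **page)
{
    return NV_GET_USER_PAGES(user_addr, 1, FOLL_WRITE, page);
}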
@@ -124,13 +129,13 @@ typedef int vm_fault_t;
#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)
#elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
pin_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)
#else
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS
#else
#define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
@@ -166,19 +171,19 @@ typedef int vm_fault_t;
#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL)
#else
// NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
@@ -187,14 +192,13 @@ typedef int vm_fault_t;
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
struct vm_area_struct **vmas,
int *locked)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
return get_user_pages_remote(NULL, mm, start, nr_pages, write, force,
pages, vmas);
pages, NULL);
}
#endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
@@ -204,18 +208,17 @@ typedef int vm_fault_t;
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
struct vm_area_struct **vmas,
int *locked)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, vmas);
return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, NULL);
}
#else
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
get_user_pages(NULL, mm, start, nr_pages, flags, pages, NULL)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -60,6 +60,7 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
#endif /* !defined(NV_VMWARE) */
#if defined(NVCPU_AARCH64)
extern NvBool nvos_is_chipset_io_coherent(void);
/*
* Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit
* ARM that's not for system memory, but device memory instead. For I/O cache

View File

@@ -92,6 +92,24 @@ typedef struct file_operations nv_proc_ops_t;
#endif
#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
static ssize_t nv_procfs_read_lock_##name( \
struct file *file, \
char __user *buf, \
size_t size, \
loff_t *ppos \
) \
{ \
int ret; \
ret = nv_down_read_interruptible(&lock); \
if (ret < 0) \
{ \
return ret; \
} \
size = seq_read(file, buf, size, ppos); \
up_read(&lock); \
return size; \
} \
\
static int nv_procfs_open_##name( \
struct inode *inode, \
struct file *filep \
@@ -104,11 +122,6 @@ typedef struct file_operations nv_proc_ops_t;
{ \
return ret; \
} \
ret = nv_down_read_interruptible(&lock); \
if (ret < 0) \
{ \
single_release(inode, filep); \
} \
return ret; \
} \
\
@@ -117,7 +130,6 @@ typedef struct file_operations nv_proc_ops_t;
struct file *filep \
) \
{ \
up_read(&lock); \
return single_release(inode, filep); \
}
@@ -127,46 +139,7 @@ typedef struct file_operations nv_proc_ops_t;
static const nv_proc_ops_t nv_procfs_##name##_fops = { \
NV_PROC_OPS_SET_OWNER() \
.NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
.NV_PROC_OPS_READ = seq_read, \
.NV_PROC_OPS_LSEEK = seq_lseek, \
.NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
};
#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(name, lock, \
write_callback) \
NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
\
static ssize_t nv_procfs_write_##name( \
struct file *file, \
const char __user *buf, \
size_t size, \
loff_t *ppos \
) \
{ \
ssize_t ret; \
struct seq_file *s; \
\
s = file->private_data; \
if (s == NULL) \
{ \
return -EIO; \
} \
\
ret = write_callback(s, buf + *ppos, size - *ppos); \
if (ret == 0) \
{ \
/* avoid infinite loop */ \
ret = -EIO; \
} \
return ret; \
} \
\
static const nv_proc_ops_t nv_procfs_##name##_fops = { \
NV_PROC_OPS_SET_OWNER() \
.NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
.NV_PROC_OPS_READ = seq_read, \
.NV_PROC_OPS_WRITE = nv_procfs_write_##name, \
.NV_PROC_OPS_READ = nv_procfs_read_lock_##name, \
.NV_PROC_OPS_LSEEK = seq_lseek, \
.NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
};

View File

@@ -88,4 +88,7 @@ int nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_remove_device_locked(nv_linux_state_t *);
NvBool nv_acpi_power_resource_method_present(struct pci_dev *);
int nv_linux_init_open_q(nv_linux_state_t *);
void nv_linux_stop_open_q(nv_linux_state_t *);
#endif /* _NV_PROTO_H_ */

View File

@@ -1,55 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_REGISTER_MODULE_H_
#define _NV_REGISTER_MODULE_H_
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include "nvtypes.h"
typedef struct nvidia_module_s {
struct module *owner;
/* nvidia0, nvidia1 ..*/
const char *module_name;
/* module instance */
NvU32 instance;
/* file operations */
int (*open)(struct inode *, struct file *filp);
int (*close)(struct inode *, struct file *filp);
int (*mmap)(struct file *filp, struct vm_area_struct *vma);
int (*ioctl)(struct inode *, struct file * file, unsigned int cmd, unsigned long arg);
unsigned int (*poll)(struct file * file, poll_table *wait);
} nvidia_module_t;
int nvidia_register_module(nvidia_module_t *);
int nvidia_unregister_module(nvidia_module_t *);
#endif

View File

@@ -221,7 +221,6 @@ typedef struct
#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1)
#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT)
#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT)
#define NV_RM_PAGES_TO_OS_PAGES(count) \
((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
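A worked example of the conversion, assuming 64K OS pages (os_page_shift == 16) and 4K RM pages (NV_RM_PAGE_SHIFT == 12):

/*
 * NV_RM_TO_OS_PAGE_SHIFT == 4, NV_RM_PAGES_PER_OS_PAGE == 16, so:
 *   NV_RM_PAGES_TO_OS_PAGES(16) == (16 >> 4) + 0 == 1
 *   NV_RM_PAGES_TO_OS_PAGES(17) == (17 >> 4) + 1 == 2   (a partial OS page rounds up)
 */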
@@ -467,12 +466,6 @@ typedef struct nv_state_t
NvHandle hDisp;
} rmapi;
/* Bool to check if ISO iommu enabled */
NvBool iso_iommu_present;
/* Bool to check if NISO iommu enabled */
NvBool niso_iommu_present;
/* Bool to check if dma-buf is supported */
NvBool dma_buf_supported;
@@ -484,6 +477,22 @@ typedef struct nv_state_t
/* Bool to check if the GPU has a coherent sysmem link */
NvBool coherent;
/*
* NUMA node ID of the CPU to which the GPU is attached.
* Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
*/
NvS32 cpu_numa_node_id;
struct {
/* Bool to check if ISO iommu enabled */
NvBool iso_iommu_present;
/* Bool to check if NISO iommu enabled */
NvBool niso_iommu_present;
/* Display SMMU Stream IDs */
NvU32 dispIsoStreamId;
NvU32 dispNisoStreamId;
} iommus;
} nv_state_t;
// These define need to be in sync with defines in system.h
@@ -613,10 +622,10 @@ typedef enum
(((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
((nv)->iso_iommu_present)
((nv)->iommus.iso_iommu_present)
#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv) \
((nv)->niso_iommu_present)
((nv)->iommus.niso_iommu_present)
/*
* GPU add/remove events
*/
@@ -779,8 +788,6 @@ NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64,
void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *);
NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
NV_STATUS NV_API_CALL nv_dma_map_pages (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **);
NV_STATUS NV_API_CALL nv_dma_unmap_pages (nv_dma_device_t *, NvU64, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **);
@@ -830,7 +837,7 @@ void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void NV_API_CALL nv_put_file_private(void *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
@@ -877,9 +884,9 @@ struct drm_gem_object;
NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
void NV_API_CALL nv_dma_release_dma_buf (void *, nv_dma_buf_t *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvU32 *, struct sg_table **, nv_dma_buf_t **);
void NV_API_CALL nv_dma_release_dma_buf (nv_dma_buf_t *);
void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *);
@@ -895,6 +902,8 @@ typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *)
NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *);
NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *);
void NV_API_CALL nv_get_disp_smmu_stream_ids (nv_state_t *, NvU32 *, NvU32 *);
/*
* ---------------------------------------------------------------------------
*
@@ -921,6 +930,7 @@ NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *
NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *);
void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);
@@ -940,6 +950,7 @@ void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *
char* NV_API_CALL rm_remove_spaces (const char *);
char* NV_API_CALL rm_string_token (char **, const char);
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
NV_STATUS NV_API_CALL rm_get_adapter_status_external(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *);

View File

@@ -62,10 +62,10 @@ typedef struct
/*******************************************************************************
nvUvmInterfaceRegisterGpu
Registers the GPU with the provided UUID for use. A GPU must be registered
before its UUID can be used with any other API. This call is ref-counted so
every nvUvmInterfaceRegisterGpu must be paired with a corresponding
nvUvmInterfaceUnregisterGpu.
Registers the GPU with the provided physical UUID for use. A GPU must be
registered before its UUID can be used with any other API. This call is
ref-counted so every nvUvmInterfaceRegisterGpu must be paired with a
corresponding nvUvmInterfaceUnregisterGpu.
You don't need to call nvUvmInterfaceSessionCreate before calling this.
@@ -79,12 +79,13 @@ NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatfo
/*******************************************************************************
nvUvmInterfaceUnregisterGpu
Unregisters the GPU with the provided UUID. This drops the ref count from
nvUvmInterfaceRegisterGpu. Once the reference count goes to 0 the device may
no longer be accessible until the next nvUvmInterfaceRegisterGpu call. No
automatic resource freeing is performed, so only make the last unregister
call after destroying all your allocations associated with that UUID (such
as those from nvUvmInterfaceAddressSpaceCreate).
Unregisters the GPU with the provided physical UUID. This drops the ref
count from nvUvmInterfaceRegisterGpu. Once the reference count goes to 0
the device may no longer be accessible until the next
nvUvmInterfaceRegisterGpu call. No automatic resource freeing is performed,
so only make the last unregister call after destroying all your allocations
associated with that UUID (such as those from
nvUvmInterfaceAddressSpaceCreate).
If the UUID is not found, no operation is performed.
*/
@@ -121,10 +122,10 @@ NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session);
nvUvmInterfaceDeviceCreate
Creates a device object under the given session for the GPU with the given
UUID. Also creates a partition object for the device iff bCreateSmcPartition
is true and pGpuInfo->smcEnabled is true. pGpuInfo->smcUserClientInfo will
be used to determine the SMC partition in this case. A device handle is
returned in the device output parameter.
physical UUID. Also creates a partition object for the device iff
bCreateSmcPartition is true and pGpuInfo->smcEnabled is true.
pGpuInfo->smcUserClientInfo will be used to determine the SMC partition in
this case. A device handle is returned in the device output parameter.
Error codes:
NV_ERR_GENERIC
@@ -161,6 +162,7 @@ void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device);
NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device,
unsigned long long vaBase,
unsigned long long vaSize,
NvBool enableAts,
uvmGpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
@@ -422,33 +424,6 @@ NV_STATUS nvUvmInterfacePmaPinPages(void *pPma,
NvU64 pageSize,
NvU32 flags);
/*******************************************************************************
nvUvmInterfacePmaUnpinPages
This function will unpin the physical memory allocated using PMA. The pages
passed as input must be already pinned, else this function will return an
error and rollback any change if any page is not previously marked "pinned".
Behaviour is undefined if any blacklisted pages are unpinned.
Arguments:
pPma[IN] - Pointer to PMA object.
pPages[IN] - Array of pointers, containing the PA base
address of each page to be unpinned.
pageCount [IN] - Number of pages required to be unpinned.
pageSize [IN] - Page size of each page to be unpinned.
Error codes:
NV_ERR_INVALID_ARGUMENT - Invalid input arguments.
NV_ERR_GENERIC - Unexpected error. We try hard to avoid
returning this error code as it is not very
informative.
NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB
*/
NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize);
/*******************************************************************************
nvUvmInterfaceMemoryFree
@@ -638,6 +613,8 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
nvUvmInterfaceGetGpuInfo
Return various gpu info, refer to the UvmGpuInfo struct for details.
The input UUID is for the physical GPU and the pGpuClientInfo identifies
the SMC partition if SMC is enabled and the partition exists.
If no gpu matching the uuid is found, an error will be returned.
On Ampere+ GPUs, pGpuClientInfo contains SMC information provided by the
@@ -645,6 +622,9 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
Error codes:
NV_ERR_GENERIC
NV_ERR_NO_MEMORY
NV_ERR_GPU_UUID_NOT_FOUND
NV_ERR_INSUFFICIENT_PERMISSIONS
NV_ERR_INSUFFICIENT_RESOURCES
*/
NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid,
@@ -857,7 +837,7 @@ NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device,
UVM GPU UNLOCK
Arguments:
gpuUuid[IN] - UUID of the GPU to operate on
device[IN] - Device handle associated with the gpu
bOwnInterrupts - Set to NV_TRUE for UVM to take ownership of the
replayable page fault interrupts. Set to NV_FALSE
to return ownership of the page fault interrupts
@@ -973,6 +953,7 @@ NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
NOTES:
- This function DOES NOT acquire the RM API or GPU locks. That is because
it is called during fault servicing, which could produce deadlocks.
- This function should not be called when interrupts are disabled.
Arguments:
device[IN] - Device handle associated with the gpu
@@ -982,6 +963,27 @@ NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
*/
NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(uvmGpuDeviceHandle device);
/*******************************************************************************
nvUvmInterfaceTogglePrefetchFaults
This function sends an RPC to GSP in order to toggle the prefetch fault PRI.
NOTES:
- This function DOES NOT acquire the RM API or GPU locks. That is because
it is called during fault servicing, which could produce deadlocks.
- This function should not be called when interrupts are disabled.
Arguments:
pFaultInfo[IN] - Information provided by RM for fault handling.
Used for obtaining the device handle without locks.
bEnable[IN] - Instructs RM whether to turn generation of prefetch
faults on or off.
Error codes:
NV_ERR_INVALID_ARGUMENT
*/
NV_STATUS nvUvmInterfaceTogglePrefetchFaults(UvmGpuFaultInfo *pFaultInfo, NvBool bEnable);
/*******************************************************************************
nvUvmInterfaceInitAccessCntrInfo
@@ -1087,7 +1089,8 @@ void nvUvmInterfaceDeRegisterUvmOps(void);
Error codes:
NV_ERR_INVALID_ARGUMENT
NV_ERR_OBJECT_NOT_FOUND : If device object associated with the uuids aren't found.
NV_ERR_OBJECT_NOT_FOUND : If the device object associated with the device
handles isn't found.
*/
NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1,
uvmGpuDeviceHandle device2,
@@ -1140,6 +1143,8 @@ void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session,
NV_ERR_NOT_READY - Returned when querying the PTEs requires a deferred setup
which has not yet completed. It is expected that the caller
will reattempt the call until a different code is returned.
As an example, multi-node systems which require querying
PTEs from the Fabric Manager may return this code.
*/
NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
NvHandle hMemory,
@@ -1449,18 +1454,7 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
NvU32 methodStreamSize);
/*******************************************************************************
CSL Interface and Locking
The following functions do not acquire the RM API or GPU locks and must not be called
concurrently with the same UvmCslContext parameter in different threads. The caller must
guarantee this exclusion.
* nvUvmInterfaceCslRotateIv
* nvUvmInterfaceCslEncrypt
* nvUvmInterfaceCslDecrypt
* nvUvmInterfaceCslSign
* nvUvmInterfaceCslQueryMessagePool
* nvUvmInterfaceCslIncrementIv
Cryptography Services Library (CSL) Interface
*/
/*******************************************************************************
@@ -1471,8 +1465,11 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
The lifetime of the context is the same as the lifetime of the secure channel
it is paired with.
Locking: This function acquires an API lock.
Memory : This function dynamically allocates memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
channel[IN] - Handle to a secure channel.
Error codes:
@@ -1490,11 +1487,33 @@ NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
If context is already deinitialized then function returns immediately.
Locking: This function does not acquire an API or GPU lock.
Memory : This function may free memory.
Arguments:
uvmCslContext[IN] - The CSL context.
uvmCslContext[IN] - The CSL context associated with a channel.
*/
void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);
/*******************************************************************************
nvUvmInterfaceCslUpdateContext
Updates a context after a key rotation event and can only be called once per
key rotation event. Following a key rotation event, and before
nvUvmInterfaceCslUpdateContext is called, data encrypted by the GPU with the
previous key can be decrypted with nvUvmInterfaceCslDecrypt.
Locking: This function acquires an API lock.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN] - The CSL context associated with a channel.
Error codes:
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
*/
NV_STATUS nvUvmInterfaceCslUpdateContext(UvmCslContext *uvmCslContext);
/*******************************************************************************
nvUvmInterfaceCslRotateIv
@@ -1509,11 +1528,13 @@ void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);
the channel must be idle before calling this function. This function can be
called regardless of the value of the IV's message counter.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function attempts to acquire the GPU lock.
In case of failure to acquire the return code
is NV_ERR_STATE_IN_USE.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
operation[IN] - Either
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
@@ -1521,7 +1542,11 @@ Arguments:
Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The rotate operation would cause a counter
to overflow.
NV_ERR_STATE_IN_USE - Unable to acquire lock / resource. Caller
can retry at a later time.
NV_ERR_INVALID_ARGUMENT - Invalid value for operation.
NV_ERR_GENERIC - A failure other than _STATE_IN_USE occurred
when attempting to acquire a lock.
*/
NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
UvmCslOperation operation);
@@ -1538,11 +1563,13 @@ NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
The encryptIV can be obtained from nvUvmInterfaceCslIncrementIv.
However, it is optional. If it is NULL, the next IV in line will be used.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext
the caller must guarantee exclusion.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
bufferSize[IN] - Size of the input and output buffers in
units of bytes. Value can range from 1 byte
to (2^32) - 1 bytes.
@@ -1553,8 +1580,9 @@ Arguments:
Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.
Error codes:
NV_ERR_INVALID_ARGUMENT - The size of the data is 0 bytes.
- The encryptIv has already been used.
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
- The size of the data is 0 bytes.
- The encryptIv has already been used.
*/
NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
@@ -1573,8 +1601,10 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
maximized when the input and output buffers are 16-byte aligned. This is
the natural alignment for an AES block.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext
the caller must guarantee exclusion.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
@@ -1616,11 +1646,13 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
Auth and input buffers must not overlap. If they do then calling this function produces
undefined behavior.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext
the caller must guarantee exclusion.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
bufferSize[IN] - Size of the input buffer in units of bytes.
Value can range from 1 byte to (2^32) - 1 bytes.
inputBuffer[IN] - Address of plaintext input buffer.
@@ -1629,7 +1661,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The signing operation would cause a counter overflow to occur.
NV_ERR_INVALID_ARGUMENT - The size of the data is 0 bytes.
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
- The size of the data is 0 bytes.
*/
NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
@@ -1641,8 +1674,10 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
Returns the number of messages that can be encrypted before the message counter will overflow.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory.
If called concurrently in different threads with the same UvmCslContext
the caller must guarantee exclusion.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
@@ -1666,8 +1701,10 @@ NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
can be used in nvUvmInterfaceCslEncrypt. If operation is UVM_CSL_OPERATION_DECRYPT then
the returned IV can be used in nvUvmInterfaceCslDecrypt.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext
the caller must guarantee exclusion.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
@@ -1675,7 +1712,7 @@ Arguments:
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
increment[IN] - The amount by which the IV is incremented. Can be 0.
iv[out] - If non-NULL, a buffer to store the incremented IV.
iv[OUT] - If non-NULL, a buffer to store the incremented IV.
Error codes:
NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is illegal.
@@ -1687,4 +1724,29 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
NvU64 increment,
UvmCslIv *iv);
/*******************************************************************************
nvUvmInterfaceCslLogExternalEncryption
Checks and logs information about non-CSL encryptions, such as those that
originate from the GPU.
This function does not modify elements of the UvmCslContext.
Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory.
If called concurrently in different threads with the same UvmCslContext
the caller must guarantee exclusion.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
bufferSize[IN] - The size of the buffer encrypted by the
external entity in units of bytes.
Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The device encryption would cause a counter
to overflow.
*/
NV_STATUS nvUvmInterfaceCslLogExternalEncryption(UvmCslContext *uvmCslContext,
NvU32 bufferSize);
#endif // _NV_UVM_INTERFACE_H_

View File

@@ -131,6 +131,8 @@ typedef struct UvmGpuMemoryInfo_tag
// This is only valid if deviceDescendant is NV_TRUE.
// When egm is NV_TRUE, this is also the UUID of the GPU
// for which EGM is local.
// If the GPU has SMC enabled, the UUID is the GI UUID.
// Otherwise, it is the UUID for the physical GPU.
// Note: If the allocation is owned by a device in
// an SLI group and the allocation is broadcast
// across the SLI group, this UUID will be any one
@@ -544,6 +546,10 @@ typedef struct UvmGpuP2PCapsParams_tag
// the GPUs are direct peers.
NvU32 peerIds[2];
// Out: peerId[i] contains gpu[i]'s EGM peer id of gpu[1 - i]. Only defined
// if the GPUs are direct peers and EGM is enabled in the system.
NvU32 egmPeerIds[2];
// Out: UVM_LINK_TYPE
NvU32 p2pLink;
@@ -572,8 +578,11 @@ typedef struct UvmPlatformInfo_tag
// Out: ATS (Address Translation Services) is supported
NvBool atsSupported;
// Out: AMD SEV (Secure Encrypted Virtualization) is enabled
NvBool sevEnabled;
// Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
// is enabled in the VM, indicating that Confidential Computing must also be
// enabled in the GPU(s); these two security features are either both
// enabled, or both disabled.
NvBool confComputingEnabled;
} UvmPlatformInfo;
typedef struct UvmGpuClientInfo_tag
@@ -604,7 +613,8 @@ typedef struct UvmGpuInfo_tag
// Printable gpu name
char name[UVM_GPU_NAME_LENGTH];
// Uuid of this gpu
// Uuid of the physical GPU or GI UUID if nvUvmInterfaceGetGpuInfo()
// requested information for a valid SMC partition.
NvProcessorUuid uuid;
// Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_*
@@ -688,8 +698,12 @@ typedef struct UvmGpuInfo_tag
NvU64 nvswitchMemoryWindowStart;
// local EGM properties
// NV_TRUE if EGM is enabled
NvBool egmEnabled;
// Peer ID to reach local EGM when EGM is enabled
NvU8 egmPeerId;
// EGM base address to offset in the GMMU PTE entry for EGM mappings
NvU64 egmBaseAddr;
} UvmGpuInfo;
typedef struct UvmGpuFbInfo_tag
@@ -778,14 +792,14 @@ typedef NV_STATUS (*uvmEventResume_t) (void);
/*******************************************************************************
uvmEventStartDevice
This function will be called by the GPU driver once it has finished its
initialization to tell the UVM driver that this GPU has come up.
initialization to tell the UVM driver that this physical GPU has come up.
*/
typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
/*******************************************************************************
uvmEventStopDevice
This function will be called by the GPU driver to let UVM know that a GPU
is going down.
This function will be called by the GPU driver to let UVM know that a
physical GPU is going down.
*/
typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
@@ -816,7 +830,7 @@ typedef NV_STATUS (*uvmEventServiceInterrupt_t) (void *pDeviceObject,
/*******************************************************************************
uvmEventIsrTopHalf_t
This function will be called by the GPU driver to let UVM know
that an interrupt has occurred.
that an interrupt has occurred on the given physical GPU.
Returns:
NV_OK if the UVM driver handled the interrupt
@@ -923,11 +937,6 @@ typedef struct UvmGpuFaultInfo_tag
// CSL context used for performing decryption of replayable faults when
// Confidential Computing is enabled.
UvmCslContext cslCtx;
// Indicates whether UVM owns the replayable fault buffer.
// The value of this field is always NV_TRUE When Confidential Computing
// is disabled.
NvBool bUvmOwnsHwFaultBuffer;
} replayable;
struct
{

View File

@@ -58,6 +58,7 @@ typedef NvU32 NvKmsFrameLockHandle;
typedef NvU32 NvKmsDeferredRequestFifoHandle;
typedef NvU32 NvKmsSwapGroupHandle;
typedef NvU32 NvKmsVblankSyncObjectHandle;
typedef NvU32 NvKmsVblankSemControlHandle;
struct NvKmsSize {
NvU16 width;

View File

@@ -490,6 +490,8 @@ typedef enum NvKmsKapiRegisterWaiterResultRec {
NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED,
} NvKmsKapiRegisterWaiterResult;
typedef void NvKmsKapiSuspendResumeCallbackFunc(NvBool suspend);
struct NvKmsKapiFunctionsTable {
/*!
@@ -1399,6 +1401,15 @@ struct NvKmsKapiFunctionsTable {
NvU64 index,
NvU64 new_value
);
/*!
* Set the callback function for suspending and resuming the display system.
*/
void
(*setSuspendResumeCallback)
(
NvKmsKapiSuspendResumeCallbackFunc *function
);
};
/** @} */

View File

@@ -919,6 +919,9 @@ static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
//
#define NV_BIT_SET_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) |= NVBIT64(b); else (hi) |= NVBIT64( b & 0x3F ); }
// Get the number of elements in the specified fixed-size array
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
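A trivial usage sketch of the new helper (hypothetical function name):

/* Illustrative only: NV_ARRAY_ELEMENTS avoids hard-coding array lengths. */
static NvU32 nvExampleSum(void)
{
    static const NvU32 values[] = { 1, 2, 3, 4 };
    NvU32 i, sum = 0;

    for (i = 0; i < NV_ARRAY_ELEMENTS(values); i++)
        sum += values[i];
    return sum;
}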
#ifdef __cplusplus
}
#endif //__cplusplus

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -150,6 +150,7 @@ NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Confi
NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt")
NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded")
NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value")
NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE, 0x0000007C, "PMU RPC error due to no queue slot available for this event")
// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")

View File

@@ -145,7 +145,12 @@ typedef signed short NvS16; /* -32768 to 32767 */
#endif
// Macro to build an NvU32 from four bytes, listed from msb to lsb
#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
#define NvU32_BUILD(a, b, c, d) \
((NvU32)( \
(((NvU32)(a) & 0xff) << 24) | \
(((NvU32)(b) & 0xff) << 16) | \
(((NvU32)(c) & 0xff) << 8) | \
(((NvU32)(d) & 0xff))))
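For illustration, the per-argument masking matters when an argument is signed or wider than a byte; the old form could spill into neighbouring byte lanes:

/*
 * Illustrative only:
 *   NvU32_BUILD(0x12, 0x34, 0x56, 0x78) == 0x12345678 with either form.
 *   With char c = (char)0x80 (sign-extended to 0xFFFFFF80 on promotion),
 *   the old form OR'd that value in directly and corrupted the upper
 *   three bytes; the new form masks each argument with 0xff first, so
 *   NvU32_BUILD(0, 0, 0, c) == 0x00000080.
 */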
#if NVTYPES_USE_STDINT
typedef uint32_t NvV32; /* "void": enumerated or multiple fields */

View File

@@ -67,7 +67,6 @@ typedef struct os_wait_queue os_wait_queue;
* ---------------------------------------------------------------------------
*/
NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
@@ -105,7 +104,6 @@ void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
@@ -230,14 +228,12 @@ extern NvBool os_dma_buf_enabled;
* ---------------------------------------------------------------------------
*/
#define NV_DBG_INFO 0x1
#define NV_DBG_SETUP 0x2
#define NV_DBG_INFO 0x0
#define NV_DBG_SETUP 0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_WARNINGS 0x3
#define NV_DBG_ERRORS 0x4
#define NV_DBG_HW_ERRORS 0x5
#define NV_DBG_FATAL 0x6
#define NV_DBG_FORCE_LEVEL(level) ((level) | (1 << 8))
void NV_API_CALL out_string(const char *str);
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
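A minimal usage sketch, assuming nv_printf() honours the force bit encoded by NV_DBG_FORCE_LEVEL():

/* Illustrative only: log at a forced level, bypassing the configured threshold. */
nv_printf(NV_DBG_FORCE_LEVEL(NV_DBG_ERRORS), "NVRM: example diagnostic message\n");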

View File

@@ -37,7 +37,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessio
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, NvBool, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t);
@@ -45,7 +45,6 @@ NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddres
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32 , nvgpuPmaAllocationOptions_t, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *);
void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *);
@@ -77,6 +76,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceH
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_toggle_prefetch_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *);
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
@@ -103,6 +103,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, n
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_update(nvidia_stack_t *, struct ccslContext_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *);
@@ -110,5 +111,6 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslCont
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU32);
#endif