Compare commits


10 Commits
565 ... 575.64

Author              SHA1        Message     Date
Maneet Singh        fade1f7b20  575.64      2025-06-16 19:28:19 -07:00
Maneet Singh        30e15d79de  575.57.08   2025-05-29 10:58:21 -07:00
Andy Ritger         e00332b05f  575.51.03   2025-05-01 22:14:31 -07:00
Bernhard Stoeckner  4159579888  575.51.02   2025-04-17 19:35:59 +02:00
Bernhard Stoeckner  e8113f665d  570.133.20  2025-04-17 17:56:49 +02:00
Bernhard Stoeckner  c5e439fea4  570.133.07  2025-03-19 14:13:05 +01:00
Bernhard Stoeckner  25bef4626e  570.124.06  2025-03-03 19:08:20 +01:00
Bernhard Stoeckner  129479b1b7  570.124.04  2025-02-27 17:32:23 +01:00
Bernhard Stoeckner  81fe4fb417  570.86.16   2025-01-30 17:37:03 +01:00
Bernhard Stoeckner  54d69484da  570.86.15   2025-01-27 19:36:56 +01:00
1539 changed files with 412875 additions and 237850 deletions

README.md — 1550 changed lines; file diff suppressed because it is too large.

View File

@@ -57,25 +57,32 @@ ifeq ($(NV_UNDEF_BEHAVIOR_SANITIZER),1)
UBSAN_SANITIZE := y
endif
#
# Command to create a symbolic link, explicitly resolving the symlink target
# to an absolute path to abstract away the difference between Linux < 6.13,
# where the CWD is the Linux kernel source tree for Kbuild extmod builds, and
# Linux >= 6.13, where the CWD is the external module source tree.
#
# This is used to create the nv*-kernel.o -> nv*-kernel.o_binary symlinks for
# kernel modules which use precompiled binary object files.
#
quiet_cmd_symlink = SYMLINK $@
cmd_symlink = ln -sf $(abspath $<) $@
$(foreach _module, $(NV_KERNEL_MODULES), \
$(eval include $(src)/$(_module)/$(_module).Kbuild))
#
# Define CFLAGS that apply to all the NVIDIA kernel modules. EXTRA_CFLAGS
# is deprecated since 2.6.24 in favor of ccflags-y, but we need to support
# older kernels which do not have ccflags-y. Newer kernels append
# $(EXTRA_CFLAGS) to ccflags-y for compatibility.
#
EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"565.77\"
ccflags-y += -I$(src)/common/inc
ccflags-y += -I$(src)
ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
ccflags-y += -D__KERNEL__ -DMODULE -DNVRM
ccflags-y += -DNV_VERSION_STRING=\"575.64\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
ccflags-y += -I$(SYSSRCHOST1X)
endif
# Some Android kernels prohibit driver use of filesystem functions like
@@ -85,57 +92,57 @@ endif
PLATFORM_IS_ANDROID ?= 0
ifeq ($(PLATFORM_IS_ANDROID),1)
EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=0
ccflags-y += -DNV_FILESYSTEM_ACCESS_AVAILABLE=0
else
EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=1
ccflags-y += -DNV_FILESYSTEM_ACCESS_AVAILABLE=1
endif
EXTRA_CFLAGS += -Wno-unused-function
ccflags-y += -Wno-unused-function
ifneq ($(NV_BUILD_TYPE),debug)
EXTRA_CFLAGS += -Wuninitialized
ccflags-y += -Wuninitialized
endif
EXTRA_CFLAGS += -fno-strict-aliasing
ccflags-y += -fno-strict-aliasing
ifeq ($(ARCH),arm64)
EXTRA_CFLAGS += -mstrict-align
ccflags-y += -mstrict-align
endif
ifeq ($(NV_BUILD_TYPE),debug)
EXTRA_CFLAGS += -g
ccflags-y += -g
endif
EXTRA_CFLAGS += -ffreestanding
ccflags-y += -ffreestanding
ifeq ($(ARCH),arm64)
EXTRA_CFLAGS += -mgeneral-regs-only -march=armv8-a
EXTRA_CFLAGS += $(call cc-option,-mno-outline-atomics,)
ccflags-y += -mgeneral-regs-only -march=armv8-a
ccflags-y += $(call cc-option,-mno-outline-atomics,)
endif
ifeq ($(ARCH),x86_64)
EXTRA_CFLAGS += -mno-red-zone -mcmodel=kernel
ccflags-y += -mno-red-zone -mcmodel=kernel
endif
ifeq ($(ARCH),powerpc)
EXTRA_CFLAGS += -mlittle-endian -mno-strict-align
ccflags-y += -mlittle-endian -mno-strict-align
endif
EXTRA_CFLAGS += -DNV_UVM_ENABLE
EXTRA_CFLAGS += $(call cc-option,-Werror=undef,)
EXTRA_CFLAGS += -DNV_SPECTRE_V2=$(NV_SPECTRE_V2)
EXTRA_CFLAGS += -DNV_KERNEL_INTERFACE_LAYER
ccflags-y += -DNV_UVM_ENABLE
ccflags-y += $(call cc-option,-Werror=undef,)
ccflags-y += -DNV_SPECTRE_V2=$(NV_SPECTRE_V2)
ccflags-y += -DNV_KERNEL_INTERFACE_LAYER
#
# Detect SGI UV systems and apply system-specific optimizations.
#
ifneq ($(wildcard /proc/sgi_uv),)
EXTRA_CFLAGS += -DNV_CONFIG_X86_UV
ccflags-y += -DNV_CONFIG_X86_UV
endif
ifdef VGX_FORCE_VFIO_PCI_CORE
EXTRA_CFLAGS += -DNV_VGPU_FORCE_VFIO_PCI_CORE
ccflags-y += -DNV_VGPU_FORCE_VFIO_PCI_CORE
endif
WARNINGS_AS_ERRORS ?=
@@ -169,7 +176,8 @@ NV_CONFTEST_CMD := /bin/sh $(NV_CONFTEST_SCRIPT) \
NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags)
NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(EXTRA_CFLAGS) -fno-pie
NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(ccflags-y) -fno-pie
NV_CONFTEST_CFLAGS += $(filter -std=%,$(KBUILD_CFLAGS))
NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign)
NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,)
NV_CONFTEST_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types,)

View File

@@ -71,6 +71,31 @@ else
CC ?= cc
LD ?= ld
OBJDUMP ?= objdump
AWK ?= awk
# Bake the following awk program into a string. The program is needed to add C++
# to the languages excluded from BTF generation.
#
# Also, unconditionally return success (0) from the awk program, rather than
# propagating pahole's return status (with 'exit system(pahole_cmd)'), to
# work around a DW_TAG_rvalue_reference_type error in
# kernel/nvidia-modeset.ko.
#
# BEGIN {
# pahole_cmd = "pahole"
# for (i = 1; i < ARGC; i++) {
# if (ARGV[i] ~ /--lang_exclude=/) {
# pahole_cmd = pahole_cmd sprintf(" %s,c++", ARGV[i])
# } else {
# pahole_cmd = pahole_cmd sprintf(" %s", ARGV[i])
# }
# }
# system(pahole_cmd)
# }
PAHOLE_AWK_PROGRAM = BEGIN { pahole_cmd = \"pahole\"; for (i = 1; i < ARGC; i++) { if (ARGV[i] ~ /--lang_exclude=/) { pahole_cmd = pahole_cmd sprintf(\" %s,c++\", ARGV[i]); } else { pahole_cmd = pahole_cmd sprintf(\" %s\", ARGV[i]); } } system(pahole_cmd); }
# If scripts/pahole-flags.sh is not present in the kernel tree, add PAHOLE and
# PAHOLE_AWK_PROGRAM assignments to PAHOLE_VARIABLES; otherwise assign the
# empty string to PAHOLE_VARIABLES.
PAHOLE_VARIABLES=$(if $(wildcard $(KERNEL_SOURCES)/scripts/pahole-flags.sh),,"PAHOLE=$(AWK) '$(PAHOLE_AWK_PROGRAM)'")
ifndef ARCH
ARCH := $(shell uname -m | sed -e 's/i.86/i386/' \
@@ -86,7 +111,7 @@ else
ifneq ($(filter $(ARCH),i386 x86_64),)
KERNEL_ARCH = x86
else
ifeq ($(filter $(ARCH),arm64 powerpc),)
ifeq ($(filter $(ARCH),arm64 powerpc riscv),)
$(error Unsupported architecture $(ARCH))
endif
endif
@@ -112,7 +137,8 @@ else
.PHONY: modules module clean clean_conftest modules_install
modules clean modules_install:
@$(MAKE) "LD=$(LD)" "CC=$(CC)" "OBJDUMP=$(OBJDUMP)" $(KBUILD_PARAMS) $@
@$(MAKE) "LD=$(LD)" "CC=$(CC)" "OBJDUMP=$(OBJDUMP)" \
$(PAHOLE_VARIABLES) $(KBUILD_PARAMS) $@
@if [ "$@" = "modules" ]; then \
for module in $(NV_KERNEL_MODULES); do \
if [ -x split-object-file.sh ]; then \

View File

@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OS_DCE_CLIENT_IPC_H_
#define _OS_DCE_CLIENT_IPC_H_
// RM IPC Client Types
#define DCE_CLIENT_RM_IPC_TYPE_SYNC 0x0
#define DCE_CLIENT_RM_IPC_TYPE_EVENT 0x1
#define DCE_CLIENT_RM_IPC_TYPE_MAX 0x2
void dceclientHandleAsyncRpcCallback(NvU32 handle, NvU32 interfaceType,
NvU32 msgLength, void *data,
void *usrCtx);
#endif

View File

@@ -32,7 +32,10 @@
typedef enum
{
NV_FIRMWARE_TYPE_GSP,
NV_FIRMWARE_TYPE_GSP_LOG
NV_FIRMWARE_TYPE_GSP_LOG,
#if defined(NV_VMWARE)
NV_FIRMWARE_TYPE_BINDATA
#endif
} nv_firmware_type_t;
typedef enum
@@ -45,6 +48,7 @@ typedef enum
NV_FIRMWARE_CHIP_FAMILY_AD10X = 5,
NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
NV_FIRMWARE_CHIP_FAMILY_GB10X = 8,
NV_FIRMWARE_CHIP_FAMILY_GB20X = 9,
NV_FIRMWARE_CHIP_FAMILY_END,
} nv_firmware_chip_family_t;
@@ -54,6 +58,7 @@ static inline const char *nv_firmware_chip_family_to_string(
{
switch (fw_chip_family) {
case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x";
case NV_FIRMWARE_CHIP_FAMILY_GB20X: return "gb20x";
case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x";
@@ -84,6 +89,7 @@ static inline const char *nv_firmware_for_chip_family(
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
@@ -104,6 +110,7 @@ static inline const char *nv_firmware_for_chip_family(
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
@@ -119,7 +126,12 @@ static inline const char *nv_firmware_for_chip_family(
return "";
}
}
#if defined(NV_VMWARE)
else if (fw_type == NV_FIRMWARE_TYPE_BINDATA)
{
return NV_FIRMWARE_FOR_NAME("bindata_image");
}
#endif
return "";
}
#endif // defined(NV_FIRMWARE_FOR_NAME)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -128,6 +128,9 @@ typedef struct nv_ioctl_register_fd
#define NV_DMABUF_EXPORT_MAX_HANDLES 128
#define NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT 0
#define NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE 1
typedef struct nv_ioctl_export_to_dma_buf_fd
{
int fd;
@@ -136,6 +139,7 @@ typedef struct nv_ioctl_export_to_dma_buf_fd
NvU32 numObjects;
NvU32 index;
NvU64 totalSize NV_ALIGN_BYTES(8);
NvU8 mappingType;
NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
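Editorial note, not part of the diff: a minimal sketch of how a caller might fill in the new mappingType field when exporting a single handle. The struct and constant names come from the hunk above; the helper name is hypothetical and the structure is assumed to have been zero-initialized by the caller, so fields elided from this hunk stay zeroed.

/*
 * Hypothetical usage sketch (editorial): force a PCIe mapping for a single
 * exported handle. Assumes *p was zero-initialized by the caller.
 */
static void example_fill_export_params(struct nv_ioctl_export_to_dma_buf_fd *p,
                                       NvHandle hMemory, NvU64 size)
{
    p->fd          = -1;   /* filled in by the driver on success */
    p->numObjects  = 1;
    p->index       = 0;
    p->totalSize   = size;
    p->mappingType = NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE; /* new field in this change */
    p->handles[0]  = hMemory;
    p->offsets[0]  = 0;
    p->sizes[0]    = size;
}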

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2001-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2001-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,8 +36,7 @@
#include "nv-timer.h"
#include "nv-time.h"
#include "nv-chardev-numbers.h"
#define NV_KERNEL_NAME "Linux"
#include "nv-platform.h"
#ifndef AUTOCONF_INCLUDED
#if defined(NV_GENERATED_AUTOCONF_H_PRESENT)
@@ -231,12 +230,6 @@ NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
const char *, va_list);
#endif
#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH)
#include <asm/eeh.h>
#define NV_PCI_ERROR_RECOVERY_ENABLED() eeh_enabled()
#define NV_PCI_ERROR_RECOVERY
#endif
#if defined(NV_ASM_SET_MEMORY_H_PRESENT)
#include <asm/set_memory.h>
#endif
@@ -245,7 +238,7 @@ NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
#undef NV_SET_PAGES_UC_PRESENT
#endif
#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE) && !defined(NVCPU_RISCV64)
#if !defined(NVCPU_AARCH64) && !defined(NVCPU_RISCV64)
#if !defined(NV_SET_MEMORY_UC_PRESENT) && !defined(NV_SET_PAGES_UC_PRESENT)
#error "This driver requires the ability to change memory types!"
#endif
@@ -351,8 +344,6 @@ extern int nv_pat_mode;
#define NV_PAGE_COUNT(page) \
((unsigned int)page_count(page))
#define NV_GET_PAGE_COUNT(page_ptr) \
(NV_PAGE_COUNT(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)))
#define NV_GET_PAGE_FLAGS(page_ptr) \
(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)->flags)
@@ -411,7 +402,7 @@ typedef enum
NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */
} nv_memory_type_t;
#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64)
#if defined(NVCPU_AARCH64) || defined(NVCPU_RISCV64)
#define NV_ALLOW_WRITE_COMBINING(mt) 1
#elif defined(NVCPU_X86_64)
#if defined(NV_ENABLE_PAT_SUPPORT)
@@ -469,10 +460,7 @@ static inline void *nv_vmalloc(unsigned long size)
#else
void *ptr = __vmalloc(size, GFP_KERNEL);
#endif
if (ptr)
{
NV_MEMDBG_ADD(ptr, size);
}
NV_MEMDBG_ADD(ptr, size);
return ptr;
}
@@ -489,10 +477,7 @@ static inline void *nv_ioremap(NvU64 phys, NvU64 size)
#else
void *ptr = ioremap(phys, size);
#endif
if (ptr)
{
NV_MEMDBG_ADD(ptr, size);
}
NV_MEMDBG_ADD(ptr, size);
return ptr;
}
@@ -508,29 +493,12 @@ static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size)
ptr = ioremap_cache_shared(phys, size);
#elif defined(NV_IOREMAP_CACHE_PRESENT)
ptr = ioremap_cache(phys, size);
#elif defined(NVCPU_PPC64LE)
//
// ioremap_cache() has been only implemented correctly for ppc64le with
// commit f855b2f544d6 in April 2017 (kernel 4.12+). Internally, the kernel
// does provide a default implementation of ioremap_cache() that would be
// incorrect for our use (creating an uncached mapping) before the
// referenced commit, but that implementation is not exported and the
// NV_IOREMAP_CACHE_PRESENT conftest doesn't pick it up, and we end up in
// this #elif branch.
//
// At the same time, ppc64le have supported ioremap_prot() since May 2011
// (commit 40f1ce7fb7e8, kernel 3.0+) and that covers all kernels we
// support on power.
//
ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL));
#else
return nv_ioremap(phys, size);
#endif
if (ptr)
{
NV_MEMDBG_ADD(ptr, size);
}
NV_MEMDBG_ADD(ptr, size);
return ptr;
}
@@ -545,10 +513,8 @@ static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size)
return nv_ioremap_nocache(phys, size);
#endif
if (ptr)
{
NV_MEMDBG_ADD(ptr, size);
}
NV_MEMDBG_ADD(ptr, size);
return ptr;
}
@@ -568,22 +534,19 @@ static NvBool nv_numa_node_has_memory(int node_id)
#define NV_KMALLOC(ptr, size) \
{ \
(ptr) = kmalloc(size, NV_GFP_KERNEL); \
if (ptr) \
NV_MEMDBG_ADD(ptr, size); \
NV_MEMDBG_ADD(ptr, size); \
}
#define NV_KZALLOC(ptr, size) \
{ \
(ptr) = kzalloc(size, NV_GFP_KERNEL); \
if (ptr) \
NV_MEMDBG_ADD(ptr, size); \
NV_MEMDBG_ADD(ptr, size); \
}
#define NV_KMALLOC_ATOMIC(ptr, size) \
{ \
(ptr) = kmalloc(size, NV_GFP_ATOMIC); \
if (ptr) \
NV_MEMDBG_ADD(ptr, size); \
NV_MEMDBG_ADD(ptr, size); \
}
#if defined(__GFP_RETRY_MAYFAIL)
@@ -597,8 +560,7 @@ static NvBool nv_numa_node_has_memory(int node_id)
#define NV_KMALLOC_NO_OOM(ptr, size) \
{ \
(ptr) = kmalloc(size, NV_GFP_NO_OOM); \
if (ptr) \
NV_MEMDBG_ADD(ptr, size); \
NV_MEMDBG_ADD(ptr, size); \
}
#define NV_KFREE(ptr, size) \
@@ -609,7 +571,7 @@ static NvBool nv_numa_node_has_memory(int node_id)
#define NV_ALLOC_PAGES_NODE(ptr, nid, order, gfp_mask) \
{ \
(ptr) = (unsigned long)page_address(alloc_pages_node(nid, gfp_mask, order)); \
(ptr) = (unsigned long) alloc_pages_node(nid, gfp_mask, order); \
}
#define NV_GET_FREE_PAGES(ptr, order, gfp_mask) \
@@ -631,9 +593,9 @@ static inline pgprot_t nv_sme_clr(pgprot_t prot)
#endif // __sme_clr
}
static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra)
static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot)
{
pgprot_t prot = __pgprot(pgprot_val(vm_prot) | extra);
pgprot_t prot = __pgprot(pgprot_val(vm_prot));
#if defined(pgprot_decrypted)
return pgprot_decrypted(prot);
@@ -654,41 +616,6 @@ static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra)
#endif
#endif
static inline NvUPtr nv_vmap(struct page **pages, NvU32 page_count,
NvBool cached, NvBool unencrypted)
{
void *ptr;
pgprot_t prot = PAGE_KERNEL;
#if defined(NVCPU_X86_64)
#if defined(PAGE_KERNEL_NOENC)
if (unencrypted)
{
prot = cached ? nv_adjust_pgprot(PAGE_KERNEL_NOENC, 0) :
nv_adjust_pgprot(NV_PAGE_KERNEL_NOCACHE_NOENC, 0);
}
else
#endif
{
prot = cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE;
}
#elif defined(NVCPU_AARCH64)
prot = cached ? PAGE_KERNEL : NV_PGPROT_UNCACHED(PAGE_KERNEL);
#endif
/* All memory cached in PPC64LE; can't honor 'cached' input. */
ptr = vmap(pages, page_count, VM_MAP, prot);
if (ptr)
{
NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE);
}
return (NvUPtr)ptr;
}
static inline void nv_vunmap(NvUPtr vaddr, NvU32 page_count)
{
vunmap((void *)vaddr);
NV_MEMDBG_REMOVE((void *)vaddr, page_count * PAGE_SIZE);
}
#if defined(NV_GET_NUM_PHYSPAGES_PRESENT)
#define NV_NUM_PHYSPAGES get_num_physpages()
#else
@@ -713,6 +640,47 @@ static inline void nv_vunmap(NvUPtr vaddr, NvU32 page_count)
#define NV_NUM_CPUS() num_possible_cpus()
#define NV_HAVE_MEMORY_ENCRYPT_DECRYPT 0
#if defined(NVCPU_X86_64) && \
NV_IS_EXPORT_SYMBOL_GPL_set_memory_encrypted && \
NV_IS_EXPORT_SYMBOL_GPL_set_memory_decrypted
#undef NV_HAVE_MEMORY_ENCRYPT_DECRYPT
#define NV_HAVE_MEMORY_ENCRYPT_DECRYPT 1
#endif
static inline void nv_set_memory_decrypted_zeroed(NvBool unencrypted,
unsigned long virt_addr,
int num_native_pages,
size_t size)
{
if (virt_addr == 0)
return;
#if NV_HAVE_MEMORY_ENCRYPT_DECRYPT
if (unencrypted)
{
set_memory_decrypted(virt_addr, num_native_pages);
memset((void *)virt_addr, 0, size);
}
#endif
}
static inline void nv_set_memory_encrypted(NvBool unencrypted,
unsigned long virt_addr,
int num_native_pages)
{
if (virt_addr == 0)
return;
#if NV_HAVE_MEMORY_ENCRYPT_DECRYPT
if (unencrypted)
{
set_memory_encrypted(virt_addr, num_native_pages);
}
#endif
}
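Editorial note, not part of the diff: a minimal sketch of how the two helpers above are intended to pair up around memory shared with the device on a memory-encryption-capable system. The call site and the surrounding allocation are assumptions, not taken from this change.

/*
 * Hypothetical pairing sketch (editorial): mark an existing allocation
 * decrypted and zeroed before handing it to the device, then restore the
 * encryption attribute before the memory is returned to the kernel.
 */
static void example_share_then_reclaim(NvBool unencrypted,
                                       unsigned long virt_addr,
                                       int num_native_pages,
                                       size_t size)
{
    /* Before device use: clear the encryption attribute and scrub contents. */
    nv_set_memory_decrypted_zeroed(unencrypted, virt_addr, num_native_pages, size);

    /* ... memory is used by the GPU ... */

    /* Before freeing: restore the encryption attribute. */
    nv_set_memory_encrypted(unencrypted, virt_addr, num_native_pages);
}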
static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
{
#if defined(NV_PHYS_TO_DMA_PRESENT)
@@ -881,16 +849,6 @@ typedef void irqreturn_t;
#define PCI_CAP_ID_EXP 0x10
#endif
/*
* On Linux on PPC64LE enable basic support for Linux PCI error recovery (see
* Documentation/PCI/pci-error-recovery.txt). Currently RM only supports error
* notification and data collection, not actual recovery of the device.
*/
#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH)
#include <asm/eeh.h>
#define NV_PCI_ERROR_RECOVERY
#endif
/*
* If the host OS has page sizes larger than 4KB, we may have a security
* problem. Registers are typically grouped in 4KB pages, but if there are
@@ -903,94 +861,42 @@ typedef void irqreturn_t;
(((addr) >> NV_RM_PAGE_SHIFT) == \
(((addr) + (size) - 1) >> NV_RM_PAGE_SHIFT)))
/*
* The kernel may have a workaround for this, by providing a method to isolate
* a single 4K page in a given mapping.
*/
#if (PAGE_SIZE > NV_RM_PAGE_SIZE) && defined(NVCPU_PPC64LE) && defined(NV_PAGE_4K_PFN)
#define NV_4K_PAGE_ISOLATION_PRESENT
#define NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr) \
((NvP64)((void*)(((addr) >> NV_RM_PAGE_SHIFT) << PAGE_SHIFT)))
#define NV_4K_PAGE_ISOLATION_MMAP_LEN(size) PAGE_SIZE
#define NV_4K_PAGE_ISOLATION_ACCESS_START(addr) \
((NvP64)((void*)((addr) & ~NV_RM_PAGE_MASK)))
#define NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size) \
((((addr) & NV_RM_PAGE_MASK) + size + NV_RM_PAGE_MASK) & \
~NV_RM_PAGE_MASK)
#define NV_PROT_4K_PAGE_ISOLATION NV_PAGE_4K_PFN
#endif
static inline int nv_remap_page_range(struct vm_area_struct *vma,
unsigned long virt_addr, NvU64 phys_addr, NvU64 size, pgprot_t prot)
{
int ret = -1;
#if defined(NV_4K_PAGE_ISOLATION_PRESENT) && defined(NV_PROT_4K_PAGE_ISOLATION)
if ((size == PAGE_SIZE) &&
((pgprot_val(prot) & NV_PROT_4K_PAGE_ISOLATION) != 0))
{
/*
* remap_4k_pfn() hardcodes the length to a single OS page, and checks
* whether applying the page isolation workaround will cause PTE
* corruption (in which case it will fail, and this is an unsupported
* configuration).
*/
#if defined(NV_HASH__REMAP_4K_PFN_PRESENT)
ret = hash__remap_4k_pfn(vma, virt_addr, (phys_addr >> PAGE_SHIFT), prot);
#else
ret = remap_4k_pfn(vma, virt_addr, (phys_addr >> PAGE_SHIFT), prot);
#endif
}
else
#endif
{
ret = remap_pfn_range(vma, virt_addr, (phys_addr >> PAGE_SHIFT), size,
return remap_pfn_range(vma, virt_addr, (phys_addr >> PAGE_SHIFT), size,
prot);
}
return ret;
}
static inline int nv_io_remap_page_range(struct vm_area_struct *vma,
NvU64 phys_addr, NvU64 size, NvU32 extra_prot, NvU64 start)
NvU64 phys_addr, NvU64 size, NvU64 start)
{
int ret = -1;
#if !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL)
ret = nv_remap_page_range(vma, start, phys_addr, size,
nv_adjust_pgprot(vma->vm_page_prot, extra_prot));
nv_adjust_pgprot(vma->vm_page_prot));
#else
ret = io_remap_pfn_range(vma, start, (phys_addr >> PAGE_SHIFT),
size, nv_adjust_pgprot(vma->vm_page_prot, extra_prot));
size, nv_adjust_pgprot(vma->vm_page_prot));
#endif
return ret;
}
static inline vm_fault_t nv_insert_pfn(struct vm_area_struct *vma,
NvU64 virt_addr, NvU64 pfn, NvU32 extra_prot)
NvU64 virt_addr, NvU64 pfn)
{
/*
* vm_insert_pfn{,_prot} replaced with vmf_insert_pfn{,_prot} in Linux 4.20
*/
#if defined(NV_VMF_INSERT_PFN_PROT_PRESENT)
return vmf_insert_pfn_prot(vma, virt_addr, pfn,
__pgprot(pgprot_val(vma->vm_page_prot) | extra_prot));
__pgprot(pgprot_val(vma->vm_page_prot)));
#else
int ret = -EINVAL;
/*
* Only PPC64LE (NV_4K_PAGE_ISOLATION_PRESENT) requires extra_prot to be
* used when remapping.
*
* vm_insert_pfn_prot() was added in Linux 4.4, whereas POWER9 support
* was added in Linux 4.8.
*
* Rather than tampering with the vma to make use of extra_prot with
* vm_insert_pfn() on older kernels, for now, just fail in this case, as
* it's not expected to be used currently.
*/
#if defined(NV_VM_INSERT_PFN_PROT_PRESENT)
ret = vm_insert_pfn_prot(vma, virt_addr, pfn,
__pgprot(pgprot_val(vma->vm_page_prot) | extra_prot));
#elif !defined(NV_4K_PAGE_ISOLATION_PRESENT)
__pgprot(pgprot_val(vma->vm_page_prot)));
#else
ret = vm_insert_pfn(vma, virt_addr, pfn);
#endif
switch (ret)
@@ -1176,11 +1082,6 @@ static inline void nv_kmem_cache_free_stack(nvidia_stack_t *stack)
typedef struct nvidia_pte_s {
NvU64 phys_addr;
unsigned long virt_addr;
NvU64 dma_addr;
#ifdef CONFIG_XEN
unsigned int guest_pfn;
#endif
unsigned int page_count;
} nvidia_pte_t;
#if defined(CONFIG_DMA_SHARED_BUFFER)
@@ -1221,6 +1122,7 @@ typedef struct nv_alloc_s {
NvS32 node_id; /* Node id for memory allocation when node is set in flags */
void *import_priv;
struct sg_table *import_sgt;
dma_addr_t dma_handle; /* dma handle used by dma_alloc_coherent(), dma_free_coherent() */
} nv_alloc_t;
/**
@@ -1419,8 +1321,6 @@ typedef struct nv_dma_map_s {
0 ? NV_OK : NV_ERR_OPERATING_SYSTEM)
#endif
typedef struct nv_ibmnpu_info nv_ibmnpu_info_t;
typedef struct nv_work_s {
struct work_struct task;
void *data;
@@ -1448,6 +1348,23 @@ struct os_wait_queue {
struct completion q;
};
/*!
* @brief Mapping between clock names and clock handles.
*
* TEGRASOC_WHICH_CLK_MAX: maximum number of clocks
* defined in the enum below.
*
* arch/nvalloc/unix/include/nv.h
* enum TEGRASOC_WHICH_CLK_MAX;
*
*/
typedef struct nvsoc_clks_s {
struct {
struct clk *handles;
const char *clkName;
} clk[TEGRASOC_WHICH_CLK_MAX];
} nvsoc_clks_t;
/*
* To report error in msi/msix when unhandled count reaches a threshold
*/
@@ -1468,7 +1385,6 @@ struct nv_dma_device {
} addressable_range;
struct device *dev;
NvBool nvlink;
};
/* Properties of the coherent link */
@@ -1517,9 +1433,6 @@ typedef struct nv_linux_state_s {
struct device *dev;
struct pci_dev *pci_dev;
/* IBM-NPU info associated with this GPU */
nv_ibmnpu_info_t *npu;
/* coherent link information */
coherent_link_info_t coherent_link_info;
@@ -1611,6 +1524,8 @@ typedef struct nv_linux_state_s {
nv_acpi_t* nv_acpi_object;
#endif
nvsoc_clks_t soc_clk_handles;
/* Lock serializing ISRs for different SOC vectors */
nv_spinlock_t soc_isr_lock;
void *soc_bh_mutex;
@@ -1810,12 +1725,10 @@ static inline struct kmem_cache *nv_kmem_cache_create(const char *name, unsigned
*/
static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv)
{
#if !defined(NVCPU_PPC64LE)
if (NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv))
{
return NV_ERR_GPU_IS_LOST;
}
#endif
return NV_OK;
}
@@ -1835,7 +1748,7 @@ static inline int nv_is_control_device(struct inode *inode)
return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
}
#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD)
#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD) || defined(NV_DEVICE_VM_BUILD)
#define NV_VGX_HYPER
#if defined(NV_XEN_IOEMU_INJECT_MSI)
#include <xen/ioemu.h>
@@ -1872,59 +1785,6 @@ static inline NvBool nv_alloc_release(nv_linux_file_private_t *nvlfp, nv_alloc_t
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
#endif
/*
* Starting on Power9 systems, DMA addresses for NVLink are no longer
* the same as used over PCIe.
*
* Power9 supports a 56-bit Real Address. This address range is compressed
* when accessed over NVLink to allow the GPU to access all of memory using
* its 47-bit Physical address.
*
* If there is an NPU device present on the system, it implies that NVLink
* sysmem links are present and we need to apply the required address
* conversion for NVLink within the driver.
*
* See Bug 1920398 for further background and details.
*
* Note, a deviation from the documented compression scheme is that the
* upper address bits (i.e. bit 56-63) instead of being set to zero are
* preserved during NVLink address compression so the original PCIe DMA
* address can be reconstructed on expansion. These bits can be safely
* ignored on NVLink since they are truncated by the GPU.
*
* Bug 1968345: As a performance enhancement it is the responsibility of
* the caller on PowerPC platforms to check for presence of an NPU device
* before the address transformation is applied.
*/
static inline NvU64 nv_compress_nvlink_addr(NvU64 addr)
{
NvU64 addr47 = addr;
#if defined(NVCPU_PPC64LE)
addr47 = addr & ((1ULL << 43) - 1);
addr47 |= (addr & (0x3ULL << 45)) >> 2;
WARN_ON(addr47 & (1ULL << 44));
addr47 |= (addr & (0x3ULL << 49)) >> 4;
addr47 |= addr & ~((1ULL << 56) - 1);
#endif
return addr47;
}
static inline NvU64 nv_expand_nvlink_addr(NvU64 addr47)
{
NvU64 addr = addr47;
#if defined(NVCPU_PPC64LE)
addr = addr47 & ((1ULL << 43) - 1);
addr |= (addr47 & (3ULL << 43)) << 2;
addr |= (addr47 & (3ULL << 45)) << 4;
addr |= addr47 & ~((1ULL << 56) - 1);
#endif
return addr;
}
// Default flags for ISRs
static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
{

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -297,9 +297,21 @@ static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
#endif
}
#define NV_CAN_CALL_VMA_START_WRITE 1
#if !NV_CAN_CALL_VMA_START_WRITE
/*
* Commit 45ad9f5290dc updated vma_start_write() to call __vma_start_write().
*/
void nv_vma_start_write(struct vm_area_struct *);
#endif
static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
#if !NV_CAN_CALL_VMA_START_WRITE
nv_vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) |= flags;
#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_set(vma, flags);
#else
vma->vm_flags |= flags;
@@ -308,7 +320,10 @@ static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
#if !NV_CAN_CALL_VMA_START_WRITE
nv_vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_clear(vma, flags);
#else
vma->vm_flags &= ~flags;

View File

@@ -26,8 +26,7 @@
#include "nv-linux.h"
#if (defined(CONFIG_X86_LOCAL_APIC) || defined(NVCPU_AARCH64) || \
defined(NVCPU_PPC64LE)) && \
#if (defined(CONFIG_X86_LOCAL_APIC) || defined(NVCPU_AARCH64)) && \
(defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR))
#define NV_LINUX_PCIE_MSI_SUPPORTED
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,5 +36,6 @@ int nv_pci_count_devices(void);
NvU8 nv_find_pci_capability(struct pci_dev *, NvU8);
int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct device *);
#endif

View File

@@ -0,0 +1,36 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_PLATFORM_H
#define NV_PLATFORM_H
#include "nv-linux.h"
irqreturn_t nvidia_isr (int, void *);
irqreturn_t nvidia_isr_kthread_bh (int, void *);
#define NV_SUPPORTS_PLATFORM_DEVICE 0
#define NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE 0
#endif

View File

@@ -41,7 +41,7 @@ void nv_procfs_remove_gpu (nv_linux_state_t *);
int nvidia_mmap (struct file *, struct vm_area_struct *);
int nvidia_mmap_helper (nv_state_t *, nv_linux_file_private_t *, nvidia_stack_t *, struct vm_area_struct *, void *);
int nv_encode_caching (pgprot_t *, NvU32, NvU32);
int nv_encode_caching (pgprot_t *, NvU32, nv_memory_type_t);
void nv_revoke_gpu_mappings_locked(nv_state_t *);
NvUPtr nv_vm_map_pages (struct page **, NvU32, NvBool, NvBool);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -63,4 +63,13 @@ static inline void nv_timer_setup(struct nv_timer *nv_timer,
#endif
}
static inline void nv_timer_delete_sync(struct timer_list *timer)
{
#if !defined(NV_BSD) && NV_IS_EXPORT_SYMBOL_PRESENT_timer_delete_sync
timer_delete_sync(timer);
#else
del_timer_sync(timer);
#endif
}
#endif // __NV_TIMER_H__

View File

@@ -168,6 +168,15 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_PLLA_DISP,
TEGRASOC_WHICH_CLK_PLLA_DISPHUB,
TEGRASOC_WHICH_CLK_PLLA,
TEGRASOC_WHICH_CLK_EMC,
TEGRASOC_WHICH_CLK_GPU_FIRST,
TEGRASOC_WHICH_CLK_GPU_SYS = TEGRASOC_WHICH_CLK_GPU_FIRST,
TEGRASOC_WHICH_CLK_GPU_NVD,
TEGRASOC_WHICH_CLK_GPU_UPROC,
TEGRASOC_WHICH_CLK_GPU_GPC0,
TEGRASOC_WHICH_CLK_GPU_GPC1,
TEGRASOC_WHICH_CLK_GPU_GPC2,
TEGRASOC_WHICH_CLK_GPU_LAST = TEGRASOC_WHICH_CLK_GPU_GPC2,
TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only.
} TEGRASOC_WHICH_CLK;
@@ -283,7 +292,6 @@ typedef struct nv_usermap_access_params_s
MemoryArea memArea;
NvU64 access_start;
NvU64 access_size;
NvU64 remap_prot_extra;
NvBool contig;
NvU32 caching;
} nv_usermap_access_params_t;
@@ -299,7 +307,6 @@ typedef struct nv_alloc_mapping_context_s {
MemoryArea memArea;
NvU64 access_start;
NvU64 access_size;
NvU64 remap_prot_extra;
NvU32 prot;
NvBool valid;
NvU32 caching;
@@ -368,6 +375,8 @@ typedef struct nv_state_t
{
NvBool valid;
NvU8 uuid[GPU_UUID_LEN];
NvBool pci_uuid_read_attempted;
NV_STATUS pci_uuid_status;
} nv_uuid_cache;
void *handle;
@@ -479,6 +488,8 @@ typedef struct nv_state_t
/* Bool to check if the GPU has a coherent sysmem link */
NvBool coherent;
/* OS detected GPU has ATS capability */
NvBool ats_support;
/*
* NUMA node ID of the CPU to which the GPU is attached.
* Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
@@ -494,6 +505,9 @@ typedef struct nv_state_t
NvU32 dispIsoStreamId;
NvU32 dispNisoStreamId;
} iommus;
/* Console is managed by drm drivers or NVKMS */
NvBool client_managed_console;
} nv_state_t;
#define NVFP_TYPE_NONE 0x0
@@ -538,9 +552,9 @@ typedef struct UvmGpuNvlinkInfo_tag *nvgpuNvlinkInfo_t;
typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t;
typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t;
typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t;
typedef struct UvmGpuAccessCntrConfig_tag *nvgpuAccessCntrConfig_t;
typedef struct UvmGpuInfo_tag nvgpuInfo_t;
typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t;
typedef struct UvmGpuAccessCntrConfig_tag nvgpuAccessCntrConfig_t;
typedef struct UvmGpuInfo_tag nvgpuInfo_t;
typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t;
typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t;
typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t;
typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t;
@@ -560,23 +574,24 @@ typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemor
* flags
*/
#define NV_FLAG_OPEN 0x0001
#define NV_FLAG_EXCLUDE 0x0002
#define NV_FLAG_CONTROL 0x0004
// Unused 0x0008
#define NV_FLAG_SOC_DISPLAY 0x0010
#define NV_FLAG_USES_MSI 0x0020
#define NV_FLAG_USES_MSIX 0x0040
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
#define NV_FLAG_SOC_IGPU 0x0200
// Unused 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
// Unused 0x2000
#define NV_FLAG_UNBIND_LOCK 0x4000
#define NV_FLAG_OPEN 0x0001
#define NV_FLAG_EXCLUDE 0x0002
#define NV_FLAG_CONTROL 0x0004
// Unused 0x0008
#define NV_FLAG_SOC_DISPLAY 0x0010
#define NV_FLAG_USES_MSI 0x0020
#define NV_FLAG_USES_MSIX 0x0040
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
#define NV_FLAG_SOC_IGPU 0x0200
/* To be set when an FLR needs to be triggered after device shut down. */
#define NV_FLAG_TRIGGER_FLR 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
#define NV_FLAG_PCI_REMOVE_IN_PROGRESS 0x2000
#define NV_FLAG_UNBIND_LOCK 0x4000
/* To be set when GPU is not present on the bus, to help device teardown */
#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000
#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000
typedef enum
{
@@ -613,6 +628,7 @@ typedef struct
const char *gc6_support;
const char *gcoff_support;
const char *s0ix_status;
const char *db_support;
} nv_power_info_t;
#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga)
@@ -758,6 +774,7 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
#endif
/*
* driver internal interfaces
*/
@@ -788,7 +805,7 @@ NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU64, Nv
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *);
NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **);
NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **, NvBool);
void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **);
NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **);
@@ -813,7 +830,6 @@ NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU6
void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64);
void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *);
void NV_API_CALL nv_dma_enable_nvlink (nv_dma_device_t *);
NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *);
NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *);
@@ -840,9 +856,7 @@ NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32,
NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list);
NvU64 NV_API_CALL nv_get_dma_start_address (nv_state_t *);
NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *);
NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *);
NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *);
NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *);
@@ -855,19 +869,8 @@ void NV_API_CALL nv_put_file_private(void *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);
void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv);
void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64);
void NV_API_CALL nv_p2p_free_platform_data(void *data);
#if defined(NVCPU_PPC64LE)
NV_STATUS NV_API_CALL nv_get_nvlink_line_rate (nv_state_t *, NvU32 *);
#endif
NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *);
void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *);
void NV_API_CALL nv_release_mmap_lock (nv_state_t *);
@@ -922,6 +925,15 @@ NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *);
void NV_API_CALL nv_get_disp_smmu_stream_ids (nv_state_t *, NvU32 *, NvU32 *);
NV_STATUS NV_API_CALL nv_clk_get_handles (nv_state_t *);
void NV_API_CALL nv_clk_clear_handles (nv_state_t *);
NV_STATUS NV_API_CALL nv_enable_clk (nv_state_t *, TEGRASOC_WHICH_CLK);
void NV_API_CALL nv_disable_clk (nv_state_t *, TEGRASOC_WHICH_CLK);
NV_STATUS NV_API_CALL nv_get_curr_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
NV_STATUS NV_API_CALL nv_get_max_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
NV_STATUS NV_API_CALL nv_get_min_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
NV_STATUS NV_API_CALL nv_set_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32);
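Editorial note, not part of the diff: one plausible call sequence for the SOC clock interfaces declared above, using a clock added to TEGRASOC_WHICH_CLK in this change. The ordering, error handling, and frequency units are assumptions.

/*
 * Hypothetical call-sequence sketch (editorial): acquire the clock handles,
 * enable one of the new GPU clocks, read its current frequency, then undo
 * both steps. Units are whatever the implementation reports.
 */
static NV_STATUS example_read_gpu_sys_clk(nv_state_t *nv, NvU32 *freq)
{
    NV_STATUS status = nv_clk_get_handles(nv);
    if (status != NV_OK)
        return status;

    status = nv_enable_clk(nv, TEGRASOC_WHICH_CLK_GPU_SYS);
    if (status == NV_OK)
    {
        status = nv_get_curr_freq(nv, TEGRASOC_WHICH_CLK_GPU_SYS, freq);
        nv_disable_clk(nv, TEGRASOC_WHICH_CLK_GPU_SYS);
    }

    nv_clk_clear_handles(nv);
    return status;
}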
/*
* ---------------------------------------------------------------------------
*
@@ -998,18 +1010,24 @@ NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU6
NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64);
NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *, void **);
NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **);
NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *, void *);
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, MemoryRange, void *, NvBool, MemoryArea *);
void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, void *, NvBool, MemoryArea);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *,
NvHandle, NvHandle, MemoryRange,
NvU8, void *, NvBool, MemoryArea *);
void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *,
NvHandle, NvHandle, NvU8, void *,
NvBool, MemoryArea);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *,
nv_state_t *, NvHandle, NvHandle,
NvU8, NvHandle *, NvHandle *,
NvHandle *, void **, NvBool *);
void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
@@ -1026,7 +1044,6 @@ NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_disable_iomap_wc(void);
void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
@@ -1042,19 +1059,24 @@ void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
NvBool NV_API_CALL rm_is_altstack_in_use(void);
void NV_API_CALL rm_notify_gpu_addition(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_notify_gpu_removal(nvidia_stack_t *, nv_state_t *);
/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *,
NvU32 *, NvU32 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
NV_STATUS NV_API_CALL nv_check_usermap_access_params(nv_state_t*, const nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);
void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size);

View File

@@ -0,0 +1,120 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_COMMON_UTILS_H__
#define __NV_COMMON_UTILS_H__
#include "nvtypes.h"
#include "nvmisc.h"
#if !defined(TRUE)
#define TRUE NV_TRUE
#endif
#if !defined(FALSE)
#define FALSE NV_FALSE
#endif
#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0)
/* Get the length of a statically-sized array. */
#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0]))
#define NV_INVALID_HEAD 0xFFFFFFFF
#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0)
#if !defined(NV_MIN)
# define NV_MIN(a,b) (((a)<(b))?(a):(b))
#endif
#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c)
#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d)
#if !defined(NV_MAX)
# define NV_MAX(a,b) (((a)>(b))?(a):(b))
#endif
#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c)
#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d)
static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max)
{
if (val < min) {
return min;
}
if (val > max) {
return max;
}
return val;
}
#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0))
/*
* Macros used for computing palette entries:
*
* NV_UNDER_REPLICATE(val, source_size, result_size) expands a value
* of source_size bits into a value of target_size bits by shifting
* the source value into the high bits and replicating the high bits
* of the value into the low bits of the result.
*
* PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component
* that has w bits to an appropriate entry in a LUT of 256 entries.
*/
static inline unsigned int NV_UNDER_REPLICATE(unsigned short val,
int source_size,
int result_size)
{
return (val << (result_size - source_size)) |
(val >> ((source_size << 1) - result_size));
}
static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth)
{
return NV_UNDER_REPLICATE(val, depth, 8);
}
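Editorial note: a short worked example of the replication formula above; it is not in the original header.

/*
 * Worked example (editorial): expanding a maximal 5-bit component to 8 bits.
 *
 *   NV_UNDER_REPLICATE(0x1F, 5, 8)
 *     = (0x1F << (8 - 5)) | (0x1F >> ((5 << 1) - 8))
 *     = 0xF8 | 0x07
 *     = 0xFF
 *
 * so PALETTE_DEPTH_SHIFT(0x1F, 5) indexes the last entry of a 256-entry LUT,
 * as expected for the brightest value a 5-bit component can represent.
 */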
/*
* Use __builtin_ffs where it is supported, or provide an equivalent
* implementation for platforms like riscv where it is not.
*/
#if defined(__GNUC__) && !NVCPU_IS_RISCV64
static inline int nv_ffs(int x)
{
return __builtin_ffs(x);
}
#else
static inline int nv_ffs(int x)
{
if (x == 0)
return 0;
LOWESTBITIDX_32(x);
return 1 + x;
}
#endif
#endif /* __NV_COMMON_UTILS_H__ */

View File

@@ -0,0 +1,370 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2010-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* This header file defines the types NVDpyId and NVDpyIdList, as well
* as inline functions to manipulate these types. NVDpyId and
* NVDpyIdList should be treated as opaque by includers of this header
* file.
*/
#ifndef __NV_DPY_ID_H__
#define __NV_DPY_ID_H__
#include "nvtypes.h"
#include "nvmisc.h"
#include "nv_common_utils.h"
#include <nvlimits.h> /* NV_MAX_SUBDEVICES */
typedef struct {
NvU32 opaqueDpyId;
} NVDpyId;
typedef struct {
NvU32 opaqueDpyIdList;
} NVDpyIdList;
#define NV_DPY_ID_MAX_SUBDEVICES NV_MAX_SUBDEVICES
#define NV_DPY_ID_MAX_DPYS_IN_LIST 32
/*
* For use in combination with nvDpyIdToPrintFormat(); e.g.,
*
* printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n",
* nvDpyIdToPrintFormat(dpyId));
*
* The includer should not make assumptions about the return type of
* nvDpyIdToPrintFormat().
*/
#define NV_DPY_ID_PRINT_FORMAT "0x%08x"
/* functions to return an invalid DpyId and empty DpyIdList */
static inline NVDpyId nvInvalidDpyId(void)
{
NVDpyId dpyId = { 0 };
return dpyId;
}
static inline NVDpyIdList nvEmptyDpyIdList(void)
{
NVDpyIdList dpyIdList = { 0 };
return dpyIdList;
}
static inline NVDpyIdList nvAllDpyIdList(void)
{
NVDpyIdList dpyIdList = { ~0U };
return dpyIdList;
}
static inline void
nvEmptyDpyIdListSubDeviceArray(NVDpyIdList dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
{
int dispIndex;
for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
dpyIdList[dispIndex] = nvEmptyDpyIdList();
}
}
/* set operations on DpyIds and DpyIdLists: Add, Subtract, Intersect, Xor */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvAddDpyIdToDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList |
dpyId.opaqueDpyId;
return tmpDpyIdList;
}
/* Passing an invalid display ID makes this function return an empty list. */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvAddDpyIdToEmptyDpyIdList(NVDpyId dpyId)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyId.opaqueDpyId;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvAddDpyIdListToDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListB.opaqueDpyIdList |
dpyIdListA.opaqueDpyIdList;
return tmpDpyIdList;
}
/* Returns: dpyIdList - dpyId */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvDpyIdListMinusDpyId(NVDpyIdList dpyIdList, NVDpyId dpyId)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList &
(~dpyId.opaqueDpyId);
return tmpDpyIdList;
}
/* Returns: dpyIdListA - dpyIdListB */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvDpyIdListMinusDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList &
(~dpyIdListB.opaqueDpyIdList);
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvIntersectDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList &
dpyId.opaqueDpyId;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvIntersectDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList &
dpyIdListB.opaqueDpyIdList;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvXorDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList ^
dpyId.opaqueDpyId;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvXorDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList ^
dpyIdListB.opaqueDpyIdList;
return tmpDpyIdList;
}
/* boolean checks */
static inline NvBool nvDpyIdIsInDpyIdList(NVDpyId dpyId,
NVDpyIdList dpyIdList)
{
return !!(dpyIdList.opaqueDpyIdList & dpyId.opaqueDpyId);
}
static inline NvBool nvDpyIdIsInvalid(NVDpyId dpyId)
{
return (dpyId.opaqueDpyId == 0);
}
static inline NvBool nvDpyIdListIsEmpty(NVDpyIdList dpyIdList)
{
return (dpyIdList.opaqueDpyIdList == 0);
}
static inline NvBool
nvDpyIdListSubDeviceArrayIsEmpty(NVDpyIdList
dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
{
int dispIndex;
for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
if (!nvDpyIdListIsEmpty(dpyIdList[dispIndex])) {
return NV_FALSE;
}
}
return NV_TRUE;
}
static inline NvBool nvDpyIdsAreEqual(NVDpyId dpyIdA, NVDpyId dpyIdB)
{
return (dpyIdA.opaqueDpyId == dpyIdB.opaqueDpyId);
}
static inline NvBool nvDpyIdListsAreEqual(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
return (dpyIdListA.opaqueDpyIdList == dpyIdListB.opaqueDpyIdList);
}
static inline NvBool nvDpyIdListIsASubSetofDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList intersectedDpyIdList =
nvIntersectDpyIdListAndDpyIdList(dpyIdListA, dpyIdListB);
return nvDpyIdListsAreEqual(intersectedDpyIdList, dpyIdListA);
}
/*
* retrieve the individual dpyIds from dpyIdList; if dpyId is invalid,
* start at the beginning of the list; otherwise, start at the dpyId
* after the specified dpyId
*/
static inline __attribute__ ((warn_unused_result))
NVDpyId nvNextDpyIdInDpyIdListUnsorted(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
if (nvDpyIdIsInvalid(dpyId)) {
dpyId.opaqueDpyId = 1;
} else {
dpyId.opaqueDpyId <<= 1;
}
while (dpyId.opaqueDpyId) {
if (nvDpyIdIsInDpyIdList(dpyId, dpyIdList)) {
return dpyId;
}
dpyId.opaqueDpyId <<= 1;
}
/* no dpyIds left in dpyIdlist; return the invalid dpyId */
return nvInvalidDpyId();
}
#define FOR_ALL_DPY_IDS(_dpyId, _dpyIdList) \
for ((_dpyId) = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), \
(_dpyIdList)); \
!nvDpyIdIsInvalid(_dpyId); \
(_dpyId) = nvNextDpyIdInDpyIdListUnsorted((_dpyId), \
(_dpyIdList)))
/* report how many dpyIds are in the dpyIdList */
static inline int nvCountDpyIdsInDpyIdList(NVDpyIdList dpyIdList)
{
return nvPopCount32(dpyIdList.opaqueDpyIdList);
}
static inline int
nvCountDpyIdsInDpyIdListSubDeviceArray(NVDpyIdList
dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
{
int dispIndex, n = 0;
for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
n += nvCountDpyIdsInDpyIdList(dpyIdList[dispIndex]);
}
return n;
}
/* convert between dpyId/dpyIdList and NV-CONTROL values */
static inline int nvDpyIdToNvControlVal(NVDpyId dpyId)
{
return (int) dpyId.opaqueDpyId;
}
static inline int nvDpyIdListToNvControlVal(NVDpyIdList dpyIdList)
{
return (int) dpyIdList.opaqueDpyIdList;
}
static inline NVDpyId nvNvControlValToDpyId(int val)
{
NVDpyId dpyId;
dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (nv_ffs(val)-1);
return dpyId;
}
static inline NVDpyIdList nvNvControlValToDpyIdList(int val)
{
NVDpyIdList dpyIdList;
dpyIdList.opaqueDpyIdList = val;
return dpyIdList;
}
/* convert between dpyId and NvU32 */
static inline NVDpyId nvNvU32ToDpyId(NvU32 val)
{
NVDpyId dpyId;
dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (nv_ffs(val)-1);
return dpyId;
}
static inline NVDpyIdList nvNvU32ToDpyIdList(NvU32 val)
{
NVDpyIdList dpyIdList;
dpyIdList.opaqueDpyIdList = val;
return dpyIdList;
}
static inline NvU32 nvDpyIdToNvU32(NVDpyId dpyId)
{
return dpyId.opaqueDpyId;
}
static inline NvU32 nvDpyIdListToNvU32(NVDpyIdList dpyIdList)
{
return dpyIdList.opaqueDpyIdList;
}
/* Return the bit position of dpyId: a number in the range [0..31]. */
static inline NvU32 nvDpyIdToIndex(NVDpyId dpyId)
{
return nv_ffs(dpyId.opaqueDpyId) - 1;
}
/* Return a display ID that is not in the list passed in. */
static inline NVDpyId nvNewDpyId(NVDpyIdList excludeList)
{
NVDpyId dpyId;
if (~excludeList.opaqueDpyIdList == 0) {
return nvInvalidDpyId();
}
dpyId.opaqueDpyId =
1U << (nv_ffs(~excludeList.opaqueDpyIdList) - 1);
return dpyId;
}
/* See comment for NV_DPY_ID_PRINT_FORMAT. */
static inline NvU32 nvDpyIdToPrintFormat(NVDpyId dpyId)
{
return nvDpyIdToNvU32(dpyId);
}
/* Prevent usage of opaque values. */
#define opaqueDpyId __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H
#define opaqueDpyIdList __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H
#endif /* __NV_DPY_ID_H__ */

View File

@@ -0,0 +1,40 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_MIG_TYPES_H__
#define __NV_MIG_TYPES_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
typedef NvU32 MIGDeviceId;
#define NO_MIG_DEVICE 0L
#ifdef __cplusplus
}
#endif
#endif /* __NV_MIG_TYPES_H__ */

View File

@@ -592,6 +592,14 @@ void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel);
Error codes:
NV_ERR_GENERIC
NV_ERR_NO_MEMORY
NV_ERR_INVALID_STATE
NV_ERR_NOT_SUPPORTED
NV_ERR_NOT_READY
NV_ERR_INVALID_LOCK_STATE
NV_ERR_INVALID_STATE
NV_ERR_NVLINK_FABRIC_NOT_READY
NV_ERR_NVLINK_FABRIC_FAILURE
NV_ERR_GPU_MEMORY_ONLINING_FAILURE
*/
NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device,
UvmGpuCaps *caps);
@@ -652,14 +660,20 @@ NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device);
RM will propagate the update to all channels using the provided VA space.
All channels must be idle when this call is made.
If the pageDirectory is in system memory then a CPU physical address must be
provided. RM will establish and manage the DMA mapping for the
pageDirectory.
Arguments:
vaSpace[IN] - VASpace Object
physAddress[IN] - Physical address of new page directory
physAddress[IN] - Physical address of new page directory. If
!bVidMemAperture this is a CPU physical address.
numEntries[IN] - Number of entries including previous PDE which will be copied
bVidMemAperture[IN] - If set pageDirectory will reside in VidMem aperture else sysmem
pasid[IN] - PASID (Process Address Space IDentifier) of the process
corresponding to the VA space. Ignored unless the VA space
object has ATS enabled.
dmaAddress[OUT] - DMA mapping created for physAddress.
Error codes:
NV_ERR_GENERIC
@@ -667,7 +681,8 @@ NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device);
*/
NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid);
NvBool bVidMemAperture, NvU32 pasid,
NvU64 *dmaAddress);
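/*
 * Illustrative call sketch (not part of the original header): set a
 * sysmem-backed page directory and capture the DMA mapping RM creates via
 * the new dmaAddress out-parameter.  The wrapper name is hypothetical.
 */
static inline NV_STATUS uvmExampleSetSysmemPageDirectory(
    uvmGpuAddressSpaceHandle vaSpace,
    NvU64 pageDirCpuPhysAddr,
    unsigned numEntries,
    NvU64 *pDmaAddress)
{
    /*
     * bVidMemAperture is NV_FALSE, so pageDirCpuPhysAddr is treated as a
     * CPU physical address and RM returns its DMA mapping in *pDmaAddress.
     * pasid is ignored unless the VA space has ATS enabled.
     */
    return nvUvmInterfaceSetPageDirectory(vaSpace, pageDirCpuPhysAddr,
                                          numEntries,
                                          NV_FALSE /* bVidMemAperture */,
                                          0 /* pasid */,
                                          pDmaAddress);
}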
/*******************************************************************************
nvUvmInterfaceUnsetPageDirectory
@@ -1048,7 +1063,7 @@ NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device,
*/
NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device,
UvmGpuAccessCntrInfo *pAccessCntrInfo,
UvmGpuAccessCntrConfig *pAccessCntrConfig);
const UvmGpuAccessCntrConfig *pAccessCntrConfig);
/*******************************************************************************
nvUvmInterfaceDisableAccessCntr
@@ -1854,5 +1869,4 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU32 bufferSize);
#endif // _NV_UVM_INTERFACE_H_

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -268,6 +268,7 @@ typedef struct UvmGpuChannelInfo_tag
// The errorNotifier is filled out when the channel hits an RC error.
NvNotification *errorNotifier;
NvNotification *keyRotationNotifier;
NvU32 hwRunlistId;
@@ -297,6 +298,7 @@ typedef struct UvmGpuChannelInfo_tag
NvU64 gpFifoGpuVa;
NvU64 gpPutGpuVa;
NvU64 gpGetGpuVa;
// GPU VA of work submission offset is needed in Confidential Computing
// so CE channels can ring doorbell of other channels as required for
// WLC/LCIC work submission
@@ -374,6 +376,9 @@ typedef struct
// True if the CE can be used for P2P transactions
NvBool p2p:1;
// True if the CE supports encryption
NvBool secure:1;
// Mask of physical CEs assigned to this LCE
//
// The value returned by RM for this field may change when a GPU is
@@ -620,19 +625,12 @@ typedef struct UvmGpuClientInfo_tag
NvHandle hSmcPartRef;
} UvmGpuClientInfo;
typedef enum
{
UVM_GPU_CONF_COMPUTE_MODE_NONE,
UVM_GPU_CONF_COMPUTE_MODE_APM,
UVM_GPU_CONF_COMPUTE_MODE_HCC,
UVM_GPU_CONF_COMPUTE_MODE_COUNT
} UvmGpuConfComputeMode;
typedef struct UvmGpuConfComputeCaps_tag
{
// Out: GPU's confidential compute mode
UvmGpuConfComputeMode mode;
// Is key rotation enabled for UVM keys
// Out: true if Confidential Computing is enabled on the GPU
NvBool bConfComputingEnabled;
// Out: true if key rotation is enabled (for UVM keys) on the GPU
NvBool bKeyRotationEnabled;
} UvmGpuConfComputeCaps;
@@ -746,6 +744,8 @@ typedef struct UvmGpuInfo_tag
// to NVSwitch peers.
NvU64 nvswitchEgmMemoryWindowStart;
// GPU supports ATS capability
NvBool atsSupport;
} UvmGpuInfo;
typedef struct UvmGpuFbInfo_tag
@@ -759,7 +759,10 @@ typedef struct UvmGpuFbInfo_tag
NvBool bZeroFb; // Zero FB mode enabled.
NvU64 maxVidmemPageSize; // Largest GPU page size to access vidmem.
NvBool bStaticBar1Enabled; // Static BAR1 mode is enabled
NvU64 staticBar1StartOffset; // The start offset of the static mapping
NvU64 staticBar1Size; // The size of the static mapping
NvU32 heapStart; // The start offset of heap in KB, helpful for MIG
// systems
} UvmGpuFbInfo;
typedef struct UvmGpuEccInfo_tag
@@ -1009,17 +1012,17 @@ typedef struct UvmGpuFaultInfo_tag
NvU32 replayableFaultMask;
// Fault buffer CPU mapping
void* bufferAddress;
//
// When Confidential Computing is disabled, the mapping points to the
// actual HW fault buffer.
//
// When Confidential Computing is enabled, the mapping points to a
// copy of the HW fault buffer. This "shadow buffer" is maintained
// by GSP-RM.
void* bufferAddress;
// Size, in bytes, of the fault buffer pointed by bufferAddress.
NvU32 bufferSize;
// Mapping pointing to the start of the fault buffer metadata containing
// a 16Byte authentication tag and a valid byte. Always NULL when
// Confidential Computing is disabled.
@@ -1105,24 +1108,9 @@ typedef enum
UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
} UVM_ACCESS_COUNTER_GRANULARITY;
typedef enum
{
UVM_ACCESS_COUNTER_USE_LIMIT_NONE = 1,
UVM_ACCESS_COUNTER_USE_LIMIT_QTR = 2,
UVM_ACCESS_COUNTER_USE_LIMIT_HALF = 3,
UVM_ACCESS_COUNTER_USE_LIMIT_FULL = 4,
} UVM_ACCESS_COUNTER_USE_LIMIT;
typedef struct UvmGpuAccessCntrConfig_tag
{
NvU32 mimcGranularity;
NvU32 momcGranularity;
NvU32 mimcUseLimit;
NvU32 momcUseLimit;
NvU32 granularity;
NvU32 threshold;
} UvmGpuAccessCntrConfig;

View File

@@ -0,0 +1,37 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_I2C_H_
#define _NV_I2C_H_
#define NV_I2C_MSG_WR 0x0000
#define NV_I2C_MSG_RD 0x0001
typedef struct nv_i2c_msg_s
{
NvU16 addr;
NvU16 flags;
NvU16 len;
NvU8* buf;
} nv_i2c_msg_t;
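/*
 * Illustrative usage sketch (not part of the original header): a register
 * read is typically expressed as a write message carrying the register
 * offset followed by a read message into the caller's buffer.  The helper
 * name and buffers are hypothetical.
 */
static inline void nv_i2c_example_fill_reg_read(nv_i2c_msg_t msgs[2],
                                                NvU16 addr,
                                                NvU8 *pRegOffset,
                                                NvU8 *pData,
                                                NvU16 dataLen)
{
    msgs[0].addr  = addr;
    msgs[0].flags = NV_I2C_MSG_WR;
    msgs[0].len   = 1;
    msgs[0].buf   = pRegOffset;

    msgs[1].addr  = addr;
    msgs[1].flags = NV_I2C_MSG_RD;
    msgs[1].len   = dataLen;
    msgs[1].buf   = pData;
}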
#endif

View File

@@ -0,0 +1,96 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/******************************************************************************\
* *
* Description: *
* Accommodates sharing of IMP-related structures between kernel interface *
* files and core RM. *
* *
\******************************************************************************/
#pragma once
#include <nvtypes.h>
#if defined(_MSC_VER)
#pragma warning(disable:4324)
#endif
//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: nvimpshared.finn
//
//
// There are only a small number of discrete dramclk frequencies available on
// the system. This structure contains IMP-relevant information associated
// with a specific dramclk frequency.
//
typedef struct DRAM_CLK_INSTANCE {
NvU32 dram_clk_freq_khz;
NvU32 mchub_clk_khz;
NvU32 mc_clk_khz;
NvU32 max_iso_bw_kbps;
//
// switch_latency_ns is the maximum time required to switch the dramclk
// frequency to the frequency specified in dram_clk_freq_khz.
//
NvU32 switch_latency_ns;
} DRAM_CLK_INSTANCE;
//
// This table is used to collect information from other modules that is needed
// for RM IMP calculations. (Used on Tegra only.)
//
typedef struct TEGRA_IMP_IMPORT_DATA {
//
// max_iso_bw_kbps stores the maximum possible ISO bandwidth available to
// display, assuming display is the only active ISO client. (Note that ISO
// bandwidth will typically be allocated to multiple clients, so display
// will generally not have access to the maximum possible bandwidth.)
//
NvU32 max_iso_bw_kbps;
// On Orin, each dram channel is 16 bits wide.
NvU32 num_dram_channels;
//
// dram_clk_instance stores entries for all possible dramclk frequencies,
// sorted by dramclk frequency in increasing order.
//
// "24" is expected to be larger than the actual number of required entries
// (which is provided by a BPMP API), but it can be increased if necessary.
//
// num_dram_clk_entries is filled in with the actual number of distinct
// dramclk entries.
//
NvU32 num_dram_clk_entries;
DRAM_CLK_INSTANCE dram_clk_instance[24];
} TEGRA_IMP_IMPORT_DATA;
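/*
 * Illustrative sketch (not part of the original header): select the lowest
 * dramclk entry that still satisfies a required ISO bandwidth, relying on
 * dram_clk_instance[] being sorted by increasing frequency as documented
 * above.  The function name is hypothetical.
 */
static inline int
tegraImpExampleFindMinDramClkIndex(const TEGRA_IMP_IMPORT_DATA *pImportData,
                                   NvU32 requiredIsoBwKbps)
{
    NvU32 i;
    for (i = 0; i < pImportData->num_dram_clk_entries; i++) {
        if (pImportData->dram_clk_instance[i].max_iso_bw_kbps >= requiredIsoBwKbps) {
            return (int)i;
        }
    }
    return -1; /* no dramclk frequency provides enough ISO bandwidth */
}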

View File

@@ -640,22 +640,28 @@ enum NvKmsInputColorRange {
* If DEFAULT is provided, driver will assume full range for RGB formats
* and limited range for YUV formats.
*/
NVKMS_INPUT_COLORRANGE_DEFAULT = 0,
NVKMS_INPUT_COLOR_RANGE_DEFAULT = 0,
NVKMS_INPUT_COLORRANGE_LIMITED = 1,
NVKMS_INPUT_COLOR_RANGE_LIMITED = 1,
NVKMS_INPUT_COLORRANGE_FULL = 2,
NVKMS_INPUT_COLOR_RANGE_FULL = 2,
};
enum NvKmsInputColorSpace {
/* Unknown colorspace; no de-gamma will be applied */
NVKMS_INPUT_COLORSPACE_NONE = 0,
/* Unknown colorspace */
NVKMS_INPUT_COLOR_SPACE_NONE = 0,
/* Linear, Rec.709 [-0.5, 7.5) */
NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR = 1,
NVKMS_INPUT_COLOR_SPACE_BT601 = 1,
NVKMS_INPUT_COLOR_SPACE_BT709 = 2,
NVKMS_INPUT_COLOR_SPACE_BT2020 = 3,
NVKMS_INPUT_COLOR_SPACE_BT2100 = NVKMS_INPUT_COLOR_SPACE_BT2020,
/* PQ, Rec.2020 unity */
NVKMS_INPUT_COLORSPACE_BT2100_PQ = 2,
NVKMS_INPUT_COLOR_SPACE_SCRGB = 4
};
enum NvKmsInputTf {
NVKMS_INPUT_TF_LINEAR = 0,
NVKMS_INPUT_TF_PQ = 1
};
enum NvKmsOutputColorimetry {

View File

@@ -24,8 +24,10 @@
#if !defined(__NVKMS_KAPI_H__)
#include "nvtypes.h"
#include "nv_mig_types.h"
#include "nv-gpu-info.h"
#include "nv_dpy_id.h"
#include "nvkms-api-types.h"
#include "nvkms-format.h"
@@ -173,12 +175,18 @@ struct NvKmsKapiDeviceResourcesInfo {
NvBool supportsSyncpts;
NvBool requiresVrrSemaphores;
NvBool supportsInputColorRange;
NvBool supportsInputColorSpace;
} caps;
NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX];
NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX];
struct NvKmsKapiLutCaps lutCaps;
NvU64 vtFbBaseAddress;
NvU64 vtFbSize;
};
#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType))
@@ -204,6 +212,7 @@ struct NvKmsKapiConnectorInfo {
NvU32 numIncompatibleConnectors;
NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS];
NVDpyIdList dynamicDpyIdList;
};
struct NvKmsKapiStaticDisplayInfo {
@@ -222,6 +231,8 @@ struct NvKmsKapiStaticDisplayInfo {
NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS];
NvU32 headMask;
NvBool isDpMST;
};
struct NvKmsKapiSyncParams {
@@ -260,7 +271,8 @@ struct NvKmsKapiLayerConfig {
NvBool enabled;
} hdrMetadata;
enum NvKmsOutputTf tf;
enum NvKmsInputTf inputTf;
enum NvKmsOutputTf outputTf;
NvU8 minPresentInterval;
NvBool tearing;
@@ -272,6 +284,7 @@ struct NvKmsKapiLayerConfig {
NvU16 dstWidth, dstHeight;
enum NvKmsInputColorSpace inputColorSpace;
enum NvKmsInputColorRange inputColorRange;
struct {
NvBool enabled;
@@ -315,7 +328,10 @@ struct NvKmsKapiLayerRequestedConfig {
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool cscChanged : 1;
NvBool tfChanged : 1;
NvBool inputTfChanged : 1;
NvBool outputTfChanged : 1;
NvBool inputColorSpaceChanged : 1;
NvBool inputColorRangeChanged : 1;
NvBool hdrMetadataChanged : 1;
NvBool matrixOverridesChanged : 1;
NvBool ilutChanged : 1;
@@ -481,6 +497,8 @@ struct NvKmsKapiEvent {
struct NvKmsKapiAllocateDeviceParams {
/* [IN] GPU ID obtained from enumerateGpus() */
NvU32 gpuId;
/* [IN] MIG device if requested */
MIGDeviceId migDevice;
/* [IN] Private data of device allocator */
void *privateData;
@@ -544,6 +562,9 @@ struct NvKmsKapiCreateSurfaceParams {
* explicit_layout is NV_TRUE and layout is
* NvKmsSurfaceMemoryLayoutBlockLinear */
NvU8 log2GobsPerBlockY;
/* [IN] Whether a surface can be updated directly on the screen */
NvBool noDisplayCaching;
};
enum NvKmsKapiAllocationType {
@@ -560,6 +581,11 @@ typedef enum NvKmsKapiRegisterWaiterResultRec {
typedef void NvKmsKapiSuspendResumeCallbackFunc(NvBool suspend);
struct NvKmsKapiGpuInfo {
nv_gpu_info_t gpuInfo;
MIGDeviceId migDevice;
};
struct NvKmsKapiFunctionsTable {
/*!
@@ -583,7 +609,7 @@ struct NvKmsKapiFunctionsTable {
*
* \return Count of enumerated gpus.
*/
NvU32 (*enumerateGpus)(nv_gpu_info_t *gpuInfo);
NvU32 (*enumerateGpus)(struct NvKmsKapiGpuInfo *kapiGpuInfo);
/*!
* Allocate an NVK device using which you can query/allocate resources on
@@ -1011,6 +1037,17 @@ struct NvKmsKapiFunctionsTable {
const void *pLinearAddress
);
/*!
* Check if memory object allocated is video memory.
*
* \param [in] memory Memory allocated using allocateMemory()
*
* \return NV_TRUE if memory is vidmem, NV_FALSE otherwise.
*/
NvBool (*isVidmem)(
const struct NvKmsKapiMemory *memory
);
/*!
* Create a formatted surface from an NvKmsKapiMemory object.
*
@@ -1545,6 +1582,26 @@ struct NvKmsKapiFunctionsTable {
NvS32 index
);
/*!
* Check or wait on a head's LUT notifier.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] head The head to check for LUT completion.
*
* \param [in] waitForCompletion If true, wait for the notifier in NvKms
* before returning.
*
* \param [out] complete Returns whether the notifier has completed.
*/
NvBool
(*checkLutNotifier)
(
struct NvKmsKapiDevice *device,
NvU32 head,
NvBool waitForCompletion
);
/*
* Notify NVKMS that the system's framebuffer console has been disabled and
* the reserved allocation for the old framebuffer console can be unmapped.

View File

@@ -33,38 +33,18 @@ extern "C" {
#include "nvtypes.h"
#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Miscellaneous macros useful for bit field manipulations
//
// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a.
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif
//
// It is recommended to use the following bit macros to avoid macro name
// collisions with other src code bases.
//
// Miscellaneous macros useful for bit field manipulations.
#ifndef NVBIT
#define NVBIT(b) (1U<<(b))
#define NVBIT(b) (1U<<(b))
#endif
#ifndef NVBIT_TYPE
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#endif
#ifndef NVBIT32
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#endif
#ifndef NVBIT64
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#endif
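//
// Illustrative usage sketch (not part of the original header): NVBIT()
// operates on unsigned int, so the width-suffixed variants must be used once
// a bit index can reach 32 or above.  The variable names are hypothetical.
//
// NvU32 ctrlMask   = NVBIT32(5);      // bit 5 of a 32-bit register
// NvU64 windowMask = NVBIT64(40) - 1; // low 40 bits set; NVBIT(40) would overflow
//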
// Concatenate two 32-bit values into a 64-bit value
@@ -72,7 +52,7 @@ extern "C" {
// Helper macro's for 32 bit bitmasks
#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
#define NV_BITMASK32_SET(pChannelMask, chId) \
(pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
@@ -721,11 +701,6 @@ nvPrevPow2_U64(const NvU64 x )
} \
}
//
// Bug 4851259: Newly added functions must be hidden from certain HS-signed
// ucode compilers to avoid signature mismatch.
//
#ifndef NVDEC_1_0
/*!
* Returns the position of nth set bit in the given mask.
*
@@ -755,8 +730,6 @@ nvGetNthSetBitIndex32(NvU32 mask, NvU32 n)
return -1;
}
#endif // NVDEC_1_0
//
// Size to use when declaring variable-sized arrays
//
@@ -800,12 +773,15 @@ nvGetNthSetBitIndex32(NvU32 mask, NvU32 n)
// Returns the offset (in bytes) of 'member' in struct 'type'.
#ifndef NV_OFFSETOF
#if defined(__GNUC__) && (__GNUC__ > 3)
#define NV_OFFSETOF(type, member) ((NvU32)__builtin_offsetof(type, member))
#define NV_OFFSETOF(type, member) ((NvUPtr) __builtin_offsetof(type, member))
#else
#define NV_OFFSETOF(type, member) ((NvU32)(NvU64)&(((type *)0)->member)) // shouldn't we use PtrToUlong? But will need to include windows header.
#define NV_OFFSETOF(type, member) ((NvUPtr) &(((type *)0)->member))
#endif
#endif
// Given a pointer and the member it is of the parent struct, return a pointer to the parent struct
#define NV_CONTAINEROF(ptr, type, member) ((type *) (((NvUPtr) ptr) - NV_OFFSETOF(type, member)))
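//
// Illustrative usage sketch (not part of the original header): recover the
// enclosing structure from a pointer to one of its members.  The structure,
// field, and variable names are hypothetical.
//
// struct ExampleEntry { NvU32 refCount; NvU32 key; };
//
// NvU32 *pKey = ...;
// struct ExampleEntry *pEntry = NV_CONTAINEROF(pKey, struct ExampleEntry, key);
//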
//
// Performs a rounded division of b into a (unsigned). For SIGNED version of
// NV_ROUNDED_DIV() macro check the comments in bug 769777.
@@ -990,6 +966,22 @@ static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
// Get the number of elements the specified fixed-size array
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Deprecated macros whose definition can be removed once the code base no longer references them.
// Use the NVBIT* macros instead of these macros.
//
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif
#ifdef __cplusplus
}
#endif //__cplusplus

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -155,6 +155,15 @@ NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation no
NV_STATUS_CODE(NV_ERR_TEST_ONLY_CODE_NOT_ENABLED, 0x0000007E, "Test-only code path not enabled")
NV_STATUS_CODE(NV_ERR_SECURE_BOOT_FAILED, 0x0000007F, "GFW secure boot failed")
NV_STATUS_CODE(NV_ERR_INSUFFICIENT_ZBC_ENTRY, 0x00000080, "No more ZBC entry for the client")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_NOT_READY, 0x00000081, "Nvlink Fabric Status or Fabric Probe is not yet complete, caller needs to retry")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_FAILURE, 0x00000082, "Nvlink Fabric Probe failed")
NV_STATUS_CODE(NV_ERR_GPU_MEMORY_ONLINING_FAILURE, 0x00000083, "GPU Memory Onlining failed")
NV_STATUS_CODE(NV_ERR_REDUCTION_MANAGER_NOT_AVAILABLE, 0x00000084, "Reduction Manager is not available")
NV_STATUS_CODE(NV_ERR_THRESHOLD_CROSSED, 0x00000085, "A fatal threshold has been crossed")
NV_STATUS_CODE(NV_ERR_RESOURCE_RETIREMENT_ERROR, 0x00000086, "An error occurred while trying to retire a resource")
NV_STATUS_CODE(NV_ERR_FABRIC_STATE_OUT_OF_SYNC, 0x00000087, "NVLink fabric state cached by the driver is out of sync")
NV_STATUS_CODE(NV_ERR_BUFFER_FULL, 0x00000088, "Buffer is full")
NV_STATUS_CODE(NV_ERR_BUFFER_EMPTY, 0x00000089, "Buffer is empty")
// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")
@@ -165,5 +174,6 @@ NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More
NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do")
NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found")
NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range")
NV_STATUS_CODE(NV_WARN_THRESHOLD_CROSSED, 0x00010009, "WARNING Threshold has been crossed")
#endif /* SDK_NVSTATUSCODES_H */

View File

@@ -170,7 +170,7 @@ NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
@@ -178,6 +178,7 @@ NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_device_vm_present (void);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
@@ -213,6 +214,7 @@ enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
void NV_API_CALL os_pci_trigger_flr(void *handle);
NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
@@ -220,12 +222,14 @@ NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
NvBool NV_API_CALL os_is_init_ns(void);
extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvBool os_cc_enabled;
extern NvBool os_cc_sev_snp_enabled;
extern NvBool os_cc_sme_enabled;
extern NvBool os_cc_snp_vtom_enabled;
extern NvBool os_cc_tdx_enabled;
extern NvBool os_dma_buf_enabled;

View File

@@ -0,0 +1,387 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OS_DSI_PANEL_PARAMS_H_
#define _OS_DSI_PANEL_PARAMS_H_
#define DSI_GENERIC_LONG_WRITE 0x29
#define DSI_DCS_LONG_WRITE 0x39
#define DSI_GENERIC_SHORT_WRITE_1_PARAMS 0x13
#define DSI_GENERIC_SHORT_WRITE_2_PARAMS 0x23
#define DSI_DCS_WRITE_0_PARAM 0x05
#define DSI_DCS_WRITE_1_PARAM 0x15
#define DSI_DCS_READ_PARAM 0x06
#define DSI_DCS_COMPRESSION_MODE 0x07
#define DSI_DCS_PPS_LONG_WRITE 0x0A
#define DSI_DCS_SET_ADDR_MODE 0x36
#define DSI_DCS_EXIT_SLEEP_MODE 0x11
#define DSI_DCS_ENTER_SLEEP_MODE 0x10
#define DSI_DCS_SET_DISPLAY_ON 0x29
#define DSI_DCS_SET_DISPLAY_OFF 0x28
#define DSI_DCS_SET_TEARING_EFFECT_OFF 0x34
#define DSI_DCS_SET_TEARING_EFFECT_ON 0x35
#define DSI_DCS_NO_OP 0x0
#define DSI_NULL_PKT_NO_DATA 0x9
#define DSI_BLANKING_PKT_NO_DATA 0x19
#define DSI_DCS_SET_COMPRESSION_METHOD 0xC0
/* DCS commands for command mode */
#define DSI_ENTER_PARTIAL_MODE 0x12
#define DSI_SET_PIXEL_FORMAT 0x3A
#define DSI_AREA_COLOR_MODE 0x4C
#define DSI_SET_PARTIAL_AREA 0x30
#define DSI_SET_PAGE_ADDRESS 0x2B
#define DSI_SET_ADDRESS_MODE 0x36
#define DSI_SET_COLUMN_ADDRESS 0x2A
#define DSI_WRITE_MEMORY_START 0x2C
#define DSI_WRITE_MEMORY_CONTINUE 0x3C
#define PKT_ID0(id) ((((id) & 0x3f) << 3) | \
(((DSI_ENABLE) & 0x1) << 9))
#define PKT_LEN0(len) (((len) & 0x7) << 0)
#define PKT_ID1(id) ((((id) & 0x3f) << 13) | \
(((DSI_ENABLE) & 0x1) << 19))
#define PKT_LEN1(len) (((len) & 0x7) << 10)
#define PKT_ID2(id) ((((id) & 0x3f) << 23) | \
(((DSI_ENABLE) & 0x1) << 29))
#define PKT_LEN2(len) (((len) & 0x7) << 20)
#define PKT_ID3(id) ((((id) & 0x3f) << 3) | \
(((DSI_ENABLE) & 0x1) << 9))
#define PKT_LEN3(len) (((len) & 0x7) << 0)
#define PKT_ID4(id) ((((id) & 0x3f) << 13) | \
(((DSI_ENABLE) & 0x1) << 19))
#define PKT_LEN4(len) (((len) & 0x7) << 10)
#define PKT_ID5(id) ((((id) & 0x3f) << 23) | \
(((DSI_ENABLE) & 0x1) << 29))
#define PKT_LEN5(len) (((len) & 0x7) << 20)
#define PKT_LP (((DSI_ENABLE) & 0x1) << 30)
#define NUMOF_PKT_SEQ 12
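//
// Illustrative sketch (not part of the original header): one word of a video
// mode packet sequence, packing three packets with the PKT_ID*/PKT_LEN*
// helpers and marking the line for low-power transmission.  The packet
// choices are only an example, not a required sequence.
//
// NvU32 exampleSeqWord = PKT_ID0(DSI_BLANKING_PKT_NO_DATA) | PKT_LEN0(1) |
//                        PKT_ID1(DSI_DCS_LONG_WRITE)       | PKT_LEN1(3) |
//                        PKT_ID2(DSI_BLANKING_PKT_NO_DATA) | PKT_LEN2(2) |
//                        PKT_LP;
//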
/* DSI pixel data format, enum values should match with dt-bindings in tegra-panel.h */
typedef enum
{
DSI_PIXEL_FORMAT_16BIT_P,
DSI_PIXEL_FORMAT_18BIT_P,
DSI_PIXEL_FORMAT_18BIT_NP,
DSI_PIXEL_FORMAT_24BIT_P,
DSI_PIXEL_FORMAT_8BIT_DSC,
DSI_PIXEL_FORMAT_12BIT_DSC,
DSI_PIXEL_FORMAT_16BIT_DSC,
DSI_PIXEL_FORMAT_10BIT_DSC,
DSI_PIXEL_FORMAT_30BIT_P,
DSI_PIXEL_FORMAT_36BIT_P,
} DSIPIXELFORMAT;
/* DSI virtual channel number */
typedef enum
{
DSI_VIRTUAL_CHANNEL_0,
DSI_VIRTUAL_CHANNEL_1,
DSI_VIRTUAL_CHANNEL_2,
DSI_VIRTUAL_CHANNEL_3,
} DSIVIRTUALCHANNEL;
/* DSI transmit method for video data */
typedef enum
{
DSI_VIDEO_TYPE_VIDEO_MODE,
DSI_VIDEO_TYPE_COMMAND_MODE,
} DSIVIDEODATAMODE;
/* DSI HS clock mode */
typedef enum
{
DSI_VIDEO_CLOCK_CONTINUOUS,
DSI_VIDEO_CLOCK_TX_ONLY,
} DSICLOCKMODE;
/* DSI burst mode setting in video mode. Each mode is assigned with a
* fixed value. The rationale behind this is to avoid change of these
* values, since the calculation of dsi clock depends on them. */
typedef enum
{
DSI_VIDEO_NON_BURST_MODE = 0,
DSI_VIDEO_NON_BURST_MODE_WITH_SYNC_END = 1,
DSI_VIDEO_BURST_MODE_LOWEST_SPEED = 2,
DSI_VIDEO_BURST_MODE_LOW_SPEED = 3,
DSI_VIDEO_BURST_MODE_MEDIUM_SPEED = 4,
DSI_VIDEO_BURST_MODE_FAST_SPEED = 5,
DSI_VIDEO_BURST_MODE_FASTEST_SPEED = 6,
} DSIVIDEOBURSTMODE;
/* DSI Ganged Mode */
typedef enum
{
DSI_GANGED_SYMMETRIC_LEFT_RIGHT = 1,
DSI_GANGED_SYMMETRIC_EVEN_ODD = 2,
DSI_GANGED_SYMMETRIC_LEFT_RIGHT_OVERLAP = 3,
} DSIGANGEDTYPE;
typedef enum
{
DSI_LINK0,
DSI_LINK1,
} DSILINKNUM;
/* DSI Command Packet type */
typedef enum
{
DSI_PACKET_CMD,
DSI_DELAY_MS,
DSI_GPIO_SET,
DSI_SEND_FRAME,
DSI_PACKET_VIDEO_VBLANK_CMD,
DSI_DELAY_US,
} DSICMDPKTTYPE;
/* DSI Phy type */
typedef enum
{
DSI_DPHY,
DSI_CPHY,
} DSIPHYTYPE;
enum {
DSI_GPIO_LCD_RESET,
DSI_GPIO_PANEL_EN,
DSI_GPIO_PANEL_EN_1,
DSI_GPIO_BL_ENABLE,
DSI_GPIO_BL_PWM,
DSI_GPIO_AVDD_AVEE_EN,
DSI_GPIO_VDD_1V8_LCD_EN,
DSI_GPIO_TE,
DSI_GPIO_BRIDGE_EN_0,
DSI_GPIO_BRIDGE_EN_1,
DSI_GPIO_BRIDGE_REFCLK_EN,
DSI_N_GPIO_PANEL, /* add new gpio above this entry */
};
enum
{
DSI_DISABLE,
DSI_ENABLE,
};
typedef struct
{
NvU8 cmd_type;
NvU8 data_id;
union
{
NvU16 data_len;
NvU16 delay_ms;
NvU16 delay_us;
NvU32 gpio;
NvU16 frame_cnt;
struct
{
NvU8 data0;
NvU8 data1;
} sp;
} sp_len_dly;
NvU32 *pdata;
NvU8 link_id;
NvBool club_cmd;
} DSI_CMD, *PDSICMD;
typedef struct
{
NvU16 t_hsdexit_ns;
NvU16 t_hstrail_ns;
NvU16 t_datzero_ns;
NvU16 t_hsprepare_ns;
NvU16 t_hsprebegin_ns;
NvU16 t_hspost_ns;
NvU16 t_clktrail_ns;
NvU16 t_clkpost_ns;
NvU16 t_clkzero_ns;
NvU16 t_tlpx_ns;
NvU16 t_clkprepare_ns;
NvU16 t_clkpre_ns;
NvU16 t_wakeup_ns;
NvU16 t_taget_ns;
NvU16 t_tasure_ns;
NvU16 t_tago_ns;
} DSI_PHY_TIMING_IN_NS;
typedef struct
{
NvU32 hActive;
NvU32 vActive;
NvU32 hFrontPorch;
NvU32 vFrontPorch;
NvU32 hBackPorch;
NvU32 vBackPorch;
NvU32 hSyncWidth;
NvU32 vSyncWidth;
NvU32 hPulsePolarity;
NvU32 vPulsePolarity;
NvU32 pixelClkRate;
} DSITIMINGS, *PDSITIMINGS;
typedef struct
{
NvU8 n_data_lanes; /* required */
NvU8 pixel_format; /* required */
NvU8 refresh_rate; /* required */
NvU8 rated_refresh_rate;
NvU8 panel_reset; /* required */
NvU8 virtual_channel; /* required */
NvU8 dsi_instance;
NvU16 dsi_panel_rst_gpio;
NvU16 dsi_panel_bl_en_gpio;
NvU16 dsi_panel_bl_pwm_gpio;
NvU16 even_odd_split_width;
NvU8 controller_vs;
NvBool panel_has_frame_buffer; /* required*/
/* Deprecated. Use DSI_SEND_FRAME panel command instead. */
NvBool panel_send_dc_frames;
DSI_CMD *dsi_init_cmd; /* required */
NvU16 n_init_cmd; /* required */
NvU32 *dsi_init_cmd_array;
NvU32 init_cmd_array_size;
NvBool sendInitCmdsEarly;
DSI_CMD *dsi_early_suspend_cmd;
NvU16 n_early_suspend_cmd;
NvU32 *dsi_early_suspend_cmd_array;
NvU32 early_suspend_cmd_array_size;
DSI_CMD *dsi_late_resume_cmd;
NvU16 n_late_resume_cmd;
NvU32 *dsi_late_resume_cmd_array;
NvU32 late_resume_cmd_array_size;
DSI_CMD *dsi_postvideo_cmd;
NvU16 n_postvideo_cmd;
NvU32 *dsi_postvideo_cmd_array;
NvU32 postvideo_cmd_array_size;
DSI_CMD *dsi_suspend_cmd; /* required */
NvU16 n_suspend_cmd; /* required */
NvU32 *dsi_suspend_cmd_array;
NvU32 suspend_cmd_array_size;
NvU8 video_data_type; /* required */
NvU8 video_clock_mode;
NvU8 video_burst_mode;
NvU8 ganged_type;
NvU16 ganged_overlap;
NvBool ganged_swap_links;
NvBool ganged_write_to_all_links;
NvU8 split_link_type;
NvU8 suspend_aggr;
NvU16 panel_buffer_size_byte;
NvU16 panel_reset_timeout_msec;
NvBool hs_cmd_mode_supported;
NvBool hs_cmd_mode_on_blank_supported;
NvBool enable_hs_clock_on_lp_cmd_mode;
NvBool no_pkt_seq_eot; /* 1st generation panel may not
* support eot. Don't set it for
* most panels.*/
const NvU32 *pktSeq;
NvU32 *pktSeq_array;
NvU32 pktSeq_array_size;
NvBool skip_dsi_pkt_header;
NvBool power_saving_suspend;
NvBool suspend_stop_stream_late;
NvBool dsi2lvds_bridge_enable;
NvBool dsi2edp_bridge_enable;
NvU32 max_panel_freq_khz;
NvU32 lp_cmd_mode_freq_khz;
NvU32 lp_read_cmd_mode_freq_khz;
NvU32 hs_clk_in_lp_cmd_mode_freq_khz;
NvU32 burst_mode_freq_khz;
NvU32 fpga_freq_khz;
NvU32 te_gpio;
NvBool te_polarity_low;
NvBool dsiEnVRR;
NvBool dsiVrrPanelSupportsTe;
NvBool dsiForceSetTePin;
int panel_gpio[DSI_N_GPIO_PANEL];
NvBool panel_gpio_populated;
NvU32 dpd_dsi_pads;
DSI_PHY_TIMING_IN_NS phyTimingNs;
NvU8 *bl_name;
NvBool lp00_pre_panel_wakeup;
NvBool ulpm_not_supported;
NvBool use_video_host_fifo_for_cmd;
NvBool dsi_csi_loopback;
NvBool set_max_timeout;
NvBool use_legacy_dphy_core;
// Swap P/N pins polarity of all data lanes
NvBool swap_data_lane_polarity;
// Swap P/N pins polarity of clock lane
NvBool swap_clock_lane_polarity;
// Reverse clock polarity for partition A/B. 1st SOT bit goes on negedge of Clock lane
NvBool reverse_clock_polarity;
// DSI Lane Crossbar. Allocating xbar array for max number of lanes
NvBool lane_xbar_exists;
NvU32 lane_xbar_ctrl[8];
NvU32 refresh_rate_adj;
NvU8 dsiPhyType;
NvBool en_data_scrambling;
NvU32 dsipll_vco_rate_hz;
NvU32 dsipll_clkoutpn_rate_hz;
NvU32 dsipll_clkouta_rate_hz;
NvU32 vpll0_rate_hz;
DSITIMINGS dsiTimings;
// DSC Parameters
NvBool dsiDscEnable;
NvU32 dsiDscBpp;
NvU32 dsiDscNumSlices;
NvU32 dsiDscSliceWidth;
NvU32 dsiDscSliceHeight;
NvBool dsiDscEnBlockPrediction;
NvBool dsiDscEnDualDsc;
NvU32 dsiDscDecoderMajorVersion;
NvU32 dsiDscDecoderMinorVersion;
NvBool dsiDscUseCustomPPS;
NvU32 dsiDscCustomPPSData[32];
// Driver allocates memory for PPS cmd to be sent to Panel
NvBool ppsCmdMemAllocated;
} DSI_PANEL_INFO;
#endif

View File

@@ -0,0 +1,32 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OS_GPIO_H_
#define _OS_GPIO_H_
typedef enum
{
NV_OS_GPIO_FUNC_HOTPLUG_A,
NV_OS_GPIO_FUNC_HOTPLUG_B,
} NV_OS_GPIO_FUNC_NAMES;
#endif

View File

@@ -81,9 +81,9 @@ NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, nvgpuAccessCntrConfig_t);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, const nvgpuAccessCntrConfig_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_nvlink_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuNvlinkInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_p2p_object_create(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, NvHandle *);

View File

@@ -25,6 +25,7 @@ fi
# VGX_KVM_BUILD parameter defined only vGPU builds on KVM hypervisor
# GRID_BUILD parameter defined only for GRID builds (GRID Guest driver)
# GRID_BUILD_CSP parameter defined only for GRID CSP builds (GRID Guest driver for CSPs)
# VGX_DEVICE_VM_BUILD parameter defined only for Device VM VGX build (vGPU Host driver)
test_xen() {
#
@@ -661,27 +662,6 @@ compile_test() {
compile_check_conftest "$CODE" "NV_PCI_GET_DOMAIN_BUS_AND_SLOT_PRESENT" "" "functions"
;;
hash__remap_4k_pfn)
#
# Determine if the hash__remap_4k_pfn() function is
# present.
#
# Added by commit 6cc1a0ee4ce2 ("powerpc/mm/radix: Add radix
# callback for pmd accessors") in v4.7 (committed 2016-04-29).
# Present only in arch/powerpc
#
CODE="
#if defined(NV_ASM_BOOK3S_64_HASH_64K_H_PRESENT)
#include <linux/mm.h>
#include <asm/book3s/64/hash-64k.h>
#endif
void conftest_hash__remap_4k_pfn(void) {
hash__remap_4k_pfn();
}"
compile_check_conftest "$CODE" "NV_HASH__REMAP_4K_PFN_PRESENT" "" "functions"
;;
register_cpu_notifier)
#
# Determine if register_cpu_notifier() is present
@@ -806,6 +786,16 @@ compile_test() {
return
;;
device_vm_build)
# Add config parameter if running on Device VM.
if [ -n "$VGX_DEVICE_VM_BUILD" ]; then
echo "#define NV_DEVICE_VM_BUILD" | append_conftest "generic"
else
echo "#undef NV_DEVICE_VM_BUILD" | append_conftest "generic"
fi
return
;;
vfio_register_notifier)
#
# Check number of arguments required.
@@ -1273,6 +1263,77 @@ compile_test() {
compile_check_conftest "$CODE" "NV_PFN_ADDRESS_SPACE_STRUCT_PRESENT" "" "types"
;;
egm_module_helper_api_present)
#
# Determine if egm management api are present or not.
#
CODE="
#include <linux/pci.h>
#include <linux/nvgrace-egm.h>
void conftest_egm_module_helper_api_present() {
struct pci_dev *pdev;
register_egm_node(pdev);
unregister_egm_node(0);
}
"
compile_check_conftest "$CODE" "NV_EGM_MODULE_HELPER_API_PRESENT" "" "types"
;;
egm_bad_pages_handling_support)
#
# Determine if egm_bad_pages_list is present or not.
#
CODE="
#include <linux/types.h>
#include <linux/egm.h>
void conftest_egm_bad_pages_handle() {
int ioctl = EGM_BAD_PAGES_LIST;
struct egm_bad_pages_list list;
}
"
compile_check_conftest "$CODE" "NV_EGM_BAD_PAGES_HANDLING_SUPPORT" "" "types"
;;
class_create_has_no_owner_arg)
#
# Determine if the class_create API with the new signature
# is present or not.
#
# Added by commit 1aaba11da9aa ("driver core: class: remove
# module * from class_create()") in v6.4 (2023-03-13)
#
CODE="
#include <linux/device/class.h>
void conftest_class_create() {
struct class *class;
class = class_create(\"test\");
}"
compile_check_conftest "$CODE" "NV_CLASS_CREATE_HAS_NO_OWNER_ARG" "" "types"
;;
class_devnode_has_const_arg)
#
# Determine if the class.devnode is present with the new signature.
#
# Added by commit ff62b8e6588f ("driver core: make struct
# class.devnode() take a const *") in v6.2 (2022-11-23)
#
CODE="
#include <linux/device.h>
static char *conftest_devnode(const struct device *device, umode_t *mode) {
return NULL;
}
void conftest_class_devnode() {
struct class class;
class.devnode = conftest_devnode;
}"
compile_check_conftest "$CODE" "NV_CLASS_DEVNODE_HAS_CONST_ARG" "" "types"
;;
pci_irq_vector_helpers)
#
# Determine if pci_alloc_irq_vectors(), pci_free_irq_vectors()
@@ -1551,7 +1612,6 @@ compile_test() {
compile_check_conftest "$CODE" "NV_PHYS_TO_DMA_PRESENT" "" "functions"
;;
dma_attr_macros)
#
# Determine if the NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT macro is present.
@@ -1770,22 +1830,6 @@ compile_test() {
fi
;;
pnv_pci_get_npu_dev)
#
# Determine if the pnv_pci_get_npu_dev function is present.
#
# Added by commit 5d2aa710e697 ("powerpc/powernv: Add support
# for Nvlink NPUs") in v4.5
#
CODE="
#include <linux/pci.h>
void conftest_pnv_pci_get_npu_dev() {
pnv_pci_get_npu_dev();
}"
compile_check_conftest "$CODE" "NV_PNV_PCI_GET_NPU_DEV_PRESENT" "" "functions"
;;
kernel_write_has_pointer_pos_arg)
#
# Determine the pos argument type, which was changed by commit
@@ -2375,6 +2419,45 @@ compile_test() {
compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_LEGACY_GAMMA_SET_PRESENT" "" "functions"
;;
drm_plane_create_color_properties)
#
# Determine if the function drm_plane_create_color_properties() is
# present.
#
# Added by commit 80f690e9e3a6 ("drm: Add optional COLOR_ENCODING
# and COLOR_RANGE properties to drm_plane") in v4.17 (2018-02-19).
#
CODE="
#include <linux/types.h>
#if defined(NV_DRM_DRM_COLOR_MGMT_H_PRESENT)
#include <drm/drm_color_mgmt.h>
#endif
void conftest_drm_plane_create_color_properties(void) {
drm_plane_create_color_properties();
}"
compile_check_conftest "$CODE" "NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT" "" "functions"
;;
drm_format_info_has_is_yuv)
#
# Determine if struct drm_format_info has .is_yuv member.
#
# Added by commit ce2d54619a10 ("drm/fourcc: Add is_yuv field to
# drm_format_info to denote if format is yuv") in v4.19
# (2018-07-17).
#
CODE="
#if defined(NV_DRM_DRM_FOURCC_H_PRESENT)
#include <drm/drm_fourcc.h>
#endif
int conftest_drm_format_info_has_is_yuv(void) {
return offsetof(struct drm_format_info, is_yuv);
}"
compile_check_conftest "$CODE" "NV_DRM_FORMAT_INFO_HAS_IS_YUV" "" "types"
;;
pci_stop_and_remove_bus_device)
#
# Determine if the pci_stop_and_remove_bus_device() function is present.
@@ -3066,6 +3149,21 @@ compile_test() {
compile_check_conftest "$CODE" "NV_FOLL_LONGTERM_PRESENT" "" "types"
;;
has_enum_pidtype_tgid)
# Determine if PIDTYPE_TGID is present in the kernel as an enum
#
# Added by commit 6883f81aac6f ("pid: Implement PIDTYPE_TGID")
# in v4.19
#
CODE="
#include <linux/pid.h>
enum pid_type type = PIDTYPE_TGID;
"
compile_check_conftest "$CODE" "NV_HAS_ENUM_PIDTYPE_TGID" "" "types"
;;
vfio_pin_pages_has_vfio_device_arg)
#
# Determine if vfio_pin_pages() kABI accepts "struct vfio_device *"
@@ -3453,60 +3551,6 @@ compile_test() {
compile_check_conftest "$CODE" "NV_VM_OPS_FAULT_REMOVED_VMA_ARG" "" "types"
;;
pnv_npu2_init_context)
#
# Determine if the pnv_npu2_init_context() function is
# present and the signature of its callback.
#
# Added by commit 1ab66d1fbada ("powerpc/powernv: Introduce
# address translation services for Nvlink2") in v4.12
# (2017-04-03).
#
echo "$CONFTEST_PREAMBLE
#if defined(NV_ASM_POWERNV_H_PRESENT)
#include <linux/pci.h>
#include <asm/powernv.h>
#endif
void conftest_pnv_npu2_init_context(void) {
pnv_npu2_init_context();
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
echo "#undef NV_PNV_NPU2_INIT_CONTEXT_PRESENT" | append_conftest "functions"
echo "#undef NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions"
rm -f conftest$$.o
return
fi
echo "#define NV_PNV_NPU2_INIT_CONTEXT_PRESENT" | append_conftest "functions"
# Check the callback signature
echo "$CONFTEST_PREAMBLE
#if defined(NV_ASM_POWERNV_H_PRESENT)
#include <linux/pci.h>
#include <asm/powernv.h>
#endif
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
unsigned long flags,
void (*cb)(struct npu_context *, void *),
void *priv) {
return NULL;
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
echo "#define NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions"
rm -f conftest$$.o
return
fi
echo "#undef NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions"
;;
of_get_ibm_chip_id)
#
# Determine if the of_get_ibm_chip_id() function is present.
@@ -5223,6 +5267,45 @@ compile_test() {
compile_check_conftest "$CODE" "NV_FOLLOW_PFN_PRESENT" "" "functions"
;;
follow_pte_arg_vma)
#
# Determine if the first argument of follow_pte is
# mm_struct or vm_area_struct.
#
# The first argument was changed from mm_struct to vm_area_struct by
# commit 29ae7d96d166 ("mm: pass VMA instead of MM to follow_pte()")
#
CODE="
#include <linux/mm.h>
typeof(follow_pte) conftest_follow_pte_has_vma_arg;
int conftest_follow_pte_has_vma_arg(struct vm_area_struct *vma,
unsigned long address,
pte_t **ptep,
spinlock_t **ptl) {
return 0;
}"
compile_check_conftest "$CODE" "NV_FOLLOW_PTE_ARG1_VMA" "" "types"
;;
ptep_get)
#
# Determine if ptep_get() is present.
#
# ptep_get() was added by commit 481e980a7c19
# ("mm: Allow arches to provide ptep_get()")
#
CODE="
#include <linux/mm.h>
void conftest_ptep_get(void) {
ptep_get();
}"
compile_check_conftest "$CODE" "NV_PTEP_GET_PRESENT" "" "functions"
;;
drm_plane_atomic_check_has_atomic_state_arg)
#
# Determine if drm_plane_helper_funcs::atomic_check takes 'state'
@@ -5412,6 +5495,31 @@ compile_test() {
fi
;;
of_property_for_each_u32_has_internal_args)
#
# Determine if the internal arguments for the macro
# of_property_for_each_u32() are present.
#
# Commit 9722c3b66e21 ("of: remove internal arguments from
# of_property_for_each_u32()") removes two arguments from
# of_property_for_each_u32() which are used internally within
# the macro and so do not need to be passed. This change was
# made for Linux v6.11.
#
CODE="
#include <linux/of.h>
void conftest_of_property_for_each_u32(struct device_node *np,
char *propname) {
struct property *iparam1;
const __be32 *iparam2;
u32 val;
of_property_for_each_u32(np, propname, iparam1, iparam2, val);
}"
compile_check_conftest "$CODE" "NV_OF_PROPERTY_FOR_EACH_U32_HAS_INTERNAL_ARGS" "" "types"
;;
of_property_read_variable_u8_array)
#
# Determine if of_property_read_variable_u8_array is present
@@ -5508,8 +5616,8 @@ compile_test() {
of_dma_configure)
#
# Determine if of_dma_configure() function is present, and how
# many arguments it takes.
# Determine if of_dma_configure() function is present, if it
# returns int, and how many arguments it takes.
#
# Added by commit 591c1ee465ce ("of: configure the platform
# device dma parameters") in v3.16. However, it was a static,
@@ -5519,6 +5627,10 @@ compile_test() {
# commit 1f5c69aa51f9 ("of: Move of_dma_configure() to device.c
# to help re-use") in v4.1.
#
# Its return type was changed from void to int by commit
# 7b07cbefb68d ("iommu: of: Handle IOMMU lookup failure with
# deferred probing or error") in v4.12.
#
# It subsequently began taking a third parameter with commit
# 3d6ce86ee794 ("drivers: remove force dma flag from buses")
# in v4.18.
@@ -5543,6 +5655,7 @@ compile_test() {
echo "#undef NV_OF_DMA_CONFIGURE_PRESENT" | append_conftest "functions"
echo "#undef NV_OF_DMA_CONFIGURE_ARGUMENT_COUNT" | append_conftest "functions"
echo "#undef NV_OF_DMA_CONFIGURE_HAS_INT_RETURN_TYPE" | append_conftest "functions"
else
echo "#define NV_OF_DMA_CONFIGURE_PRESENT" | append_conftest "functions"
@@ -5561,6 +5674,26 @@ compile_test() {
if [ -f conftest$$.o ]; then
rm -f conftest$$.o
echo "#define NV_OF_DMA_CONFIGURE_ARGUMENT_COUNT 3" | append_conftest "functions"
echo "$CONFTEST_PREAMBLE
#if defined(NV_LINUX_OF_DEVICE_H_PRESENT)
#include <linux/of_device.h>
#endif
int conftest_of_dma_configure_has_int_return_type(void) {
return of_dma_configure(NULL, NULL, false);
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
rm -f conftest$$.o
echo "#define NV_OF_DMA_CONFIGURE_HAS_INT_RETURN_TYPE" | append_conftest "functions"
else
echo "#undef NV_OF_DMA_CONFIGURE_HAS_INT_RETURN_TYPE" | append_conftest "functions"
fi
return
fi
@@ -5579,6 +5712,26 @@ compile_test() {
if [ -f conftest$$.o ]; then
rm -f conftest$$.o
echo "#define NV_OF_DMA_CONFIGURE_ARGUMENT_COUNT 2" | append_conftest "functions"
echo "$CONFTEST_PREAMBLE
#if defined(NV_LINUX_OF_DEVICE_H_PRESENT)
#include <linux/of_device.h>
#endif
int conftest_of_dma_configure_has_int_return_type(void) {
return of_dma_configure(NULL, NULL);
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
rm -f conftest$$.o
echo "#define NV_OF_DMA_CONFIGURE_HAS_INT_RETURN_TYPE" | append_conftest "functions"
else
echo "#undef NV_OF_DMA_CONFIGURE_HAS_INT_RETURN_TYPE" | append_conftest "functions"
fi
return
fi
fi
@@ -5604,6 +5757,26 @@ compile_test() {
compile_check_conftest "$CODE" "NV_ICC_GET_PRESENT" "" "functions"
;;
devm_of_icc_get)
#
# Determine if devm_of_icc_get() function is present
#
# Added by commit e145d9a ("interconnect: Add devm_of_icc_get() as
# exported API for user interconnect API")
#
CODE="
#if defined(NV_LINUX_INTERCONNECT_H_PRESENT)
#include <linux/interconnect.h>
#endif
void conftest_devm_of_icc_get(void)
{
devm_of_icc_get();
}
"
compile_check_conftest "$CODE" "NV_DEVM_ICC_GET_PRESENT" "" "functions"
;;
icc_set_bw)
#
# Determine if icc_set_bw() function is present
@@ -6050,6 +6223,20 @@ compile_test() {
compile_check_conftest "$CODE" "NV_PLATFORM_IRQ_COUNT_PRESENT" "" "functions"
;;
pcie_reset_flr)
#
# Determine if the pcie_reset_flr() function is present
#
# Added by commit 56f107d ("PCI: Add pcie_reset_flr() with
# 'probe' argument") in v5.15.
CODE="
#include <linux/pci.h>
int conftest_pcie_reset_flr(void) {
return pcie_reset_flr();
}"
compile_check_conftest "$CODE" "NV_PCIE_RESET_FLR_PRESENT" "" "functions"
;;
devm_clk_bulk_get_all)
#
# Determine if devm_clk_bulk_get_all() function is present
@@ -6207,6 +6394,32 @@ compile_test() {
compile_check_conftest "$CODE" "NV_NUM_REGISTERED_FB_PRESENT" "" "types"
;;
acpi_video_register_backlight)
#
# Determine if acpi_video_register_backlight() function is present
#
# acpi_video_register_backlight was added by commit 3dbc80a3e4c55c
# (ACPI: video: Make backlight class device registration a separate
# step (v2)) for v6.0 (2022-09-02).
# Note: the include directive for <linux/types.h> in this conftest is
# necessary in order to support kernels between commit 0b9f7d93ca61
# ("ACPI / i915: ignore firmware requests backlight change") for
# v3.16 (2014-07-07) and commit 3bd6bce369f5 ("ACPI / video: Port
# to new backlight interface selection API") for v4.2 (2015-07-16).
# Kernels within this range use the 'bool' type and the related
# 'false' value in <acpi/video.h> without first including the
# definitions of that type and value.
#
CODE="
#include <linux/types.h>
#include <acpi/video.h>
void conftest_acpi_video_register_backlight(void) {
acpi_video_register_backlight(0);
}"
compile_check_conftest "$CODE" "NV_ACPI_VIDEO_REGISTER_BACKLIGHT" "" "functions"
;;
acpi_video_backlight_use_native)
#
# Determine if acpi_video_backlight_use_native() function is present
@@ -6571,7 +6784,8 @@ compile_test() {
# Determine whether drm_fbdev_ttm_setup is present.
#
# Added by commit aae4682e5d66 ("drm/fbdev-generic:
# Convert to fbdev-ttm") in v6.11.
# Convert to fbdev-ttm") in v6.11. Removed by commit
# 1000634477d8 ("drm/fbdev-ttm: Convert to client-setup") in v6.13.
#
CODE="
#include <drm/drm_fb_helper.h>
@@ -6585,6 +6799,30 @@ compile_test() {
compile_check_conftest "$CODE" "NV_DRM_FBDEV_TTM_SETUP_PRESENT" "" "functions"
;;
drm_client_setup)
#
# Determine whether drm_client_setup is present.
#
# Added by commit d07fdf922592 ("drm/fbdev-ttm: Convert to
# client-setup") in v6.13 in drm/drm_client_setup.h, but then moved
# to drm/clients/drm_client_setup.h by commit b86711c6d6e2
# ("drm/client: Move public client header to clients/ subdirectory")
# in linux-next.
#
CODE="
#include <drm/drm_fb_helper.h>
#if defined(NV_DRM_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/drm_client_setup.h>
#elif defined(NV_DRM_CLIENTS_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/clients/drm_client_setup.h>
#endif
void conftest_drm_client_setup(void) {
drm_client_setup();
}"
compile_check_conftest "$CODE" "NV_DRM_CLIENT_SETUP_PRESENT" "" "functions"
;;
drm_output_poll_changed)
#
# Determine whether drm_mode_config_funcs.output_poll_changed
@@ -6608,6 +6846,38 @@ compile_test() {
compile_check_conftest "$CODE" "NV_DRM_OUTPUT_POLL_CHANGED_PRESENT" "" "types"
;;
aperture_remove_conflicting_devices)
#
# Determine whether aperture_remove_conflicting_devices is present.
#
# Added by commit 7283f862bd991 ("drm: Implement DRM aperture
# helpers under video/") in v6.0
CODE="
#if defined(NV_LINUX_APERTURE_H_PRESENT)
#include <linux/aperture.h>
#endif
void conftest_aperture_remove_conflicting_devices(void) {
aperture_remove_conflicting_devices();
}"
compile_check_conftest "$CODE" "NV_APERTURE_REMOVE_CONFLICTING_DEVICES_PRESENT" "" "functions"
;;
aperture_remove_conflicting_pci_devices)
#
# Determine whether aperture_remove_conflicting_pci_devices is present.
#
# Added by commit 7283f862bd991 ("drm: Implement DRM aperture
# helpers under video/") in v6.0
CODE="
#if defined(NV_LINUX_APERTURE_H_PRESENT)
#include <linux/aperture.h>
#endif
void conftest_aperture_remove_conflicting_pci_devices(void) {
aperture_remove_conflicting_pci_devices();
}"
compile_check_conftest "$CODE" "NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT" "" "functions"
;;
drm_aperture_remove_conflicting_pci_framebuffers)
#
# Determine whether drm_aperture_remove_conflicting_pci_framebuffers is present.
@@ -6701,17 +6971,17 @@ compile_test() {
# This test is not complete and may return false positives.
#
CODE="
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/hash.h>
#include <crypto/internal/ecc.h>
#include <crypto/kpp.h>
#include <crypto/public_key.h>
#include <crypto/sm3.h>
#include <keys/asymmetric-type.h>
#include <linux/crypto.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/hash.h>
#include <crypto/internal/ecc.h>
#include <crypto/kpp.h>
#include <crypto/public_key.h>
#include <crypto/sm3.h>
#include <keys/asymmetric-type.h>
#include <linux/crypto.h>
void conftest_crypto(void) {
struct shash_desc sd;
struct crypto_shash cs;
@@ -6721,6 +6991,47 @@ compile_test() {
compile_check_conftest "$CODE" "NV_CRYPTO_PRESENT" "" "symbols"
;;
crypto_akcipher_verify)
#
# Determine whether the crypto_akcipher_verify API is still present.
# It was removed by commit 6b34562 ('crypto: akcipher - Drop sign/verify operations')
# in v6.13-rc1 (2024-10-04).
#
# This test is dependent on the crypto conftest to determine whether crypto should be
# enabled at all.
#
# The test merely checks for the presence of the API, as it assumes that if the API
# is no longer present, the new API to replace it (crypto_sig_verify) must be present.
# If the kernel version is too old to have crypto_akcipher_verify, it will fail the crypto
# conftest above and all crypto code will be compiled out.
#
CODE="
#include <crypto/akcipher.h>
#include <linux/crypto.h>
void conftest_crypto_akcipher_verify(void) {
(void)crypto_akcipher_verify;
}"
compile_check_conftest "$CODE" "NV_CRYPTO_AKCIPHER_VERIFY_PRESENT" "" "symbols"
;;
ecc_digits_from_bytes)
#
# Determine whether ecc_digits_from_bytes is present.
# It was added in commit c6ab5c915da4 ('crypto: ecc - Prevent ecc_digits_from_bytes from
# reading too many bytes') in v6.10.
#
# This functionality is needed when crypto_akcipher_verify is not present.
#
CODE="
#include <crypto/internal/ecc.h>
void conftest_ecc_digits_from_bytes(void) {
(void)ecc_digits_from_bytes;
}"
compile_check_conftest "$CODE" "NV_ECC_DIGITS_FROM_BYTES_PRESENT" "" "symbols"
;;
mempolicy_has_unified_nodes)
#
# Determine if the 'mempolicy' structure has
@@ -7142,6 +7453,131 @@ compile_test() {
compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_FUNCS_PRESENT" "" "types"
;;
sg_dma_page_iter)
#
# Determine if the struct sg_dma_page_iter is present.
# This also serves to know if the argument type of the macro
# sg_page_iter_dma_address() changed:
# - before: struct sg_page_iter *piter
# - after: struct sg_dma_page_iter *dma_iter
#
# Added by commit d901b2760dc6c ("lib/scatterlist: Provide a DMA
# page iterator") v5.0.
#
CODE="
#include <linux/scatterlist.h>
struct sg_dma_page_iter conftest_dma_page_iter;"
compile_check_conftest "$CODE" "NV_SG_DMA_PAGE_ITER_PRESENT" "" "types"
;;
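The macro effectively records which iterator type sg_page_iter_dma_address() expects. A hedged sketch of a consumer, assuming the scatterlist has already been DMA-mapped; the helper below is illustrative and not part of the driver:

    #include <linux/scatterlist.h>

    /*
     * Illustrative helper: collect the DMA address of each page in a mapped
     * scatterlist, using whichever iterator type this kernel expects.
     */
    static void nv_example_collect_dma_addrs(struct scatterlist *sgl, int nents,
                                             dma_addr_t *out)
    {
        size_t i = 0;
    #if defined(NV_SG_DMA_PAGE_ITER_PRESENT)
        struct sg_dma_page_iter dma_iter;

        for_each_sg_dma_page(sgl, &dma_iter, nents, 0)
            out[i++] = sg_page_iter_dma_address(&dma_iter);
    #else
        struct sg_page_iter iter;

        for_each_sg_page(sgl, &iter, nents, 0)
            out[i++] = sg_page_iter_dma_address(&iter);
    #endif
    }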
# FIXME: See if we can remove this test
for_each_sgtable_dma_page)
#
# Determine if macro for_each_sgtable_dma_page is present.
#
# Added by commit 709d6d73c756 ("scatterlist: add generic wrappers
# for iterating over sgtable objects") v5.7.
#
CODE="
#include <linux/scatterlist.h>
void conftest_for_each_sgtable_dma_page(void) {
for_each_sgtable_dma_page();
}"
compile_check_conftest "$CODE" "NV_FOR_EACH_SGTABLE_DMA_PAGE_PRESENT" "" "functions"
;;
drm_aperture_remove_conflicting_framebuffers)
#
# Determine whether drm_aperture_remove_conflicting_framebuffers is present.
#
# drm_aperture_remove_conflicting_framebuffers was added in commit 2916059147ea
# ("drm/aperture: Add infrastructure for aperture ownership) in
# v5.14-rc1 (2021-04-12)
#
CODE="
#if defined(NV_DRM_DRM_APERTURE_H_PRESENT)
#include <drm/drm_aperture.h>
#endif
void conftest_drm_aperture_remove_conflicting_framebuffers(void) {
drm_aperture_remove_conflicting_framebuffers();
}"
compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_PRESENT" "" "functions"
;;
drm_aperture_remove_conflicting_framebuffers_has_driver_arg)
#
# Determine whether drm_aperture_remove_conflicting_framebuffers
# takes a struct drm_driver * as its fourth argument.
#
# Prior to commit 97c9bfe3f6605d41eb8f1206e6e0f62b31ba15d6, the
# fourth argument was a const char * pointer to the driver's name.
#
# To test if drm_aperture_remove_conflicting_framebuffers() has
# a req_driver argument, declare a function with typeof() of the
# kernel's prototype and then define it with the expected
# signature. Successful compilation indicates that the kernel's
# prototype matches the expected signature.
#
# This change occurred in commit 97c9bfe3f660 ("drm/aperture: Pass
# DRM driver structure instead of driver name") in v5.15
# (2021-06-29).
#
CODE="
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_APERTURE_H_PRESENT)
#include <drm/drm_aperture.h>
#endif
typeof(drm_aperture_remove_conflicting_framebuffers) conftest_drm_aperture_remove_conflicting_framebuffers;
int conftest_drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
bool primary, const struct drm_driver *req_driver)
{
return 0;
}"
compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_DRIVER_ARG" "" "types"
;;
drm_aperture_remove_conflicting_framebuffers_has_no_primary_arg)
#
# Determine whether drm_aperture_remove_conflicting_framebuffers
# no longer takes a bool 'primary' third argument.
#
# Prior to commit 62aeaeaa1b267c5149abee6b45967a5df3feed58, the
# third argument was a bool selecting whether the legacy VGA
# resources should also be removed; that only applies to PCI
# devices and is not needed in this function.
#
# To test if drm_aperture_remove_conflicting_framebuffers() lacks
# the bool primary argument, declare a function with typeof() of
# the kernel's prototype and then define it without that argument.
# Successful compilation indicates that the argument has been
# removed.
#
# This change occurred in commit 62aeaeaa1b26 ("drm/aperture: Remove
# primary argument") in v6.5 (2023-04-16).
#
CODE="
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_APERTURE_H_PRESENT)
#include <drm/drm_aperture.h>
#endif
typeof(drm_aperture_remove_conflicting_framebuffers) conftest_drm_aperture_remove_conflicting_framebuffers;
int conftest_drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
const struct drm_driver *req_driver)
{
return 0;
}"
compile_check_conftest "$CODE" "NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_NO_PRIMARY_ARG" "" "types"
;;
struct_page_has_zone_device_data)
#
# Determine if struct page has a 'zone_device_data' field.
@@ -7158,6 +7594,22 @@ compile_test() {
compile_check_conftest "$CODE" "NV_STRUCT_PAGE_HAS_ZONE_DEVICE_DATA" "" "types"
;;
page_pgmap)
#
# Determine if the page_pgmap() function is present.
#
# Added by commit 82ba975e4c43 ("mm: allow compound zone device
# pages") in v6.14
#
CODE="
#include <linux/mmzone.h>
int conftest_page_pgmap(void) {
return page_pgmap();
}"
compile_check_conftest "$CODE" "NV_PAGE_PGMAP_PRESENT" "" "functions"
;;
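A consumer would typically fall back to the older page->pgmap field when the accessor is absent. A minimal sketch, assuming the page in question is a ZONE_DEVICE page; the helper name is hypothetical:

    #include <linux/mmzone.h>
    #include <linux/memremap.h>

    /* Illustrative accessor: fetch the dev_pagemap backing a device page. */
    static struct dev_pagemap *nv_example_page_pgmap(struct page *page)
    {
    #if defined(NV_PAGE_PGMAP_PRESENT)
        return page_pgmap(page);
    #else
        return page->pgmap;
    #endif
    }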
folio_test_swapcache)
#
# Determine if the folio_test_swapcache() function is present.
@@ -7174,6 +7626,159 @@ compile_test() {
compile_check_conftest "$CODE" "NV_FOLIO_TEST_SWAPCACHE_PRESENT" "" "functions"
;;
platform_driver_struct_remove_returns_void)
#
# Determine if the 'platform_driver' structure 'remove' function
# pointer returns void.
#
# Commit 0edb555a65d1 ("platform: Make platform_driver::remove()
# return void") updated the platform_driver structure 'remove'
# callback to return void instead of int in Linux v6.11-rc1.
#
echo "$CONFTEST_PREAMBLE
#include <linux/platform_device.h>
int conftest_platform_driver_struct_remove_returns_void(struct platform_device *pdev,
struct platform_driver *driver) {
return driver->remove(pdev);
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
rm -f conftest$$.o
echo "#undef NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID" | append_conftest "types"
else
echo "#define NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID" | append_conftest "types"
fi
;;
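The resulting macro is typically used to give the platform driver's remove callback the return type this kernel expects. A hedged sketch with hypothetical names:

    #include <linux/platform_device.h>

    #if defined(NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID)
    /* v6.11+: struct platform_driver::remove() returns void. */
    static void nv_example_platform_remove(struct platform_device *pdev)
    {
        /* release per-device resources here */
    }
    #else
    /* Older kernels: remove() returns int. */
    static int nv_example_platform_remove(struct platform_device *pdev)
    {
        /* release per-device resources here */
        return 0;
    }
    #endif

    static struct platform_driver nv_example_platform_driver = {
        .remove = nv_example_platform_remove,
        .driver = { .name = "nv-example" },
    };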
module_import_ns_takes_constant)
#
# Determine if the MODULE_IMPORT_NS macro takes a string literal
# or constant.
#
# Commit cdd30ebb1b9f ("module: Convert symbol namespace to
# string literal") changed MODULE_IMPORT_NS to take a string
# literal in Linux kernel v6.13.
#
CODE="
#include <linux/module.h>
MODULE_IMPORT_NS(DMA_BUF);"
compile_check_conftest "$CODE" "NV_MODULE_IMPORT_NS_TAKES_CONSTANT" "" "generic"
;;
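Since the test compiles the identifier form, the macro ends up defined on kernels that still accept it, and a consumer can then pick the matching spelling. A sketch, assuming that interpretation of the generic conftest result:

    #include <linux/module.h>

    #if defined(NV_MODULE_IMPORT_NS_TAKES_CONSTANT)
    MODULE_IMPORT_NS(DMA_BUF);       /* older kernels: namespace given as a constant */
    #else
    MODULE_IMPORT_NS("DMA_BUF");     /* v6.13+: namespace given as a string literal */
    #endif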
assign_str)
#
# Determine whether the __assign_str() macro, used in tracepoint
# event definitions, has the 'src' parameter.
#
# The 'src' parameter was removed by commit 2c92ca849fcc
# ("tracing/treewide: Remove second parameter of __assign_str()") in
# v6.10.
#
# The expected usage of __assign_str() inside the TRACE_EVENT()
# macro, which involves multiple include passes and assumes it is
# in a header file, requires a non-standard conftest approach of
# producing both a header and a C file.
#
echo "$CONFTEST_PREAMBLE
#undef TRACE_SYSTEM
#define TRACE_SYSTEM conftest
#if !defined(_TRACE_CONFTEST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CONFTEST_H
#include <linux/tracepoint.h>
TRACE_EVENT(conftest,
TP_PROTO(const char *s),
TP_ARGS(s),
TP_STRUCT__entry(__string(s, s)),
TP_fast_assign(__assign_str(s);),
TP_printk(\"%s\", __get_str(s))
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE conftest$$
#include <trace/define_trace.h>
" > conftest$$.h
echo "$CONFTEST_PREAMBLE
#define CREATE_TRACE_POINTS
#include \"conftest$$.h\"
void conftest_assign_str(void) {
trace_conftest(\"conftest\");
}
" > conftest$$.c
$CC $CFLAGS -c conftest$$.c >/dev/null 2>&1
rm -f conftest$$.c conftest$$.h
if [ -f conftest$$.o ]; then
rm -f conftest$$.o
echo "#define NV_ASSIGN_STR_ARGUMENT_COUNT 1" | append_conftest "functions"
else
echo "#define NV_ASSIGN_STR_ARGUMENT_COUNT 2" | append_conftest "functions"
fi
;;
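Because preprocessor conditionals cannot appear inside a macro argument such as TP_fast_assign(), a consumer would normally wrap __assign_str() once and use the wrapper in its TRACE_EVENT() definitions. A sketch with a hypothetical wrapper name:

    #if NV_ASSIGN_STR_ARGUMENT_COUNT == 1
    /* v6.10+: __assign_str() takes only the destination field. */
    #define NV_EXAMPLE_ASSIGN_STR(dst, src) __assign_str(dst)
    #else
    /* Older kernels: __assign_str() still takes the source string as well. */
    #define NV_EXAMPLE_ASSIGN_STR(dst, src) __assign_str(dst, src)
    #endif

    /* Inside a TRACE_EVENT() definition: TP_fast_assign(NV_EXAMPLE_ASSIGN_STR(s, s);) */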
drm_driver_has_date)
#
# Determine if the 'drm_driver' structure has a 'date' field.
#
# Removed by commit cb2e1c2136f7 ("drm: remove driver date from
# struct drm_driver and all drivers") in linux-next, expected in
# v6.14.
#
CODE="
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
int conftest_drm_driver_has_date(void) {
return offsetof(struct drm_driver, date);
}"
compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DATE" "" "types"
;;
drm_connector_helper_funcs_mode_valid_has_const_mode_arg)
#
# Determine if the 'mode' pointer argument is const in
# drm_connector_helper_funcs::mode_valid.
#
# The 'mode' pointer argument in
# drm_connector_helper_funcs::mode_valid was made const by commit
# 26d6fd81916e ("drm/connector: make mode_valid take a const struct
# drm_display_mode") in linux-next, expected in v6.15.
#
CODE="
#if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT)
#include <drm/drm_atomic_helper.h>
#endif
static int conftest_drm_connector_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode) {
return 0;
}
const struct drm_connector_helper_funcs conftest_drm_connector_helper_funcs = {
.mode_valid = conftest_drm_connector_mode_valid,
};"
compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG" "" "types"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit. Please
# avoid specifying -rc kernels, and only use SHAs that actually exist
# in the upstream Linux kernel.

View File

@@ -14,8 +14,10 @@ NV_HEADER_PRESENCE_TESTS = \
drm/drm_encoder.h \
drm/drm_atomic_uapi.h \
drm/drm_drv.h \
drm/drm_edid.h \
drm/drm_fbdev_generic.h \
drm/drm_fbdev_ttm.h \
drm/drm_client_setup.h \
drm/drm_framebuffer.h \
drm/drm_connector.h \
drm/drm_probe_helper.h \
@@ -30,10 +32,13 @@ NV_HEADER_PRESENCE_TESTS = \
drm/drm_mode_config.h \
drm/drm_modeset_lock.h \
drm/drm_property.h \
drm/clients/drm_client_setup.h \
dt-bindings/interconnect/tegra_icc_id.h \
generated/autoconf.h \
generated/compile.h \
generated/utsrelease.h \
linux/aperture.h \
linux/dma-direct.h \
linux/efi.h \
linux/kconfig.h \
linux/platform/tegra/mc_utils.h \
@@ -61,13 +66,10 @@ NV_HEADER_PRESENCE_TESTS = \
linux/nvhost.h \
linux/nvhost_t194.h \
linux/host1x-next.h \
asm/book3s/64/hash-64k.h \
asm/set_memory.h \
asm/prom.h \
asm/powernv.h \
linux/atomic.h \
asm/barrier.h \
asm/opal-api.h \
sound/hdaudio.h \
asm/pgtable_types.h \
asm/page.h \
@@ -102,5 +104,6 @@ NV_HEADER_PRESENCE_TESTS = \
asm/cpufeature.h \
linux/mpi.h \
asm/mshyperv.h \
crypto/sig.h \
linux/pfn_t.h

View File

@@ -62,6 +62,31 @@
#undef NV_DRM_FENCE_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif
#endif
#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
(defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))
// XXX remove dependency on DRM_TTM_HELPER by implementing nvidia-drm's own
// .fbdev_probe callback that uses NVKMS kapi
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_CLIENT_AVAILABLE
#endif
#endif
/*
* We can support color management if either drm_helper_crtc_enable_color_mgmt()
* or drm_crtc_enable_color_mgmt() exist.

View File

@@ -314,7 +314,11 @@ static int nv_drm_connector_get_modes(struct drm_connector *connector)
}
static int nv_drm_connector_mode_valid(struct drm_connector *connector,
#if defined(NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG)
const struct drm_display_mode *mode)
#else
struct drm_display_mode *mode)
#endif
{
struct drm_device *dev = connector->dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);

View File

@@ -372,23 +372,88 @@ cursor_plane_req_config_update(struct drm_plane *plane,
old_config.dstY != req_config->dstY;
}
static void free_drm_lut_surface(struct kref *ref)
static void release_drm_nvkms_surface(struct nv_drm_nvkms_surface *drm_nvkms_surface)
{
struct nv_drm_lut_surface *drm_lut_surface =
container_of(ref, struct nv_drm_lut_surface, refcount);
struct NvKmsKapiDevice *pDevice = drm_lut_surface->pDevice;
struct NvKmsKapiDevice *pDevice = drm_nvkms_surface->pDevice;
BUG_ON(drm_lut_surface->nvkms_surface == NULL);
BUG_ON(drm_lut_surface->nvkms_memory == NULL);
BUG_ON(drm_lut_surface->buffer == NULL);
BUG_ON(drm_nvkms_surface->nvkms_surface == NULL);
BUG_ON(drm_nvkms_surface->nvkms_memory == NULL);
BUG_ON(drm_nvkms_surface->buffer == NULL);
nvKms->destroySurface(pDevice, drm_lut_surface->nvkms_surface);
nvKms->unmapMemory(pDevice, drm_lut_surface->nvkms_memory,
nvKms->destroySurface(pDevice, drm_nvkms_surface->nvkms_surface);
nvKms->unmapMemory(pDevice, drm_nvkms_surface->nvkms_memory,
NVKMS_KAPI_MAPPING_TYPE_KERNEL,
drm_lut_surface->buffer);
nvKms->freeMemory(pDevice, drm_lut_surface->nvkms_memory);
drm_nvkms_surface->buffer);
nvKms->freeMemory(pDevice, drm_nvkms_surface->nvkms_memory);
}
nv_drm_free(drm_lut_surface);
static int init_drm_nvkms_surface(struct nv_drm_device *nv_dev,
struct nv_drm_nvkms_surface *drm_nvkms_surface,
struct nv_drm_nvkms_surface_params *surface_params)
{
struct NvKmsKapiDevice *pDevice = nv_dev->pDevice;
NvU8 compressible = 0; // No compression
struct NvKmsKapiCreateSurfaceParams params = {};
struct NvKmsKapiMemory *surface_mem;
struct NvKmsKapiSurface *surface;
void *buffer;
params.format = surface_params->format;
params.width = surface_params->width;
params.height = surface_params->height;
/* Allocate displayable memory. */
if (nv_dev->hasVideoMemory) {
surface_mem =
nvKms->allocateVideoMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
surface_params->surface_size,
&compressible);
} else {
surface_mem =
nvKms->allocateSystemMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
surface_params->surface_size,
&compressible);
}
if (surface_mem == NULL) {
return -ENOMEM;
}
/* Map memory in order to populate it. */
if (!nvKms->mapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL,
&buffer)) {
nvKms->freeMemory(pDevice, surface_mem);
return -ENOMEM;
}
params.planes[0].memory = surface_mem;
params.planes[0].offset = 0;
params.planes[0].pitch = surface_params->surface_size;
/* Create surface. */
surface = nvKms->createSurface(pDevice, &params);
if (surface == NULL) {
nvKms->unmapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL, buffer);
nvKms->freeMemory(pDevice, surface_mem);
return -ENOMEM;
}
/* Pack into struct nv_drm_nvkms_surface. */
drm_nvkms_surface->pDevice = pDevice;
drm_nvkms_surface->nvkms_memory = surface_mem;
drm_nvkms_surface->nvkms_surface = surface;
drm_nvkms_surface->buffer = buffer;
/* Init refcount. */
kref_init(&drm_nvkms_surface->refcount);
return 0;
}
static struct nv_drm_lut_surface *alloc_drm_lut_surface(
@@ -399,86 +464,49 @@ static struct nv_drm_lut_surface *alloc_drm_lut_surface(
NvU32 num_vss_header_entries,
NvU32 num_entries)
{
struct NvKmsKapiDevice *pDevice = nv_dev->pDevice;
struct nv_drm_lut_surface *drm_lut_surface;
NvU8 compressible = 0; // No compression
size_t size =
const size_t surface_size =
(((num_vss_header_entries + num_entries) *
NVKMS_LUT_CAPS_LUT_ENTRY_SIZE) + 255) & ~255; // 256-byte aligned
struct NvKmsKapiMemory *surface_mem;
struct NvKmsKapiSurface *surface;
struct NvKmsKapiCreateSurfaceParams params = {};
NvU16 *lut_data;
struct nv_drm_nvkms_surface_params params = {};
/* Allocate displayable memory. */
if (nv_dev->hasVideoMemory) {
surface_mem =
nvKms->allocateVideoMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
size,
&compressible);
} else {
surface_mem =
nvKms->allocateSystemMemory(pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
size,
&compressible);
}
if (surface_mem == NULL) {
return NULL;
}
/* Map memory in order to populate it. */
if (!nvKms->mapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL,
(void **) &lut_data)) {
nvKms->freeMemory(pDevice, surface_mem);
return NULL;
}
/* Create surface. */
params.format = NvKmsSurfaceMemoryFormatR16G16B16A16;
params.width = num_vss_header_entries + num_entries;
params.height = 1;
params.planes[0].memory = surface_mem;
params.planes[0].offset = 0;
params.planes[0].pitch = size;
params.surface_size = surface_size;
surface = nvKms->createSurface(pDevice, &params);
if (surface == NULL) {
nvKms->unmapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL, (void *) lut_data);
nvKms->freeMemory(pDevice, surface_mem);
return NULL;
}
/* Pack into struct nv_drm_lut_surface. */
drm_lut_surface = nv_drm_calloc(1, sizeof(struct nv_drm_lut_surface));
if (drm_lut_surface == NULL) {
nvKms->destroySurface(pDevice, surface);
nvKms->unmapMemory(pDevice, surface_mem,
NVKMS_KAPI_MAPPING_TYPE_KERNEL, (void *) lut_data);
nvKms->freeMemory(pDevice, surface_mem);
return NULL;
}
drm_lut_surface->pDevice = pDevice;
drm_lut_surface->nvkms_memory = surface_mem;
drm_lut_surface->nvkms_surface = surface;
drm_lut_surface->buffer = lut_data;
if (init_drm_nvkms_surface(nv_dev, &drm_lut_surface->base, &params) != 0) {
nv_drm_free(drm_lut_surface);
return NULL;
}
drm_lut_surface->properties.vssSegments = num_vss_header_segments;
drm_lut_surface->properties.vssType = vss_type;
drm_lut_surface->properties.lutEntries = num_entries;
drm_lut_surface->properties.entryFormat = entry_format;
/* Init refcount. */
kref_init(&drm_lut_surface->refcount);
return drm_lut_surface;
}
static void free_drm_lut_surface(struct kref *ref)
{
struct nv_drm_nvkms_surface *drm_nvkms_surface =
container_of(ref, struct nv_drm_nvkms_surface, refcount);
struct nv_drm_lut_surface *drm_lut_surface =
container_of(drm_nvkms_surface, struct nv_drm_lut_surface, base);
// Clean up base
release_drm_nvkms_surface(drm_nvkms_surface);
nv_drm_free(drm_lut_surface);
}
static NvU32 fp32_lut_interp(
NvU16 entry0,
NvU16 entry1,
@@ -582,7 +610,7 @@ static struct nv_drm_lut_surface *create_drm_ilut_surface_vss(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Calculate VSS header. */
if (vss_header_seg_sizes != NULL) {
@@ -733,7 +761,7 @@ static struct nv_drm_lut_surface *create_drm_ilut_surface_legacy(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Fill LUT surface. */
for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) {
@@ -799,7 +827,7 @@ static struct nv_drm_lut_surface *create_drm_tmo_surface(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Calculate linear VSS header. */
for (entry_idx = 0; entry_idx < NUM_VSS_HEADER_ENTRIES; entry_idx++) {
@@ -901,7 +929,7 @@ static struct nv_drm_lut_surface *create_drm_olut_surface_vss(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Calculate VSS header. */
if (vss_header_seg_sizes != NULL) {
@@ -1021,7 +1049,7 @@ static struct nv_drm_lut_surface *create_drm_olut_surface_legacy(
return NULL;
}
lut_data = (NvU16 *) drm_lut_surface->buffer;
lut_data = (NvU16 *) drm_lut_surface->base.buffer;
/* Fill LUT surface. */
for (entry_idx = 0; entry_idx < NVKMS_LUT_ARRAY_SIZE; entry_idx++) {
@@ -1057,6 +1085,74 @@ update_matrix_override(struct drm_property_blob *blob,
return enabled;
}
static enum NvKmsInputColorSpace nv_get_nvkms_input_colorspace(
enum nv_drm_input_color_space colorSpace)
{
switch (colorSpace) {
case NV_DRM_INPUT_COLOR_SPACE_NONE:
return NVKMS_INPUT_COLOR_SPACE_NONE;
case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR:
return NVKMS_INPUT_COLOR_SPACE_BT709;
case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ:
return NVKMS_INPUT_COLOR_SPACE_BT2100;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported input colorspace");
return NVKMS_INPUT_COLOR_SPACE_NONE;
}
}
static enum NvKmsInputTf nv_get_nvkms_input_tf(
enum nv_drm_input_color_space colorSpace)
{
switch (colorSpace) {
case NV_DRM_INPUT_COLOR_SPACE_NONE:
return NVKMS_INPUT_TF_LINEAR;
case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR:
return NVKMS_INPUT_TF_LINEAR;
case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ:
return NVKMS_INPUT_TF_PQ;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported input colorspace");
return NVKMS_INPUT_TF_LINEAR;
}
}
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
static enum NvKmsInputColorSpace nv_drm_color_encoding_to_nvkms_colorspace(
enum drm_color_encoding color_encoding)
{
switch(color_encoding) {
case DRM_COLOR_YCBCR_BT601:
return NVKMS_INPUT_COLOR_SPACE_BT601;
case DRM_COLOR_YCBCR_BT709:
return NVKMS_INPUT_COLOR_SPACE_BT709;
case DRM_COLOR_YCBCR_BT2020:
return NVKMS_INPUT_COLOR_SPACE_BT2020;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported DRM color_encoding");
return NVKMS_INPUT_COLOR_SPACE_NONE;
}
}
static enum NvKmsInputColorRange nv_drm_color_range_to_nvkms_color_range(
enum drm_color_range color_range)
{
switch(color_range) {
case DRM_COLOR_YCBCR_FULL_RANGE:
return NVKMS_INPUT_COLOR_RANGE_FULL;
case DRM_COLOR_YCBCR_LIMITED_RANGE:
return NVKMS_INPUT_COLOR_RANGE_LIMITED;
default:
/* We shouldn't hit this */
WARN_ON("Unsupported DRM color_range");
return NVKMS_INPUT_COLOR_RANGE_DEFAULT;
}
}
#endif
static int
plane_req_config_update(struct drm_plane *plane,
struct drm_plane_state *plane_state,
@@ -1190,8 +1286,37 @@ plane_req_config_update(struct drm_plane *plane,
nv_plane->defaultCompositionMode;
#endif
req_config->config.inputColorSpace =
nv_drm_plane_state->input_colorspace;
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
if ((nv_drm_plane_state->input_colorspace == NV_DRM_INPUT_COLOR_SPACE_NONE) &&
nv_drm_format_is_yuv(plane_state->fb->format->format)) {
if (nv_plane->supportsColorProperties) {
req_config->config.inputColorSpace =
nv_drm_color_encoding_to_nvkms_colorspace(plane_state->color_encoding);
req_config->config.inputColorRange =
nv_drm_color_range_to_nvkms_color_range(plane_state->color_range);
} else {
req_config->config.inputColorSpace = NVKMS_INPUT_COLOR_SPACE_NONE;
req_config->config.inputColorRange = NVKMS_INPUT_COLOR_RANGE_DEFAULT;
}
req_config->config.inputTf = NVKMS_INPUT_TF_LINEAR;
} else {
#endif
req_config->config.inputColorSpace =
nv_get_nvkms_input_colorspace(nv_drm_plane_state->input_colorspace);
req_config->config.inputColorRange = NVKMS_INPUT_COLOR_RANGE_DEFAULT;
req_config->config.inputTf =
nv_get_nvkms_input_tf(nv_drm_plane_state->input_colorspace);
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
}
#endif
req_config->flags.inputTfChanged =
(old_config.inputTf != req_config->config.inputTf);
req_config->flags.inputColorSpaceChanged =
(old_config.inputColorSpace != req_config->config.inputColorSpace);
req_config->flags.inputColorRangeChanged =
(old_config.inputColorRange != req_config->config.inputColorRange);
req_config->config.syncParams.preSyncptSpecified = false;
req_config->config.syncParams.postSyncptRequested = false;
@@ -1240,10 +1365,10 @@ plane_req_config_update(struct drm_plane *plane,
switch (info_frame->eotf) {
case HDMI_EOTF_SMPTE_ST2084:
req_config->config.tf = NVKMS_OUTPUT_TF_PQ;
req_config->config.outputTf = NVKMS_OUTPUT_TF_PQ;
break;
case HDMI_EOTF_TRADITIONAL_GAMMA_SDR:
req_config->config.tf =
req_config->config.outputTf =
NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR;
break;
default:
@@ -1254,7 +1379,7 @@ plane_req_config_update(struct drm_plane *plane,
req_config->config.hdrMetadata.enabled = true;
} else {
req_config->config.hdrMetadata.enabled = false;
req_config->config.tf = NVKMS_OUTPUT_TF_NONE;
req_config->config.outputTf = NVKMS_OUTPUT_TF_NONE;
}
req_config->flags.hdrMetadataChanged =
@@ -1264,7 +1389,7 @@ plane_req_config_update(struct drm_plane *plane,
&req_config->config.hdrMetadata.val,
sizeof(struct NvKmsHDRStaticMetadata)));
req_config->flags.tfChanged = (old_config.tf != req_config->config.tf);
req_config->flags.outputTfChanged = (old_config.outputTf != req_config->config.outputTf);
#endif
req_config->config.matrixOverrides.enabled.lmsCtm =
@@ -1295,7 +1420,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->degamma_changed) {
if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
nv_drm_plane_state->degamma_drm_lut_surface = NULL;
}
@@ -1327,7 +1452,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) {
req_config->config.ilut.enabled = NV_TRUE;
req_config->config.ilut.lutSurface =
nv_drm_plane_state->degamma_drm_lut_surface->nvkms_surface;
nv_drm_plane_state->degamma_drm_lut_surface->base.nvkms_surface;
req_config->config.ilut.offset = 0;
req_config->config.ilut.vssSegments =
nv_drm_plane_state->degamma_drm_lut_surface->properties.vssSegments;
@@ -1346,7 +1471,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->tmo_changed) {
if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->base.refcount,
free_drm_lut_surface);
nv_drm_plane_state->tmo_drm_lut_surface = NULL;
}
@@ -1363,7 +1488,7 @@ plane_req_config_update(struct drm_plane *plane,
if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) {
req_config->config.tmo.enabled = NV_TRUE;
req_config->config.tmo.lutSurface =
nv_drm_plane_state->tmo_drm_lut_surface->nvkms_surface;
nv_drm_plane_state->tmo_drm_lut_surface->base.nvkms_surface;
req_config->config.tmo.offset = 0;
req_config->config.tmo.vssSegments =
nv_drm_plane_state->tmo_drm_lut_surface->properties.vssSegments;
@@ -1870,7 +1995,7 @@ nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
nv_plane_state->degamma_drm_lut_surface =
nv_old_plane_state->degamma_drm_lut_surface;
if (nv_plane_state->degamma_drm_lut_surface) {
kref_get(&nv_plane_state->degamma_drm_lut_surface->refcount);
kref_get(&nv_plane_state->degamma_drm_lut_surface->base.refcount);
}
nv_plane_state->tmo_lut = nv_old_plane_state->tmo_lut;
@@ -1881,7 +2006,7 @@ nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
nv_plane_state->tmo_drm_lut_surface =
nv_old_plane_state->tmo_drm_lut_surface;
if (nv_plane_state->tmo_drm_lut_surface) {
kref_get(&nv_plane_state->tmo_drm_lut_surface->refcount);
kref_get(&nv_plane_state->tmo_drm_lut_surface->base.refcount);
}
return &nv_plane_state->base;
@@ -1909,13 +2034,13 @@ static inline void __nv_drm_plane_atomic_destroy_state(
nv_drm_property_blob_put(nv_drm_plane_state->degamma_lut);
if (nv_drm_plane_state->degamma_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->degamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
}
nv_drm_property_blob_put(nv_drm_plane_state->tmo_lut);
if (nv_drm_plane_state->tmo_drm_lut_surface != NULL) {
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->refcount,
kref_put(&nv_drm_plane_state->tmo_drm_lut_surface->base.refcount,
free_drm_lut_surface);
}
}
@@ -2113,7 +2238,7 @@ nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
}
nv_state->regamma_divisor = nv_old_state->regamma_divisor;
if (nv_state->regamma_drm_lut_surface) {
kref_get(&nv_state->regamma_drm_lut_surface->refcount);
kref_get(&nv_state->regamma_drm_lut_surface->base.refcount);
}
nv_state->regamma_changed = false;
@@ -2142,7 +2267,7 @@ static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
nv_drm_property_blob_put(nv_state->regamma_lut);
if (nv_state->regamma_drm_lut_surface != NULL) {
kref_put(&nv_state->regamma_drm_lut_surface->refcount,
kref_put(&nv_state->regamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
}
@@ -2386,7 +2511,7 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (nv_crtc_state->regamma_changed) {
if (nv_crtc_state->regamma_drm_lut_surface != NULL) {
kref_put(&nv_crtc_state->regamma_drm_lut_surface->refcount,
kref_put(&nv_crtc_state->regamma_drm_lut_surface->base.refcount,
free_drm_lut_surface);
nv_crtc_state->regamma_drm_lut_surface = NULL;
}
@@ -2417,7 +2542,7 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (nv_crtc_state->regamma_drm_lut_surface != NULL) {
req_config->modeSetConfig.olut.enabled = NV_TRUE;
req_config->modeSetConfig.olut.lutSurface =
nv_crtc_state->regamma_drm_lut_surface->nvkms_surface;
nv_crtc_state->regamma_drm_lut_surface->base.nvkms_surface;
req_config->modeSetConfig.olut.offset = 0;
req_config->modeSetConfig.olut.vssSegments =
nv_crtc_state->regamma_drm_lut_surface->properties.vssSegments;
@@ -2521,7 +2646,7 @@ static void nv_drm_plane_install_properties(
if (nv_dev->nv_input_colorspace_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_input_colorspace_property,
NVKMS_INPUT_COLORSPACE_NONE);
NV_DRM_INPUT_COLOR_SPACE_NONE);
}
if (supportsICtCp) {
@@ -2531,17 +2656,14 @@ static void nv_drm_plane_install_properties(
&plane->base, nv_dev->nv_hdr_output_metadata_property, 0);
}
#endif
}
/*
* The old DRM_OBJECT_MAX_PROPERTY limit of 24 is too small to
* accommodate all of the properties for the ICtCp pipeline.
*
* Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to
* accommodate new color props") in Linux v6.8 increased the limit to
* 64. To be safe, require this before attaching any properties for the
* ICtCp pipeline.
*/
if (DRM_OBJECT_MAX_PROPERTY >= 64) {
/*
* Per-plane HDR properties get us dangerously close to the 24 property
* limit on kernels that don't support NV_DRM_USE_EXTENDED_PROPERTIES.
*/
if (NV_DRM_USE_EXTENDED_PROPERTIES) {
if (supportsICtCp) {
if (nv_dev->nv_plane_lms_ctm_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_lms_ctm_property, 0);
@@ -2568,36 +2690,36 @@ static void nv_drm_plane_install_properties(
NVKMS_LUT_ARRAY_SIZE);
}
}
}
if (nv_dev->nv_plane_blend_ctm_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_blend_ctm_property, 0);
}
if (nv_dev->nv_plane_blend_ctm_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_blend_ctm_property, 0);
}
if (nv_plane->ilut_caps.supported) {
if (nv_plane->ilut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) {
if (nv_dev->nv_plane_degamma_tf_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_tf_property,
NV_DRM_TRANSFER_FUNCTION_DEFAULT);
if (nv_plane->ilut_caps.supported) {
if (nv_plane->ilut_caps.vssSupport == NVKMS_LUT_VSS_SUPPORTED) {
if (nv_dev->nv_plane_degamma_tf_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_tf_property,
NV_DRM_TRANSFER_FUNCTION_DEFAULT);
}
if (nv_dev->nv_plane_degamma_multiplier_property) {
/* Default to 1 in S31.32 Sign-Magnitude Format */
nv_plane_state->degamma_multiplier = ((uint64_t) 1) << 32;
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_multiplier_property,
nv_plane_state->degamma_multiplier);
}
}
if (nv_dev->nv_plane_degamma_multiplier_property) {
/* Default to 1 in S31.32 Sign-Magnitude Format */
nv_plane_state->degamma_multiplier = ((uint64_t) 1) << 32;
if (nv_dev->nv_plane_degamma_lut_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_multiplier_property,
nv_plane_state->degamma_multiplier);
&plane->base, nv_dev->nv_plane_degamma_lut_property, 0);
}
if (nv_dev->nv_plane_degamma_lut_size_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_lut_size_property,
NVKMS_LUT_ARRAY_SIZE);
}
}
if (nv_dev->nv_plane_degamma_lut_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_lut_property, 0);
}
if (nv_dev->nv_plane_degamma_lut_size_property) {
drm_object_attach_property(
&plane->base, nv_dev->nv_plane_degamma_lut_size_property,
NVKMS_LUT_ARRAY_SIZE);
}
}
}
@@ -2776,6 +2898,29 @@ nv_drm_plane_create(struct drm_device *dev,
goto failed_plane_init;
}
#if defined(NV_DRM_PLANE_CREATE_COLOR_PROPERTIES_PRESENT)
if (pResInfo->caps.supportsInputColorSpace &&
pResInfo->caps.supportsInputColorRange) {
nv_plane->supportsColorProperties = true;
drm_plane_create_color_properties(
plane,
NVBIT(DRM_COLOR_YCBCR_BT601) |
NVBIT(DRM_COLOR_YCBCR_BT709) |
NVBIT(DRM_COLOR_YCBCR_BT2020),
NVBIT(DRM_COLOR_YCBCR_FULL_RANGE) |
NVBIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_FULL_RANGE
);
} else {
nv_plane->supportsColorProperties = false;
}
#else
nv_plane->supportsColorProperties = false;
#endif
drm_plane_helper_add(plane, &nv_plane_helper_funcs);
if (plane_type != DRM_PLANE_TYPE_CURSOR) {

View File

@@ -191,6 +191,13 @@ struct nv_drm_plane {
*/
uint32_t layer_idx;
/**
* @supportsColorProperties
*
* If true, supports the COLOR_ENCODING and COLOR_RANGE properties.
*/
bool supportsColorProperties;
struct NvKmsLUTCaps ilut_caps;
struct NvKmsLUTCaps tmo_caps;
};
@@ -203,10 +210,23 @@ static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
return container_of(plane, struct nv_drm_plane, base);
}
struct nv_drm_lut_surface {
struct nv_drm_nvkms_surface {
struct NvKmsKapiDevice *pDevice;
struct NvKmsKapiMemory *nvkms_memory;
struct NvKmsKapiSurface *nvkms_surface;
void *buffer;
struct kref refcount;
};
struct nv_drm_nvkms_surface_params {
NvU32 width;
NvU32 height;
size_t surface_size;
enum NvKmsSurfaceMemoryFormat format;
};
struct nv_drm_lut_surface {
struct nv_drm_nvkms_surface base;
struct {
NvU32 vssSegments;
enum NvKmsLUTVssType vssType;
@@ -215,14 +235,12 @@ struct nv_drm_lut_surface {
enum NvKmsLUTFormat entryFormat;
} properties;
void *buffer;
struct kref refcount;
};
struct nv_drm_plane_state {
struct drm_plane_state base;
s32 __user *fd_user_ptr;
enum NvKmsInputColorSpace input_colorspace;
enum nv_drm_input_color_space input_colorspace;
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct drm_property_blob *hdr_output_metadata;
#endif

View File

@@ -35,6 +35,8 @@
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-utils.h"
#include "nv_dpy_id.h"
#if defined(NV_DRM_AVAILABLE)
@@ -64,11 +66,24 @@
#include <drm/drm_ioctl.h>
#endif
#if defined(NV_DRM_FBDEV_AVAILABLE)
#if defined(NV_LINUX_APERTURE_H_PRESENT)
#include <linux/aperture.h>
#endif
#if defined(NV_DRM_DRM_APERTURE_H_PRESENT)
#include <drm/drm_aperture.h>
#endif
#if defined(NV_DRM_FBDEV_AVAILABLE)
#include <drm/drm_fb_helper.h>
#endif
#if defined(NV_DRM_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/drm_client_setup.h>
#elif defined(NV_DRM_CLIENTS_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/clients/drm_client_setup.h>
#endif
#if defined(NV_DRM_DRM_FBDEV_TTM_H_PRESENT)
#include <drm/drm_fbdev_ttm.h>
#elif defined(NV_DRM_DRM_FBDEV_GENERIC_H_PRESENT)
@@ -77,6 +92,7 @@
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/sort.h>
/*
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
@@ -107,15 +123,15 @@ static int nv_drm_revoke_sub_ownership(struct drm_device *dev);
static struct nv_drm_device *dev_list = NULL;
static char* nv_get_input_colorspace_name(
enum NvKmsInputColorSpace colorSpace)
static const char* nv_get_input_colorspace_name(
enum nv_drm_input_color_space colorSpace)
{
switch (colorSpace) {
case NVKMS_INPUT_COLORSPACE_NONE:
case NV_DRM_INPUT_COLOR_SPACE_NONE:
return "None";
case NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR:
case NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR:
return "scRGB Linear FP16";
case NVKMS_INPUT_COLORSPACE_BT2100_PQ:
case NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ:
return "BT.2100 PQ";
default:
/* We shouldn't hit this */
@@ -271,6 +287,123 @@ done:
mutex_unlock(&nv_dev->lock);
}
struct nv_drm_mst_display_info {
NvKmsKapiDisplay handle;
NvBool isDpMST;
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH];
};
/*
* Helper function to get DpMST display info.
* dpMSTDisplayInfos is allocated dynamically, so the caller
* must free it after finishing the query.
*/
static int nv_drm_get_mst_display_infos
(
struct nv_drm_device *nv_dev,
NvKmsKapiDisplay hDisplay,
struct nv_drm_mst_display_info **dpMSTDisplayInfos,
NvU32 *nDynamicDisplays
)
{
struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
struct NvKmsKapiStaticDisplayInfo *dynamicDisplayInfo = NULL;
struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
struct nv_drm_mst_display_info *displayInfos = NULL;
NvU32 i = 0;
int ret = 0;
NVDpyId dpyId;
*nDynamicDisplays = 0;
/* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */
if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
ret = -ENOMEM;
goto done;
}
if ((dynamicDisplayInfo = nv_drm_calloc(1, sizeof(*dynamicDisplayInfo))) == NULL) {
ret = -ENOMEM;
goto done;
}
if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
ret = -EINVAL;
goto done;
}
connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
displayInfo->connectorHandle);
if (IS_ERR(connectorInfo)) {
ret = PTR_ERR(connectorInfo);
goto done;
}
*nDynamicDisplays = nvCountDpyIdsInDpyIdList(connectorInfo->dynamicDpyIdList);
if (*nDynamicDisplays == 0) {
goto done;
}
if ((displayInfos = nv_drm_calloc(*nDynamicDisplays, sizeof(*displayInfos))) == NULL) {
ret = -ENOMEM;
goto done;
}
FOR_ALL_DPY_IDS(dpyId, connectorInfo->dynamicDpyIdList) {
if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice,
nvDpyIdToNvU32(dpyId),
dynamicDisplayInfo)) {
ret = -EINVAL;
nv_drm_free(displayInfos);
goto done;
}
displayInfos[i].handle = dynamicDisplayInfo->handle;
displayInfos[i].isDpMST = dynamicDisplayInfo->isDpMST;
memcpy(displayInfos[i].dpAddress, dynamicDisplayInfo->dpAddress, sizeof(dynamicDisplayInfo->dpAddress));
i++;
}
*dpMSTDisplayInfos = displayInfos;
done:
nv_drm_free(displayInfo);
nv_drm_free(dynamicDisplayInfo);
nv_drm_free(connectorInfo);
return ret;
}
static int nv_drm_disp_cmp (const void *l, const void *r)
{
struct nv_drm_mst_display_info *l_info = (struct nv_drm_mst_display_info *)l;
struct nv_drm_mst_display_info *r_info = (struct nv_drm_mst_display_info *)r;
return strcmp(l_info->dpAddress, r_info->dpAddress);
}
/*
* Helper function to sort displays by dpAddress, compared as strings
* rather than numerically, so that DRM connector IDs are created in a
* deterministic order.
*/
static void nv_drm_sort_dynamic_displays_by_dp_addr
(
struct nv_drm_mst_display_info *infos,
int nDynamicDisplays
)
{
sort(infos, nDynamicDisplays, sizeof(*infos), nv_drm_disp_cmp, NULL);
}
/*
* Helper function to initialize drm_device::mode_config from
* NvKmsKapiDevice's resource information.
@@ -352,9 +485,11 @@ static void nv_drm_enumerate_encoders_and_connectors
nv_dev,
"Failed to enumurate NvKmsKapiDisplay handles");
} else {
NvU32 i;
NvU32 i, j;
NvU32 nDynamicDisplays = 0;
for (i = 0; i < nDisplays; i++) {
struct nv_drm_mst_display_info *displayInfos = NULL;
struct drm_encoder *encoder =
nv_drm_add_encoder(dev, hDisplays[i]);
@@ -364,6 +499,34 @@ static void nv_drm_enumerate_encoders_and_connectors
"Failed to add connector for NvKmsKapiDisplay 0x%08x",
hDisplays[i]);
}
if (nv_drm_get_mst_display_infos(nv_dev, hDisplays[i],
&displayInfos, &nDynamicDisplays)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to get dynamic displays");
} else if (nDynamicDisplays) {
nv_drm_sort_dynamic_displays_by_dp_addr(displayInfos, nDynamicDisplays);
for (j = 0; j < nDynamicDisplays; j++) {
if (displayInfos[j].isDpMST) {
struct drm_encoder *mst_encoder =
nv_drm_add_encoder(dev, displayInfos[j].handle);
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "found DP MST port display handle %u",
displayInfos[j].handle);
if (IS_ERR(mst_encoder)) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to add connector for NvKmsKapiDisplay 0x%08x",
displayInfos[j].handle);
}
}
}
nv_drm_free(displayInfos);
}
}
}
@@ -589,6 +752,7 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
memset(&allocateDeviceParams, 0, sizeof(allocateDeviceParams));
allocateDeviceParams.gpuId = nv_dev->gpu_info.gpu_id;
allocateDeviceParams.migDevice = nv_dev->gpu_mig_device;
allocateDeviceParams.privateData = nv_dev;
allocateDeviceParams.eventCallback = nv_drm_event_callback;
@@ -659,6 +823,9 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_dev->requiresVrrSemaphores = resInfo.caps.requiresVrrSemaphores;
nv_dev->vtFbBaseAddress = resInfo.vtFbBaseAddress;
nv_dev->vtFbSize = resInfo.vtFbSize;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
gen = nv_dev->pageKindGeneration;
kind = nv_dev->genericPageKind;
@@ -842,6 +1009,62 @@ static void nv_drm_master_set(struct drm_device *dev,
}
#endif
static
int nv_drm_reset_input_colorspace(struct drm_device *dev)
{
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
struct drm_plane *plane;
struct nv_drm_plane_state *nv_drm_plane_state;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
bool do_reset = false;
NvU32 flags = 0;
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
#if defined(DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
#endif
drm_modeset_acquire_init(&ctx, flags);
state->acquire_ctx = &ctx;
nv_drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto out;
}
nv_drm_plane_state = to_nv_drm_plane_state(plane_state);
if (nv_drm_plane_state) {
if (nv_drm_plane_state->input_colorspace != NV_DRM_INPUT_COLOR_SPACE_NONE) {
nv_drm_plane_state->input_colorspace = NV_DRM_INPUT_COLOR_SPACE_NONE;
do_reset = true;
}
}
}
if (do_reset) {
ret = drm_atomic_commit(state);
}
out:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
// In case of success, drm_atomic_commit() takes care of cleaning up and freeing the state.
if (ret != 0) {
drm_atomic_state_free(state);
}
#endif
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
#if defined(NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG)
static
@@ -885,6 +1108,12 @@ void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
drm_modeset_unlock_all(dev);
nvKms->releaseOwnership(nv_dev->pDevice);
} else {
int err = nv_drm_reset_input_colorspace(dev);
if (err != 0) {
NV_DRM_DEV_LOG_WARN(nv_dev,
"nv_drm_reset_input_colorspace failed with error code: %d !", err);
}
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
@@ -922,6 +1151,7 @@ static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
}
params->gpu_id = nv_dev->gpu_info.gpu_id;
params->mig_device = nv_dev->gpu_mig_device;
params->primary_index = dev->primary->index;
params->supports_alloc = false;
params->generic_page_kind = 0;
@@ -1712,7 +1942,7 @@ static const struct file_operations nv_drm_fops = {
.llseek = noop_llseek,
#if defined(NV_FILE_OPERATIONS_FOP_UNSIGNED_OFFSET_PRESENT)
#if defined(FOP_UNSIGNED_OFFSET)
.fop_flags = FOP_UNSIGNED_OFFSET,
#endif
};
@@ -1904,13 +2134,20 @@ static struct drm_driver nv_drm_driver = {
.name = "nvidia-drm",
.desc = "NVIDIA DRM driver",
#if defined(NV_DRM_DRIVER_HAS_DATE)
.date = "20160202",
#endif
#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
.device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
.legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
#endif
// XXX implement nvidia-drm's own .fbdev_probe callback that uses NVKMS kapi directly
#if defined(NV_DRM_FBDEV_AVAILABLE) && defined(DRM_FBDEV_TTM_DRIVER_OPS)
DRM_FBDEV_TTM_DRIVER_OPS,
#endif
};
@@ -1947,16 +2184,16 @@ void nv_drm_update_drm_driver_features(void)
/*
* Helper function for allocate/register DRM device for given NVIDIA GPU ID.
*/
void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *gpu_info)
{
struct nv_drm_device *nv_dev = NULL;
struct drm_device *dev = NULL;
struct device *device = gpu_info->os_device_ptr;
struct device *device = gpu_info->gpuInfo.os_device_ptr;
bool bus_is_pci;
DRM_DEBUG(
"Registering device for NVIDIA GPU ID 0x08%x",
gpu_info->gpu_id);
gpu_info->gpuInfo.gpu_id);
/* Allocate NVIDIA-DRM device */
@@ -1968,7 +2205,8 @@ void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
return;
}
nv_dev->gpu_info = *gpu_info;
nv_dev->gpu_info = gpu_info->gpuInfo;
nv_dev->gpu_mig_device = gpu_info->migDevice;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
mutex_init(&nv_dev->lock);
@@ -2013,14 +2251,43 @@ void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
if (bus_is_pci) {
struct pci_dev *pdev = to_pci_dev(device);
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_HAS_DRIVER_ARG)
drm_aperture_remove_conflicting_pci_framebuffers(pdev, &nv_drm_driver);
#else
drm_aperture_remove_conflicting_pci_framebuffers(pdev, nv_drm_driver.name);
#endif
#elif defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT)
aperture_remove_conflicting_pci_devices(pdev, nv_drm_driver.name);
#endif
nvKms->framebufferConsoleDisabled(nv_dev->pDevice);
} else {
resource_size_t base = (resource_size_t) nv_dev->vtFbBaseAddress;
resource_size_t size = (resource_size_t) nv_dev->vtFbSize;
if (base > 0 && size > 0) {
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_PRESENT)
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_DRIVER_ARG)
drm_aperture_remove_conflicting_framebuffers(base, size, false, &nv_drm_driver);
#elif defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_FRAMEBUFFERS_HAS_NO_PRIMARY_ARG)
drm_aperture_remove_conflicting_framebuffers(base, size, &nv_drm_driver);
#else
drm_aperture_remove_conflicting_framebuffers(base, size, false, nv_drm_driver.name);
#endif
#elif defined(NV_APERTURE_REMOVE_CONFLICTING_DEVICES_PRESENT)
aperture_remove_conflicting_devices(base, size, nv_drm_driver.name);
#endif
} else {
NV_DRM_DEV_LOG_INFO(nv_dev, "Invalid framebuffer console info");
}
}
#if defined(NV_DRM_FBDEV_TTM_AVAILABLE)
#if defined(NV_DRM_CLIENT_AVAILABLE)
drm_client_setup(dev, NULL);
#elif defined(NV_DRM_FBDEV_TTM_AVAILABLE)
drm_fbdev_ttm_setup(dev, 32);
#elif defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
drm_fbdev_generic_setup(dev, 32);
@@ -2050,7 +2317,7 @@ failed_drm_alloc:
#if defined(NV_LINUX)
int nv_drm_probe_devices(void)
{
nv_gpu_info_t *gpu_info = NULL;
struct NvKmsKapiGpuInfo *gpu_info = NULL;
NvU32 gpu_count = 0;
NvU32 i;

View File

@@ -27,13 +27,15 @@
#if defined(NV_DRM_AVAILABLE)
struct NvKmsKapiGpuInfo;
int nv_drm_probe_devices(void);
void nv_drm_remove_devices(void);
void nv_drm_suspend_resume(NvBool suspend);
void nv_drm_register_drm_device(const nv_gpu_info_t *);
void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *);
void nv_drm_update_drm_driver_features(void);

View File

@@ -319,7 +319,7 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
if (nv_encoder != NULL) {
NV_DRM_DEV_LOG_ERR(
NV_DRM_DEV_LOG_INFO(
nv_dev,
"Encoder with NvKmsKapiDisplay 0x%08x already exists.",
hDisplay);

View File

@@ -161,6 +161,20 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
params.planes[i].memory = nv_gem->pMemory;
params.planes[i].offset = fb->offsets[i];
params.planes[i].pitch = fb->pitches[i];
/*
* XXX Use drm_framebuffer_funcs.dirty and
* drm_fb_helper_funcs.fb_dirty instead
*
* Currently using noDisplayCaching when registering surfaces with
* NVKMS that are using memory allocated through the DRM
* Dumb-Buffers API. This prevents Display Idle Frame Rate from
* kicking in and preventing CPU updates to the surface memory from
* being reflected on the display. Ideally, DIFR would be
* dynamically disabled whenever a user of the memory blits to the
* frontbuffer. DRM provides the needed callbacks to achieve this.
*/
params.noDisplayCaching |= !!nv_gem->is_drm_dumb;
}
}
params.height = fb->height;
@@ -188,6 +202,43 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
params.explicit_layout = false;
}
/*
* XXX work around an invalid pitch assumption in DRM.
*
* The smallest pitch the display hardware allows is 256.
*
* If a DRM client allocates a 32x32 cursor surface through
* DRM_IOCTL_MODE_CREATE_DUMB, we'll correctly round the pitch to 256:
*
* pitch = round(32 (width) * 4 (Bpp), 256) = 256
*
* and then allocate an 8k surface:
*
* size = pitch * 32 (height) = 8192
*
* and report the rounded pitch and size back to the client through the
* struct drm_mode_create_dumb ioctl params.
*
* But when the DRM client passes that buffer object handle to
* DRM_IOCTL_MODE_CURSOR, the client has no way to specify the pitch. This
* path in drm:
*
* DRM_IOCTL_MODE_CURSOR
* drm_mode_cursor_ioctl()
* drm_mode_cursor_common()
* drm_mode_cursor_universal()
*
* will implicitly create a framebuffer from the buffer object, and compute
* the pitch as width x 4 bytes per pixel = 128 (without aligning to our
* minimum pitch).
*
* Intercept this case and force the pitch back to 256.
*/
if ((params.width == 32) &&
(params.height == 32) &&
(params.planes[0].pitch == 128)) {
params.planes[0].pitch = 256;
}
/* Create NvKmsKapiSurface */
nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -1132,7 +1132,7 @@ static void __nv_drm_semsurf_fence_ctx_destroy(
*/
nv_drm_workthread_shutdown(&ctx->worker);
nv_drm_del_timer_sync(&ctx->timer);
nv_timer_delete_sync(&ctx->timer.kernel_timer);
/*
* The semaphore surface could still be sending callbacks, so it is still

View File

@@ -166,4 +166,37 @@ uint32_t *nv_drm_format_array_alloc(
return array;
}
bool nv_drm_format_is_yuv(u32 format)
{
#if defined(NV_DRM_FORMAT_INFO_HAS_IS_YUV)
const struct drm_format_info *format_info = drm_format_info(format);
return (format_info != NULL) && format_info->is_yuv;
#else
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
#if defined(DRM_FORMAT_P210)
case DRM_FORMAT_P210:
#endif
#if defined(DRM_FORMAT_P010)
case DRM_FORMAT_P010:
#endif
#if defined(DRM_FORMAT_P012)
case DRM_FORMAT_P012:
#endif
return true;
default:
return false;
}
#endif
}
#endif

View File

@@ -38,6 +38,8 @@ uint32_t *nv_drm_format_array_alloc(
unsigned int *count,
const long unsigned int nvkms_format_mask);
bool nv_drm_format_is_yuv(u32 format);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_FORMAT_H__ */

View File

@@ -167,7 +167,7 @@ static int __nv_drm_gem_nvkms_map(
goto done;
}
if (!nv_dev->hasVideoMemory) {
if (!nvKms->isVidmem(pMemory)) {
goto done;
}
@@ -218,11 +218,13 @@ static void *__nv_drm_gem_nvkms_prime_vmap(
/*
* If this buffer isn't physically mapped, it might be backed by struct
* pages. Use vmap in that case.
* pages. Use vmap in that case. Do a noncached mapping for system memory
* as display is non io-coherent device in case of Tegra.
*/
if (nv_nvkms_memory->pages_count > 0) {
return nv_drm_vmap(nv_nvkms_memory->pages,
nv_nvkms_memory->pages_count);
nv_nvkms_memory->pages_count,
false);
}
return ERR_PTR(-ENOMEM);
@@ -306,12 +308,12 @@ static int __nv_drm_nvkms_gem_obj_init(
nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
nv_nvkms_memory->physically_mapped = false;
if (!nvKms->getMemoryPages(nv_dev->pDevice,
if (!nvKms->isVidmem(pMemory) &&
!nvKms->getMemoryPages(nv_dev->pDevice,
pMemory,
&pages,
&numPages) &&
!nv_dev->hasVideoMemory) {
/* GetMemoryPages may fail for vidmem allocations,
&numPages)) {
/* GetMemoryPages will fail for vidmem allocations,
* but it should not fail for sysmem allocations. */
NV_DRM_DEV_LOG_ERR(nv_dev,
"Failed to get memory pages for NvKmsKapiMemory 0x%p",
@@ -383,6 +385,8 @@ int nv_drm_dumb_create(
goto nvkms_gem_obj_init_failed;
}
nv_nvkms_memory->base.is_drm_dumb = true;
/* Always map dumb buffer memory up front. Clients are only expected
* to use dumb buffers for software rendering, so they're not much use
* without a CPU mapping.

View File

@@ -72,7 +72,8 @@ static void *__nv_drm_gem_user_memory_prime_vmap(
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
return nv_drm_vmap(nv_user_memory->pages,
nv_user_memory->pages_count);
nv_user_memory->pages_count,
true);
}
static void __nv_drm_gem_user_memory_prime_vunmap(

View File

@@ -172,8 +172,11 @@ struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
*/
gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src);
if (gem_dst)
return gem_dst;
if (gem_dst == NULL) {
return ERR_PTR(-ENOTSUPP);
}
return gem_dst;
}
}
#endif /* NV_DMA_BUF_OWNER_PRESENT */

View File

@@ -73,6 +73,8 @@ struct nv_drm_gem_object {
struct NvKmsKapiMemory *pMemory;
bool is_drm_dumb;
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_t resv;
#endif

View File

@@ -69,6 +69,13 @@
#endif //NV_DRM_ROTATION_AVAILABLE
/*
* Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to
* accommodate new color props") in Linux v6.8 increased the pre-object
* property limit to from 24 to 64.
*/
#define NV_DRM_USE_EXTENDED_PROPERTIES (DRM_OBJECT_MAX_PROPERTY >= 64)
/*
* drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
* (2017-09-26) and drm_dev_unref() is removed by

View File

@@ -182,6 +182,7 @@ struct drm_nvidia_gem_import_userspace_memory_params {
struct drm_nvidia_get_dev_info_params {
uint32_t gpu_id; /* OUT */
uint32_t mig_device; /* OUT */
uint32_t primary_index; /* OUT; the "card%d" value */
uint32_t supports_alloc; /* OUT */

View File

@@ -37,7 +37,7 @@ module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
#if defined(NV_DRM_FBDEV_AVAILABLE)
MODULE_PARM_DESC(
fbdev,
"Create a framebuffer device (1 = enable, 0 = disable (default)) (EXPERIMENTAL)");
"Create a framebuffer device (1 = enable (default), 0 = disable)");
module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
#endif

View File

@@ -677,6 +677,33 @@ int nv_drm_atomic_commit(struct drm_device *dev,
"Flip event timeout on head %u", nv_crtc->head);
}
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
/*
* If the legacy LUT needs to be updated, ensure that the previous LUT
* update is complete first.
*/
if (crtc_state->color_mgmt_changed) {
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
nv_crtc->head,
!nonblock /* waitForCompletion */);
/* If checking the LUT notifier failed, assume no LUT notifier is set. */
if (!complete) {
if (nonblock) {
return -EBUSY;
} else {
/*
* checkLutNotifier should wait on the notifier in this
* case, so we should only get here if the wait timed out.
*/
NV_DRM_DEV_LOG_ERR(
nv_dev,
"LUT notifier timeout on head %u", nv_crtc->head);
}
}
}
#endif
}
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG)
@@ -803,6 +830,19 @@ int nv_drm_atomic_commit(struct drm_device *dev,
__nv_drm_handle_flip_event(nv_crtc);
}
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (crtc_state->color_mgmt_changed) {
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
nv_crtc->head,
true /* waitForCompletion */);
if (!complete) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"LUT notifier timeout on head %u", nv_crtc->head);
}
}
#endif
}
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -42,7 +42,7 @@
#endif
bool nv_drm_modeset_module_param = false;
bool nv_drm_fbdev_module_param = false;
bool nv_drm_fbdev_module_param = true;
void *nv_drm_calloc(size_t nmemb, size_t size)
{
@@ -156,9 +156,15 @@ void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
#define VM_USERMAP 0
#endif
void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached)
{
return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
pgprot_t prot = PAGE_KERNEL;
if (!cached) {
prot = pgprot_noncached(PAGE_KERNEL);
}
return vmap(pages, pages_count, VM_USERMAP, prot);
}
void nv_drm_vunmap(void *address)
@@ -230,15 +236,6 @@ unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
return jiffies + msecs_to_jiffies(relative_timeout_ms);
}
bool nv_drm_del_timer_sync(nv_drm_timer *timer)
{
if (del_timer_sync(&timer->kernel_timer)) {
return true;
} else {
return false;
}
}
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence)
{

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -58,16 +58,6 @@ typedef struct nv_timer nv_drm_timer;
#error "Need to define kernel timer callback primitives for this OS"
#endif
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif
struct page;
/* Set to true when the atomic modeset feature is enabled. */
@@ -90,7 +80,7 @@ int nv_drm_lock_user_pages(unsigned long address,
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached);
void nv_drm_vunmap(void *address);
@@ -111,8 +101,6 @@ void nv_drm_timer_setup(nv_drm_timer *timer,
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long relative_timeout_ms);
bool nv_drm_del_timer_sync(nv_drm_timer *timer);
unsigned long nv_drm_timer_now(void);
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);

View File

@@ -85,8 +85,15 @@
DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
__dev->gpu_info.gpu_id, ##__VA_ARGS__)
enum nv_drm_input_color_space {
NV_DRM_INPUT_COLOR_SPACE_NONE,
NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR,
NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ
};
struct nv_drm_device {
nv_gpu_info_t gpu_info;
MIGDeviceId gpu_mig_device;
struct drm_device *dev;
@@ -182,6 +189,9 @@ struct nv_drm_device {
struct drm_property *nv_crtc_regamma_divisor_property;
struct nv_drm_device *next;
NvU64 vtFbBaseAddress;
NvU64 vtFbSize;
};
static inline NvU32 nv_drm_next_display_semaphore(

View File

@@ -37,6 +37,8 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
@@ -65,12 +67,17 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_devices
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_pci_devices
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_ttm_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_client_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_helper_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_plane_create_color_properties
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_mixed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pfn_to_pfn_t
@@ -130,6 +137,8 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_no_primary_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_syncobj_features_present
@@ -137,7 +146,9 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_framebuffer_obj_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_ctm_3x4_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_lut
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_info_has_is_yuv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_property_blob_put
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations_fop_unsigned_offset_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_helper_funcs_mode_valid_has_const_mode_arg

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -53,6 +53,7 @@
#include "nv-procfs.h"
#include "nv-kthread-q.h"
#include "nv-time.h"
#include "nv-timer.h"
#include "nv-lock.h"
#include "nv-chardev-numbers.h"
@@ -89,6 +90,9 @@ module_param_named(opportunistic_display_sync, opportunistic_display_sync, bool,
static enum NvKmsDebugForceColorSpace debug_force_color_space = NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE;
module_param_named(debug_force_color_space, debug_force_color_space, uint, 0400);
static bool enable_overlay_layers = true;
module_param_named(enable_overlay_layers, enable_overlay_layers, bool, 0400);
/* These parameters are used for fault injection tests. Normally the defaults
* should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
@@ -99,19 +103,50 @@ MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on modul
static bool malloc_verbose = false;
module_param_named(malloc_verbose, malloc_verbose, bool, 0400);
MODULE_PARM_DESC(conceal_vrr_caps,
"Conceal all display VRR capabilities");
static bool conceal_vrr_caps = false;
module_param_named(conceal_vrr_caps, conceal_vrr_caps, bool, 0400);
/* Fail allocating the RM core channel for NVKMS using the i-th method (see
* FailAllocCoreChannelMethod). Failures not using the i-th method are ignored. */
MODULE_PARM_DESC(fail_alloc_core_channel, "Control testing for hardware core channel allocation failure");
static int fail_alloc_core_channel_method = -1;
module_param_named(fail_alloc_core_channel, fail_alloc_core_channel_method, int, 0400);
#if NVKMS_CONFIG_FILE_SUPPORTED
/* This parameter is used to find the dpy override conf file */
#define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL)
MODULE_PARM_DESC(config_file,
"Path to the nvidia-modeset configuration file "
"(default: disabled)");
"Path to the nvidia-modeset configuration file (default: disabled)");
static char *nvkms_conf = NULL;
module_param_named(config_file, nvkms_conf, charp, 0400);
#endif
static atomic_t nvkms_alloc_called_count;
NvBool nvkms_test_fail_alloc_core_channel(
enum FailAllocCoreChannelMethod method
)
{
if (method != fail_alloc_core_channel_method) {
// don't fail if it's not the currently specified method
return NV_FALSE;
}
printk(KERN_INFO NVKMS_LOG_PREFIX
"Failing core channel allocation using method %d",
fail_alloc_core_channel_method);
return NV_TRUE;
}
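The hook above is intended to be called from the core-channel allocation paths identified by FailAllocCoreChannelMethod. A minimal call-site sketch, assuming a hypothetical setup function (only the hook itself and the enum values come from this change):

    /*
     * Hypothetical call site, for illustration only: a core-channel setup
     * path consults the fault-injection hook before doing the real work,
     * so loading the module with fail_alloc_core_channel=0 makes this
     * path report failure.
     */
    static NvBool nvkms_example_setup_core_channel(void)
    {
        if (nvkms_test_fail_alloc_core_channel(
                FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL)) {
            /* Behave exactly as if the real allocation had failed. */
            return NV_FALSE;
        }

        /* The real core-channel allocation would happen here. */
        return NV_TRUE;
    }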
NvBool nvkms_conceal_vrr_caps(void)
{
return conceal_vrr_caps;
}
NvBool nvkms_output_rounding_fix(void)
{
return output_rounding_fix;
@@ -150,6 +185,11 @@ enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void)
return debug_force_color_space;
}
NvBool nvkms_enable_overlay_layers(void)
{
return enable_overlay_layers;
}
NvBool nvkms_kernel_supports_syncpts(void)
{
/*
@@ -709,7 +749,7 @@ static void nvkms_kthread_q_callback(void *arg)
* pending timers and then waiting for workqueue callbacks.
*/
if (timer->kernel_timer_created) {
del_timer_sync(&timer->kernel_timer);
nv_timer_delete_sync(&timer->kernel_timer);
}
/*
@@ -1021,6 +1061,11 @@ nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
if (!acpi_video_backlight_use_native()) {
#if defined(NV_ACPI_VIDEO_REGISTER_BACKLIGHT)
nvkms_log(NVKMS_LOG_LEVEL_INFO, NVKMS_LOG_PREFIX,
"ACPI reported no NVIDIA native backlight available; attempting to use ACPI backlight.");
acpi_video_register_backlight();
#endif
return NULL;
}
#endif
@@ -1463,6 +1508,8 @@ static size_t nvkms_config_file_open
loff_t pos = 0;
#endif
*buff = NULL;
if (!nvkms_fs_mounted()) {
printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n");
return 0;
@@ -1486,6 +1533,11 @@ static size_t nvkms_config_file_open
goto done;
}
// Do not allocate a zero-sized buffer
if (file_size == 0) {
goto done;
}
*buff = nvkms_alloc(file_size, NV_FALSE);
if (*buff == NULL) {
printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n");
@@ -1881,7 +1933,11 @@ restart:
* completion, and we wait for queue completion with
* nv_kthread_q_stop below.
*/
#if !defined(NV_BSD) && NV_IS_EXPORT_SYMBOL_PRESENT_timer_delete_sync
if (timer_delete_sync(&timer->kernel_timer) == 1) {
#else
if (del_timer_sync(&timer->kernel_timer) == 1) {
#endif
/* We've deactivated the timer, so we need to clean up after it */
list_del(&timer->timers_list);
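The conditional above (and the nv_timer_delete_sync() call earlier in this file) exists because newer kernels export timer_delete_sync() as the replacement for del_timer_sync(), and the conftest is_export_symbol_present_timer_delete_sync decides which name is available. A compat wrapper capturing the same idea might look like the following sketch (the real nv-timer.h helper may differ):

    #include <linux/timer.h>

    /* Sketch of a compat wrapper; the actual nv-timer.h implementation
     * may differ. Uses timer_delete_sync() where the conftest found the
     * exported symbol, and falls back to del_timer_sync() otherwise. */
    static inline int nv_example_timer_delete_sync(struct timer_list *timer)
    {
    #if !defined(NV_BSD) && NV_IS_EXPORT_SYMBOL_PRESENT_timer_delete_sync
        return timer_delete_sync(timer);
    #else
        return del_timer_sync(timer);
    #endif
    }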

View File

@@ -104,6 +104,13 @@ typedef struct {
} read_minval;
} NvKmsSyncPtOpParams;
enum FailAllocCoreChannelMethod {
FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL = 0,
FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE = 1,
};
NvBool nvkms_test_fail_alloc_core_channel(enum FailAllocCoreChannelMethod method);
NvBool nvkms_conceal_vrr_caps(void);
NvBool nvkms_output_rounding_fix(void);
NvBool nvkms_disable_hdmi_frl(void);
NvBool nvkms_disable_vrr_memclk_switch(void);
@@ -111,6 +118,7 @@ NvBool nvkms_hdmi_deepcolor(void);
NvBool nvkms_vblank_sem_control(void);
NvBool nvkms_opportunistic_display_sync(void);
enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void);
NvBool nvkms_enable_overlay_layers(void);
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,

View File

@@ -40,9 +40,6 @@ NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO)
NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary
NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o
quiet_cmd_symlink = SYMLINK $@
cmd_symlink = ln -sf $< $@
targets += $(NVIDIA_MODESET_BINARY_OBJECT_O)
$(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE
@@ -55,7 +52,7 @@ nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O)
# Define nvidia-modeset.ko-specific CFLAGS.
#
NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset
NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset -I$(src)/common/inc
NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
# Some Android kernels prohibit driver use of filesystem functions like
@@ -105,4 +102,6 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_backlight_use_native
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_register_backlight
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg

View File

@@ -189,6 +189,12 @@ int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data), void *data);
/*
* Flags to be used with persistent APIs
*/
#define NVIDIA_P2P_FLAGS_DEFAULT 0
#define NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING 1
/*
* @brief
* Pin and make the pages underlying a range of GPU virtual memory
@@ -212,7 +218,11 @@ int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] flags
* Must be set to zero for now.
* NVIDIA_P2P_FLAGS_DEFAULT:
* Default value to be used if no specific behavior is expected.
* NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING:
* Force BAR1 mappings on certain coherent platforms,
* subject to capability and supported topology.
*
* @return
* 0 upon successful completion.
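The flag values above are consumed by the persistent-mapping entry points rather than by nvidia_p2p_get_pages() itself. A hedged sketch of a third-party kernel driver requesting a BAR1 mapping, assuming nvidia_p2p_get_pages_persistent() keeps its long-standing signature (the address and length are placeholders supplied by the caller):

    /* Sketch only: gpu_va/len are caller-provided placeholders, and the
     * persistent API signature is assumed, not taken from this diff. */
    static int example_pin_with_bar1(uint64_t gpu_va, uint64_t len,
                                     struct nvidia_p2p_page_table **pt)
    {
        int ret = nvidia_p2p_get_pages_persistent(
            gpu_va, len, pt, NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING);

        if (ret != 0) {
            /* Retrying with NVIDIA_P2P_FLAGS_DEFAULT is a reasonable
             * fallback on platforms where BAR1 mappings are unavailable. */
            ret = nvidia_p2p_get_pages_persistent(
                gpu_va, len, pt, NVIDIA_P2P_FLAGS_DEFAULT);
        }
        return ret;
    }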
@@ -443,35 +453,19 @@ typedef struct nvidia_p2p_rsync_reg_info {
/*
* @brief
* Gets rsync (GEN-ID) register information associated with the supported
* NPUs.
*
* The caller would use the returned information {GPU device, NPU device,
* socket-id, cluster-id} to pick the optimal generation registers to issue
* RSYNC (NVLink HW flush).
*
* The interface allocates structures to return the information, hence
* nvidia_p2p_put_rsync_registers() must be called to free the structures.
*
* Note, cluster-id is hardcoded to zero as early system configurations would
* only support cluster mode i.e. all devices would share the same cluster-id
* (0). In the future, appropriate kernel support would be needed to query
* cluster-ids.
*
* @param[out] reg_info
* A pointer to the rsync reg info structure.
* This interface is no longer supported and will always return an error. It
* is left in place (for now) to allow third-party callers to build without
* any errors.
*
* @Returns
* 0 Upon successful completion. Otherwise, returns negative value.
* -ENODEV
*/
int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info);
/*
* @brief
* Frees the structures allocated by nvidia_p2p_get_rsync_registers().
*
* @param[in] reg_info
* A pointer to the rsync reg info structure.
* This interface is no longer supported. It is left in place (for now) to
* allow third-party callers to build without any errors.
*/
void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info);
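Since both rsync entry points are now stubs kept only for link compatibility, existing callers can keep the usual get/put pattern and simply treat the get call as always failing. A short sketch:

    /* Sketch: on current drivers nvidia_p2p_get_rsync_registers() is
     * documented above to fail (-ENODEV), so the put call below is
     * effectively dead code and is kept only for older drivers. */
    static void example_probe_rsync(void)
    {
        nvidia_p2p_rsync_reg_info_t *info = NULL;

        if (nvidia_p2p_get_rsync_registers(&info) != 0) {
            return; /* unsupported: nothing to release */
        }

        nvidia_p2p_put_rsync_registers(info);
    }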

View File

@@ -1,30 +1,25 @@
/*******************************************************************************
Copyright (c) 2024 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
// AUTO GENERATED -- DO NOT EDIT - this file automatically generated by refhdr2class.pl
// Command: ../../../bin/manuals/refhdr2class.pl clc365.h c365 ACCESS_COUNTER_NOTIFY_BUFFER --search_str=NV_ACCESS_COUNTER --input_file=nv_ref_dev_access_counter.h
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _clc365_h_
#define _clc365_h_

View File

@@ -1,30 +1,25 @@
/*******************************************************************************
Copyright (c) 2024 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
// AUTO GENERATED -- DO NOT EDIT - this file automatically generated by refhdr2class.pl
// Command: ../../../bin/manuals/refhdr2class.pl clc369.h c369 MMU_FAULT_BUFFER --search_str=NV_MMU_FAULT --input_file=nv_ref_dev_mmu_fault.h
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _clc369_h_
#define _clc369_h_

View File

@@ -1,26 +1,25 @@
/*******************************************************************************
Copyright (c) 2012-2015 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _clc36f_h_
#define _clc36f_h_
@@ -257,7 +256,6 @@ typedef volatile struct Nvc36fControl_struct {
#define NVC36F_CLEAR_FAULTED_TYPE 31:31
#define NVC36F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000
#define NVC36F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001
#define NVC36F_QUADRO_VERIFY (0x000000a0)
/* GPFIFO entry format */

View File

@@ -1,26 +1,25 @@
/*******************************************************************************
Copyright (c) 2012-2015 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _clc46f_h_
#define _clc46f_h_
@@ -259,7 +258,6 @@ typedef volatile struct Nvc46fControl_struct {
#define NVC46F_CLEAR_FAULTED_TYPE 31:31
#define NVC46F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000
#define NVC46F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001
#define NVC46F_QUADRO_VERIFY (0x000000a0)
/* GPFIFO entry format */

View File

@@ -1,26 +1,25 @@
/*******************************************************************************
Copyright (c) 2012-2015 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _clc56f_h_
#define _clc56f_h_
@@ -261,7 +260,6 @@ typedef volatile struct Nvc56fControl_struct {
#define NVC56F_CLEAR_FAULTED_TYPE 31:31
#define NVC56F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000
#define NVC56F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001
#define NVC56F_QUADRO_VERIFY (0x000000a0)
/* GPFIFO entry format */

View File

@@ -1,19 +1,19 @@
/*******************************************************************************
Copyright (c) 1993-2004 NVIDIA Corporation
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -21,8 +21,6 @@
*******************************************************************************/
#include "nvtypes.h"
#ifndef _clc5b5_h_
@@ -34,64 +32,6 @@ extern "C" {
#define TURING_DMA_COPY_A (0x0000C5B5)
typedef volatile struct _clc5b5_tag0 {
NvV32 Reserved00[0x40];
NvV32 Nop; // 0x00000100 - 0x00000103
NvV32 Reserved01[0xF];
NvV32 PmTrigger; // 0x00000140 - 0x00000143
NvV32 Reserved02[0x3F];
NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243
NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247
NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B
NvV32 Reserved03[0x2];
NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257
NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B
NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F
NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263
NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267
NvV32 Reserved04[0x6];
NvV32 SetGlobalCounterUpper; // 0x00000280 - 0x00000283
NvV32 SetGlobalCounterLower; // 0x00000284 - 0x00000287
NvV32 SetPageoutStartPAUpper; // 0x00000288 - 0x0000028B
NvV32 SetPageoutStartPALower; // 0x0000028C - 0x0000028F
NvV32 Reserved05[0x1C];
NvV32 LaunchDma; // 0x00000300 - 0x00000303
NvV32 Reserved06[0x3F];
NvV32 OffsetInUpper; // 0x00000400 - 0x00000403
NvV32 OffsetInLower; // 0x00000404 - 0x00000407
NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B
NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F
NvV32 PitchIn; // 0x00000410 - 0x00000413
NvV32 PitchOut; // 0x00000414 - 0x00000417
NvV32 LineLengthIn; // 0x00000418 - 0x0000041B
NvV32 LineCount; // 0x0000041C - 0x0000041F
NvV32 Reserved07[0xB8];
NvV32 SetRemapConstA; // 0x00000700 - 0x00000703
NvV32 SetRemapConstB; // 0x00000704 - 0x00000707
NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B
NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F
NvV32 SetDstWidth; // 0x00000710 - 0x00000713
NvV32 SetDstHeight; // 0x00000714 - 0x00000717
NvV32 SetDstDepth; // 0x00000718 - 0x0000071B
NvV32 SetDstLayer; // 0x0000071C - 0x0000071F
NvV32 SetDstOrigin; // 0x00000720 - 0x00000723
NvV32 Reserved08[0x1];
NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B
NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F
NvV32 SetSrcHeight; // 0x00000730 - 0x00000733
NvV32 SetSrcDepth; // 0x00000734 - 0x00000737
NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B
NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F
NvV32 Reserved09[0x1];
NvV32 SrcOriginX; // 0x00000744 - 0x00000747
NvV32 SrcOriginY; // 0x00000748 - 0x0000074B
NvV32 DstOriginX; // 0x0000074C - 0x0000074F
NvV32 DstOriginY; // 0x00000750 - 0x00000753
NvV32 Reserved10[0x270];
NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117
NvV32 Reserved11[0x3BA];
} turing_dma_copy_aControlPio;
#define NVC5B5_NOP (0x00000100)
#define NVC5B5_NOP_PARAMETER 31:0
#define NVC5B5_PM_TRIGGER (0x00000140)
@@ -125,14 +65,6 @@ typedef volatile struct _clc5b5_tag0 {
#define NVC5B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC5B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC5B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2
#define NVC5B5_SET_GLOBAL_COUNTER_UPPER (0x00000280)
#define NVC5B5_SET_GLOBAL_COUNTER_UPPER_V 31:0
#define NVC5B5_SET_GLOBAL_COUNTER_LOWER (0x00000284)
#define NVC5B5_SET_GLOBAL_COUNTER_LOWER_V 31:0
#define NVC5B5_SET_PAGEOUT_START_PAUPPER (0x00000288)
#define NVC5B5_SET_PAGEOUT_START_PAUPPER_V 4:0
#define NVC5B5_SET_PAGEOUT_START_PALOWER (0x0000028C)
#define NVC5B5_SET_PAGEOUT_START_PALOWER_V 31:0
#define NVC5B5_LAUNCH_DMA (0x00000300)
#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
@@ -199,8 +131,6 @@ typedef volatile struct _clc5b5_tag0 {
#define NVC5B5_LAUNCH_DMA_VPRMODE 23:22
#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002)
#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003)
#define NVC5B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
#define NVC5B5_LAUNCH_DMA_DISABLE_PLC 26:26
#define NVC5B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)

View File

@@ -1,19 +1,19 @@
/*******************************************************************************
Copyright (c) 1993-2004 NVIDIA Corporation
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -21,8 +21,6 @@
*******************************************************************************/
#include "nvtypes.h"
#ifndef _clc6b5_h_
@@ -34,64 +32,6 @@ extern "C" {
#define AMPERE_DMA_COPY_A (0x0000C6B5)
typedef volatile struct _clc6b5_tag0 {
NvV32 Reserved00[0x40];
NvV32 Nop; // 0x00000100 - 0x00000103
NvV32 Reserved01[0xF];
NvV32 PmTrigger; // 0x00000140 - 0x00000143
NvV32 Reserved02[0x3F];
NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243
NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247
NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B
NvV32 Reserved03[0x2];
NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257
NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B
NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F
NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263
NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267
NvV32 Reserved04[0x6];
NvV32 SetGlobalCounterUpper; // 0x00000280 - 0x00000283
NvV32 SetGlobalCounterLower; // 0x00000284 - 0x00000287
NvV32 SetPageoutStartPAUpper; // 0x00000288 - 0x0000028B
NvV32 SetPageoutStartPALower; // 0x0000028C - 0x0000028F
NvV32 Reserved05[0x1C];
NvV32 LaunchDma; // 0x00000300 - 0x00000303
NvV32 Reserved06[0x3F];
NvV32 OffsetInUpper; // 0x00000400 - 0x00000403
NvV32 OffsetInLower; // 0x00000404 - 0x00000407
NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B
NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F
NvV32 PitchIn; // 0x00000410 - 0x00000413
NvV32 PitchOut; // 0x00000414 - 0x00000417
NvV32 LineLengthIn; // 0x00000418 - 0x0000041B
NvV32 LineCount; // 0x0000041C - 0x0000041F
NvV32 Reserved07[0xB8];
NvV32 SetRemapConstA; // 0x00000700 - 0x00000703
NvV32 SetRemapConstB; // 0x00000704 - 0x00000707
NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B
NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F
NvV32 SetDstWidth; // 0x00000710 - 0x00000713
NvV32 SetDstHeight; // 0x00000714 - 0x00000717
NvV32 SetDstDepth; // 0x00000718 - 0x0000071B
NvV32 SetDstLayer; // 0x0000071C - 0x0000071F
NvV32 SetDstOrigin; // 0x00000720 - 0x00000723
NvV32 Reserved08[0x1];
NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B
NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F
NvV32 SetSrcHeight; // 0x00000730 - 0x00000733
NvV32 SetSrcDepth; // 0x00000734 - 0x00000737
NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B
NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F
NvV32 Reserved09[0x1];
NvV32 SrcOriginX; // 0x00000744 - 0x00000747
NvV32 SrcOriginY; // 0x00000748 - 0x0000074B
NvV32 DstOriginX; // 0x0000074C - 0x0000074F
NvV32 DstOriginY; // 0x00000750 - 0x00000753
NvV32 Reserved10[0x270];
NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117
NvV32 Reserved11[0x3BA];
} ampere_dma_copy_aControlPio;
#define NVC6B5_NOP (0x00000100)
#define NVC6B5_NOP_PARAMETER 31:0
#define NVC6B5_PM_TRIGGER (0x00000140)
@@ -131,14 +71,6 @@ typedef volatile struct _clc6b5_tag0 {
#define NVC6B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2
#define NVC6B5_SET_DST_PHYS_MODE_PEER_ID 8:6
#define NVC6B5_SET_DST_PHYS_MODE_FLA 9:9
#define NVC6B5_SET_GLOBAL_COUNTER_UPPER (0x00000280)
#define NVC6B5_SET_GLOBAL_COUNTER_UPPER_V 31:0
#define NVC6B5_SET_GLOBAL_COUNTER_LOWER (0x00000284)
#define NVC6B5_SET_GLOBAL_COUNTER_LOWER_V 31:0
#define NVC6B5_SET_PAGEOUT_START_PAUPPER (0x00000288)
#define NVC6B5_SET_PAGEOUT_START_PAUPPER_V 4:0
#define NVC6B5_SET_PAGEOUT_START_PALOWER (0x0000028C)
#define NVC6B5_SET_PAGEOUT_START_PALOWER_V 31:0
#define NVC6B5_LAUNCH_DMA (0x00000300)
#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
@@ -199,8 +131,6 @@ typedef volatile struct _clc6b5_tag0 {
#define NVC6B5_LAUNCH_DMA_VPRMODE 23:22
#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002)
#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003)
#define NVC6B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
#define NVC6B5_LAUNCH_DMA_DISABLE_PLC 26:26
#define NVC6B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)

View File

@@ -1,19 +1,19 @@
/*******************************************************************************
Copyright (c) 1993-2004 NVIDIA Corporation
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -21,8 +21,6 @@
*******************************************************************************/
#include "nvtypes.h"
#ifndef _clc7b5_h_
@@ -34,69 +32,6 @@ extern "C" {
#define AMPERE_DMA_COPY_B (0x0000C7B5)
typedef volatile struct _clc7b5_tag0 {
NvV32 Reserved00[0x40];
NvV32 Nop; // 0x00000100 - 0x00000103
NvV32 Reserved01[0xF];
NvV32 PmTrigger; // 0x00000140 - 0x00000143
NvV32 Reserved02[0x36];
NvV32 SetMonitoredFenceType; // 0x0000021C - 0x0000021F
NvV32 SetMonitoredFenceSignalAddrBaseUpper; // 0x00000220 - 0x00000223
NvV32 SetMonitoredFenceSignalAddrBaseLower; // 0x00000224 - 0x00000227
NvV32 Reserved03[0x6];
NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243
NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247
NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B
NvV32 SetSemaphorePayloadUpper; // 0x0000024C - 0x0000024F
NvV32 Reserved04[0x1];
NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257
NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B
NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F
NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263
NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267
NvV32 Reserved05[0x6];
NvV32 SetGlobalCounterUpper; // 0x00000280 - 0x00000283
NvV32 SetGlobalCounterLower; // 0x00000284 - 0x00000287
NvV32 SetPageoutStartPAUpper; // 0x00000288 - 0x0000028B
NvV32 SetPageoutStartPALower; // 0x0000028C - 0x0000028F
NvV32 Reserved06[0x1C];
NvV32 LaunchDma; // 0x00000300 - 0x00000303
NvV32 Reserved07[0x3F];
NvV32 OffsetInUpper; // 0x00000400 - 0x00000403
NvV32 OffsetInLower; // 0x00000404 - 0x00000407
NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B
NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F
NvV32 PitchIn; // 0x00000410 - 0x00000413
NvV32 PitchOut; // 0x00000414 - 0x00000417
NvV32 LineLengthIn; // 0x00000418 - 0x0000041B
NvV32 LineCount; // 0x0000041C - 0x0000041F
NvV32 Reserved08[0xB8];
NvV32 SetRemapConstA; // 0x00000700 - 0x00000703
NvV32 SetRemapConstB; // 0x00000704 - 0x00000707
NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B
NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F
NvV32 SetDstWidth; // 0x00000710 - 0x00000713
NvV32 SetDstHeight; // 0x00000714 - 0x00000717
NvV32 SetDstDepth; // 0x00000718 - 0x0000071B
NvV32 SetDstLayer; // 0x0000071C - 0x0000071F
NvV32 SetDstOrigin; // 0x00000720 - 0x00000723
NvV32 Reserved09[0x1];
NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B
NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F
NvV32 SetSrcHeight; // 0x00000730 - 0x00000733
NvV32 SetSrcDepth; // 0x00000734 - 0x00000737
NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B
NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F
NvV32 Reserved10[0x1];
NvV32 SrcOriginX; // 0x00000744 - 0x00000747
NvV32 SrcOriginY; // 0x00000748 - 0x0000074B
NvV32 DstOriginX; // 0x0000074C - 0x0000074F
NvV32 DstOriginY; // 0x00000750 - 0x00000753
NvV32 Reserved11[0x270];
NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117
NvV32 Reserved12[0x3BA];
} ampere_dma_copy_bControlPio;
#define NVC7B5_NOP (0x00000100)
#define NVC7B5_NOP_PARAMETER 31:0
#define NVC7B5_PM_TRIGGER (0x00000140)
@@ -146,14 +81,6 @@ typedef volatile struct _clc7b5_tag0 {
#define NVC7B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2
#define NVC7B5_SET_DST_PHYS_MODE_PEER_ID 8:6
#define NVC7B5_SET_DST_PHYS_MODE_FLA 9:9
#define NVC7B5_SET_GLOBAL_COUNTER_UPPER (0x00000280)
#define NVC7B5_SET_GLOBAL_COUNTER_UPPER_V 31:0
#define NVC7B5_SET_GLOBAL_COUNTER_LOWER (0x00000284)
#define NVC7B5_SET_GLOBAL_COUNTER_LOWER_V 31:0
#define NVC7B5_SET_PAGEOUT_START_PAUPPER (0x00000288)
#define NVC7B5_SET_PAGEOUT_START_PAUPPER_V 4:0
#define NVC7B5_SET_PAGEOUT_START_PALOWER (0x0000028C)
#define NVC7B5_SET_PAGEOUT_START_PALOWER_V 31:0
#define NVC7B5_LAUNCH_DMA (0x00000300)
#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
@@ -223,8 +150,6 @@ typedef volatile struct _clc7b5_tag0 {
#define NVC7B5_LAUNCH_DMA_VPRMODE 23:22
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002)
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003)
#define NVC7B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
#define NVC7B5_LAUNCH_DMA_DISABLE_PLC 26:26
#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)

View File

@@ -1,51 +1,31 @@
/*******************************************************************************
Copyright (c) 2012-2015 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef _clc86f_h_
#define _clc86f_h_
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
/* class HOPPER_CHANNEL_GPFIFO */
/*
* Documentation for HOPPER_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
* chapter "User Control Registers". It is documented as device NV_UDMA.
* The GPFIFO format itself is also documented in dev_pbdma.ref,
* NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
* chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Note there is no .mfs file for this class.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __gh100_clc86f_h__
#define __gh100_clc86f_h__
#define HOPPER_CHANNEL_GPFIFO_A (0x0000C86F)
#define NVC86F_TYPEDEF HOPPER_CHANNELChannelGPFifoA
/* dma flow control data structure */
typedef volatile struct Nvc86fControl_struct {
NvU32 Ignored00[0x010]; /* 0000-003f*/
NvU32 Put; /* put offset, read/write 0040-0043*/
@@ -64,54 +44,7 @@ typedef volatile struct Nvc86fControl_struct {
NvU32 Ignored05[0x5c];
} Nvc86fControl, HopperAControlGPFifo;
/* fields and values */
#define NVC86F_NUMBER_OF_SUBCHANNELS (8)
#define NVC86F_SET_OBJECT (0x00000000)
#define NVC86F_SET_OBJECT_NVCLASS 15:0
#define NVC86F_SET_OBJECT_ENGINE 20:16
#define NVC86F_SET_OBJECT_ENGINE_SW 0x0000001f
#define NVC86F_ILLEGAL (0x00000004)
#define NVC86F_ILLEGAL_HANDLE 31:0
#define NVC86F_NOP (0x00000008)
#define NVC86F_NOP_HANDLE 31:0
#define NVC86F_SEMAPHOREA (0x00000010)
#define NVC86F_SEMAPHOREA_OFFSET_UPPER 7:0
#define NVC86F_SEMAPHOREB (0x00000014)
#define NVC86F_SEMAPHOREB_OFFSET_LOWER 31:2
#define NVC86F_SEMAPHOREC (0x00000018)
#define NVC86F_SEMAPHOREC_PAYLOAD 31:0
#define NVC86F_SEMAPHORED (0x0000001C)
#define NVC86F_SEMAPHORED_OPERATION 4:0
#define NVC86F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001
#define NVC86F_SEMAPHORED_OPERATION_RELEASE 0x00000002
#define NVC86F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004
#define NVC86F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008
#define NVC86F_SEMAPHORED_OPERATION_REDUCTION 0x00000010
#define NVC86F_SEMAPHORED_ACQUIRE_SWITCH 12:12
#define NVC86F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000
#define NVC86F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001
#define NVC86F_SEMAPHORED_RELEASE_WFI 20:20
#define NVC86F_SEMAPHORED_RELEASE_WFI_EN 0x00000000
#define NVC86F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001
#define NVC86F_SEMAPHORED_RELEASE_SIZE 24:24
#define NVC86F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000
#define NVC86F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001
#define NVC86F_SEMAPHORED_REDUCTION 30:27
#define NVC86F_SEMAPHORED_REDUCTION_MIN 0x00000000
#define NVC86F_SEMAPHORED_REDUCTION_MAX 0x00000001
#define NVC86F_SEMAPHORED_REDUCTION_XOR 0x00000002
#define NVC86F_SEMAPHORED_REDUCTION_AND 0x00000003
#define NVC86F_SEMAPHORED_REDUCTION_OR 0x00000004
#define NVC86F_SEMAPHORED_REDUCTION_ADD 0x00000005
#define NVC86F_SEMAPHORED_REDUCTION_INC 0x00000006
#define NVC86F_SEMAPHORED_REDUCTION_DEC 0x00000007
#define NVC86F_SEMAPHORED_FORMAT 31:31
#define NVC86F_SEMAPHORED_FORMAT_SIGNED 0x00000000
#define NVC86F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001
#define NVC86F_NON_STALL_INTERRUPT (0x00000020)
#define NVC86F_NON_STALL_INTERRUPT_HANDLE 31:0
#define NVC86F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR
#define NVC86F_FB_FLUSH_HANDLE 31:0
// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
// specifying the page address for a targeted TLB invalidate and the uTLB for
// a targeted REPLAY_CANCEL for UVM.
@@ -206,67 +139,31 @@ typedef volatile struct Nvc86fControl_struct {
#define NVC86F_MEM_OP_D_MMU_OPERATION_TYPE 23:20
#define NVC86F_MEM_OP_D_MMU_OPERATION_TYPE_RESERVED 0x00000000
#define NVC86F_MEM_OP_D_MMU_OPERATION_TYPE_VIDMEM_ACCESS_BIT_DUMP 0x00000001
#define NVC86F_SET_REFERENCE (0x00000050)
#define NVC86F_SET_REFERENCE_COUNT 31:0
#define NVC86F_SEM_ADDR_LO (0x0000005c)
#define NVC86F_SEM_ADDR_LO_OFFSET 31:2
#define NVC86F_SEM_ADDR_HI (0x00000060)
#define NVC86F_SEM_ADDR_HI_OFFSET 24:0
#define NVC86F_SEM_PAYLOAD_LO (0x00000064)
#define NVC86F_SEM_PAYLOAD_LO_PAYLOAD 31:0
#define NVC86F_SEM_PAYLOAD_HI (0x00000068)
#define NVC86F_SEM_PAYLOAD_HI_PAYLOAD 31:0
#define NVC86F_SEM_EXECUTE (0x0000006c)
#define NVC86F_SEM_EXECUTE_OPERATION 2:0
#define NVC86F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
#define NVC86F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
#define NVC86F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002
#define NVC86F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003
#define NVC86F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004
#define NVC86F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005
#define NVC86F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006
#define NVC86F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12
#define NVC86F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000
#define NVC86F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001
#define NVC86F_SEM_EXECUTE_RELEASE_WFI 20:20
#define NVC86F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
#define NVC86F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001
#define NVC86F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
#define NVC86F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000
#define NVC86F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001
#define NVC86F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25
#define NVC86F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000
#define NVC86F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001
#define NVC86F_SEM_EXECUTE_REDUCTION 30:27
#define NVC86F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000
#define NVC86F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001
#define NVC86F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002
#define NVC86F_SEM_EXECUTE_REDUCTION_IAND 0x00000003
#define NVC86F_SEM_EXECUTE_REDUCTION_IOR 0x00000004
#define NVC86F_SEM_EXECUTE_REDUCTION_IADD 0x00000005
#define NVC86F_SEM_EXECUTE_REDUCTION_INC 0x00000006
#define NVC86F_SEM_EXECUTE_REDUCTION_DEC 0x00000007
#define NVC86F_SEM_EXECUTE_REDUCTION_FORMAT 31:31
#define NVC86F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000
#define NVC86F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001
#define NVC86F_WFI (0x00000078)
#define NVC86F_WFI_SCOPE 0:0
#define NVC86F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000
#define NVC86F_WFI_SCOPE_CURRENT_VEID 0x00000000
#define NVC86F_WFI_SCOPE_ALL 0x00000001
#define NVC86F_YIELD (0x00000080)
#define NVC86F_YIELD_OP 1:0
#define NVC86F_YIELD_OP_NOP 0x00000000
#define NVC86F_YIELD_OP_TSG 0x00000003
#define NVC86F_CLEAR_FAULTED (0x00000084)
// Note: RM provides the HANDLE as an opaque value; the internal detail fields
// are intentionally not exposed to the driver through these defines.
#define NVC86F_CLEAR_FAULTED_HANDLE 30:0
#define NVC86F_CLEAR_FAULTED_TYPE 31:31
#define NVC86F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000
#define NVC86F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001
#define NVC86F_QUADRO_VERIFY (0x000000a0)
/* GPFIFO entry format */
#define NVC86F_GP_ENTRY__SIZE 8
@@ -291,85 +188,4 @@ typedef volatile struct Nvc86fControl_struct {
#define NVC86F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003
#define NVC86F_GP_ENTRY1_OPCODE_SET_PB_SEGMENT_EXTENDED_BASE 0x00000004
/* dma method formats */
#define NVC86F_DMA_METHOD_ADDRESS_OLD 12:2
#define NVC86F_DMA_METHOD_ADDRESS 11:0
#define NVC86F_DMA_SUBDEVICE_MASK 15:4
#define NVC86F_DMA_METHOD_SUBCHANNEL 15:13
#define NVC86F_DMA_TERT_OP 17:16
#define NVC86F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000)
#define NVC86F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001)
#define NVC86F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
#define NVC86F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003)
#define NVC86F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000)
#define NVC86F_DMA_METHOD_COUNT_OLD 28:18
#define NVC86F_DMA_METHOD_COUNT 28:16
#define NVC86F_DMA_IMMD_DATA 28:16
#define NVC86F_DMA_SEC_OP 31:29
#define NVC86F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000)
#define NVC86F_DMA_SEC_OP_INC_METHOD (0x00000001)
#define NVC86F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002)
#define NVC86F_DMA_SEC_OP_NON_INC_METHOD (0x00000003)
#define NVC86F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004)
#define NVC86F_DMA_SEC_OP_ONE_INC (0x00000005)
#define NVC86F_DMA_SEC_OP_RESERVED6 (0x00000006)
#define NVC86F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007)
/* dma incrementing method format */
#define NVC86F_DMA_INCR_ADDRESS 11:0
#define NVC86F_DMA_INCR_SUBCHANNEL 15:13
#define NVC86F_DMA_INCR_COUNT 28:16
#define NVC86F_DMA_INCR_OPCODE 31:29
#define NVC86F_DMA_INCR_OPCODE_VALUE (0x00000001)
#define NVC86F_DMA_INCR_DATA 31:0
/* dma non-incrementing method format */
#define NVC86F_DMA_NONINCR_ADDRESS 11:0
#define NVC86F_DMA_NONINCR_SUBCHANNEL 15:13
#define NVC86F_DMA_NONINCR_COUNT 28:16
#define NVC86F_DMA_NONINCR_OPCODE 31:29
#define NVC86F_DMA_NONINCR_OPCODE_VALUE (0x00000003)
#define NVC86F_DMA_NONINCR_DATA 31:0
/* dma increment-once method format */
#define NVC86F_DMA_ONEINCR_ADDRESS 11:0
#define NVC86F_DMA_ONEINCR_SUBCHANNEL 15:13
#define NVC86F_DMA_ONEINCR_COUNT 28:16
#define NVC86F_DMA_ONEINCR_OPCODE 31:29
#define NVC86F_DMA_ONEINCR_OPCODE_VALUE (0x00000005)
#define NVC86F_DMA_ONEINCR_DATA 31:0
/* dma no-operation format */
#define NVC86F_DMA_NOP (0x00000000)
/* dma immediate-data format */
#define NVC86F_DMA_IMMD_ADDRESS 11:0
#define NVC86F_DMA_IMMD_SUBCHANNEL 15:13
#define NVC86F_DMA_IMMD_DATA 28:16
#define NVC86F_DMA_IMMD_OPCODE 31:29
#define NVC86F_DMA_IMMD_OPCODE_VALUE (0x00000004)
/* dma set sub-device mask format */
#define NVC86F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4
#define NVC86F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16
#define NVC86F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
/* dma store sub-device mask format */
#define NVC86F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4
#define NVC86F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16
#define NVC86F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
/* dma use sub-device mask format */
#define NVC86F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16
#define NVC86F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
/* dma end-segment format */
#define NVC86F_DMA_ENDSEG_OPCODE 31:29
#define NVC86F_DMA_ENDSEG_OPCODE_VALUE (0x00000007)
/* dma legacy incrementing/non-incrementing formats */
#define NVC86F_DMA_ADDRESS 12:2
#define NVC86F_DMA_SUBCH 15:13
#define NVC86F_DMA_OPCODE3 17:16
#define NVC86F_DMA_OPCODE3_NONE (0x00000000)
#define NVC86F_DMA_COUNT 28:18
#define NVC86F_DMA_OPCODE 31:29
#define NVC86F_DMA_OPCODE_METHOD (0x00000000)
#define NVC86F_DMA_OPCODE_NONINC_METHOD (0x00000002)
#define NVC86F_DMA_DATA 31:0
#ifdef __cplusplus
}; /* extern "C" */
#endif
#endif /* _clc86f_h_ */
#endif // __gh100_clc86f_h__


@@ -1,160 +1,46 @@
/*******************************************************************************
Copyright (c) 1993-2004 NVIDIA Corporation
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
#ifndef __gh100_clc8b5_h__
#define __gh100_clc8b5_h__
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#include "nvtypes.h"
#ifndef _clc8b5_h_
#define _clc8b5_h_
#ifdef __cplusplus
extern "C" {
#endif
#define HOPPER_DMA_COPY_A (0x0000C8B5)
typedef volatile struct _clc8b5_tag0 {
NvV32 Reserved00[0x40];
NvV32 Nop; // 0x00000100 - 0x00000103
NvV32 Reserved01[0xF];
NvV32 PmTrigger; // 0x00000140 - 0x00000143
NvV32 Reserved02[0x36];
NvV32 SetMonitoredFenceType; // 0x0000021C - 0x0000021F
NvV32 SetMonitoredFenceSignalAddrBaseUpper; // 0x00000220 - 0x00000223
NvV32 SetMonitoredFenceSignalAddrBaseLower; // 0x00000224 - 0x00000227
NvV32 Reserved03[0x6];
NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243
NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247
NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B
NvV32 SetSemaphorePayloadUpper; // 0x0000024C - 0x0000024F
NvV32 Reserved04[0x1];
NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257
NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B
NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F
NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263
NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267
NvV32 Reserved05[0x26];
NvV32 LaunchDma; // 0x00000300 - 0x00000303
NvV32 Reserved06[0x3F];
NvV32 OffsetInUpper; // 0x00000400 - 0x00000403
NvV32 OffsetInLower; // 0x00000404 - 0x00000407
NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B
NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F
NvV32 PitchIn; // 0x00000410 - 0x00000413
NvV32 PitchOut; // 0x00000414 - 0x00000417
NvV32 LineLengthIn; // 0x00000418 - 0x0000041B
NvV32 LineCount; // 0x0000041C - 0x0000041F
NvV32 Reserved07[0x38];
NvV32 SetSecureCopyMode; // 0x00000500 - 0x00000503
NvV32 SetDecryptIv0; // 0x00000504 - 0x00000507
NvV32 SetDecryptIv1; // 0x00000508 - 0x0000050B
NvV32 SetDecryptIv2; // 0x0000050C - 0x0000050F
NvV32 Reserved_SetAESCounter; // 0x00000510 - 0x00000513
NvV32 SetDecryptAuthTagCompareAddrUpper; // 0x00000514 - 0x00000517
NvV32 SetDecryptAuthTagCompareAddrLower; // 0x00000518 - 0x0000051B
NvV32 Reserved08[0x5];
NvV32 SetEncryptAuthTagAddrUpper; // 0x00000530 - 0x00000533
NvV32 SetEncryptAuthTagAddrLower; // 0x00000534 - 0x00000537
NvV32 SetEncryptIvAddrUpper; // 0x00000538 - 0x0000053B
NvV32 SetEncryptIvAddrLower; // 0x0000053C - 0x0000053F
NvV32 Reserved09[0x6F];
NvV32 SetMemoryScrubParameters; // 0x000006FC - 0x000006FF
NvV32 SetRemapConstA; // 0x00000700 - 0x00000703
NvV32 SetRemapConstB; // 0x00000704 - 0x00000707
NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B
NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F
NvV32 SetDstWidth; // 0x00000710 - 0x00000713
NvV32 SetDstHeight; // 0x00000714 - 0x00000717
NvV32 SetDstDepth; // 0x00000718 - 0x0000071B
NvV32 SetDstLayer; // 0x0000071C - 0x0000071F
NvV32 SetDstOrigin; // 0x00000720 - 0x00000723
NvV32 Reserved10[0x1];
NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B
NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F
NvV32 SetSrcHeight; // 0x00000730 - 0x00000733
NvV32 SetSrcDepth; // 0x00000734 - 0x00000737
NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B
NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F
NvV32 Reserved11[0x1];
NvV32 SrcOriginX; // 0x00000744 - 0x00000747
NvV32 SrcOriginY; // 0x00000748 - 0x0000074B
NvV32 DstOriginX; // 0x0000074C - 0x0000074F
NvV32 DstOriginY; // 0x00000750 - 0x00000753
NvV32 Reserved12[0x270];
NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117
NvV32 Reserved13[0x3BA];
} hopper_dma_copy_aControlPio;
#define NVC8B5_NOP (0x00000100)
#define NVC8B5_NOP_PARAMETER 31:0
#define NVC8B5_PM_TRIGGER (0x00000140)
#define NVC8B5_PM_TRIGGER_V 31:0
#define NVC8B5_SET_MONITORED_FENCE_TYPE (0x0000021C)
#define NVC8B5_SET_MONITORED_FENCE_TYPE_TYPE 0:0
#define NVC8B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE (0x00000000)
#define NVC8B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE_EXT (0x00000001)
#define NVC8B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER (0x00000220)
#define NVC8B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER_UPPER 24:0
#define NVC8B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER (0x00000224)
#define NVC8B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER_LOWER 31:0
#define HOPPER_DMA_COPY_A (0x0000C8B5)
#define NVC8B5_SET_SEMAPHORE_A (0x00000240)
#define NVC8B5_SET_SEMAPHORE_A_UPPER 24:0
#define NVC8B5_SET_SEMAPHORE_B (0x00000244)
#define NVC8B5_SET_SEMAPHORE_B_LOWER 31:0
#define NVC8B5_SET_SEMAPHORE_PAYLOAD (0x00000248)
#define NVC8B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0
#define NVC8B5_SET_SEMAPHORE_PAYLOAD_UPPER (0x0000024C)
#define NVC8B5_SET_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD 31:0
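//
// Illustrative sketch (not part of the class header): programming the
// semaphore released by HOPPER_DMA_COPY_A. SET_SEMAPHORE_A takes the upper
// bits of the 64-bit GPU VA (field 24:0) and SET_SEMAPHORE_B the lower 32
// bits. exPushMethod() is a hypothetical pushbuffer helper assumed for this
// sketch, not an API of this header.
//
extern void exPushMethod(NvU32 method, NvU32 data);   /* hypothetical helper */
static inline void exSetCeSemaphore(NvU64 semGpuVa, NvU32 payload)
{
    exPushMethod(NVC8B5_SET_SEMAPHORE_A,
                 (NvU32)(semGpuVa >> 32) & 0x01FFFFFFU);   /* _A_UPPER 24:0  */
    exPushMethod(NVC8B5_SET_SEMAPHORE_B, (NvU32)semGpuVa); /* _B_LOWER 31:0  */
    exPushMethod(NVC8B5_SET_SEMAPHORE_PAYLOAD, payload);
}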
#define NVC8B5_SET_RENDER_ENABLE_A (0x00000254)
#define NVC8B5_SET_RENDER_ENABLE_A_UPPER 24:0
#define NVC8B5_SET_RENDER_ENABLE_B (0x00000258)
#define NVC8B5_SET_RENDER_ENABLE_B_LOWER 31:0
#define NVC8B5_SET_RENDER_ENABLE_C (0x0000025C)
#define NVC8B5_SET_RENDER_ENABLE_C_MODE 2:0
#define NVC8B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000)
#define NVC8B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001)
#define NVC8B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002)
#define NVC8B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003)
#define NVC8B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004)
#define NVC8B5_SET_SRC_PHYS_MODE (0x00000260)
#define NVC8B5_SET_SRC_PHYS_MODE_TARGET 1:0
#define NVC8B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
#define NVC8B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC8B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC8B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM (0x00000003)
#define NVC8B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2
#define NVC8B5_SET_SRC_PHYS_MODE_PEER_ID 8:6
#define NVC8B5_SET_SRC_PHYS_MODE_FLA 9:9
#define NVC8B5_SET_DST_PHYS_MODE (0x00000264)
#define NVC8B5_SET_DST_PHYS_MODE_TARGET 1:0
#define NVC8B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
#define NVC8B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC8B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC8B5_SET_DST_PHYS_MODE_TARGET_PEERMEM (0x00000003)
#define NVC8B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2
#define NVC8B5_SET_DST_PHYS_MODE_PEER_ID 8:6
#define NVC8B5_SET_DST_PHYS_MODE_FLA 9:9
#define NVC8B5_LAUNCH_DMA (0x00000300)
#define NVC8B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
#define NVC8B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
@@ -167,80 +53,41 @@ typedef volatile struct _clc8b5_tag0 {
#define NVC8B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000)
#define NVC8B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_NO_TIMESTAMP (0x00000001)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_WITH_TIMESTAMP (0x00000002)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003)
#define NVC8B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5
#define NVC8B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000)
#define NVC8B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001)
#define NVC8B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002)
#define NVC8B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7
#define NVC8B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
#define NVC8B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001)
#define NVC8B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8
#define NVC8B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
#define NVC8B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001)
#define NVC8B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9
#define NVC8B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000)
#define NVC8B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001)
#define NVC8B5_LAUNCH_DMA_REMAP_ENABLE 10:10
#define NVC8B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000)
#define NVC8B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001)
#define NVC8B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11
#define NVC8B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000)
#define NVC8B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001)
#define NVC8B5_LAUNCH_DMA_SRC_TYPE 12:12
#define NVC8B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000)
#define NVC8B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001)
#define NVC8B5_LAUNCH_DMA_DST_TYPE 13:13
#define NVC8B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000)
#define NVC8B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDA (0x00000008)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDB (0x00000009)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDC (0x0000000D)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDD (0x0000000E)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDE (0x0000000F)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001)
#define NVC8B5_LAUNCH_DMA_COPY_TYPE 21:20
#define NVC8B5_LAUNCH_DMA_COPY_TYPE_PROT2PROT (0x00000000)
#define NVC8B5_LAUNCH_DMA_COPY_TYPE_DEFAULT (0x00000000)
#define NVC8B5_LAUNCH_DMA_COPY_TYPE_SECURE (0x00000001)
#define NVC8B5_LAUNCH_DMA_COPY_TYPE_NONPROT2NONPROT (0x00000002)
#define NVC8B5_LAUNCH_DMA_COPY_TYPE_RESERVED (0x00000003)
#define NVC8B5_LAUNCH_DMA_VPRMODE 22:22
#define NVC8B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
#define NVC8B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
#define NVC8B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE 23:23
#define NVC8B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE_FALSE (0x00000000)
#define NVC8B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE_TRUE (0x00000001)
#define NVC8B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
#define NVC8B5_LAUNCH_DMA_DISABLE_PLC 26:26
#define NVC8B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)
#define NVC8B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE 27:27
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_ONE_WORD (0x00000000)
#define NVC8B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_TWO_WORD (0x00000001)
#define NVC8B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28
#define NVC8B5_OFFSET_IN_UPPER (0x00000400)
#define NVC8B5_OFFSET_IN_UPPER_UPPER 24:0
#define NVC8B5_OFFSET_IN_LOWER (0x00000404)
@@ -249,41 +96,11 @@ typedef volatile struct _clc8b5_tag0 {
#define NVC8B5_OFFSET_OUT_UPPER_UPPER 24:0
#define NVC8B5_OFFSET_OUT_LOWER (0x0000040C)
#define NVC8B5_OFFSET_OUT_LOWER_VALUE 31:0
#define NVC8B5_PITCH_IN (0x00000410)
#define NVC8B5_PITCH_IN_VALUE 31:0
#define NVC8B5_PITCH_OUT (0x00000414)
#define NVC8B5_PITCH_OUT_VALUE 31:0
#define NVC8B5_LINE_LENGTH_IN (0x00000418)
#define NVC8B5_LINE_LENGTH_IN_VALUE 31:0
#define NVC8B5_LINE_COUNT (0x0000041C)
#define NVC8B5_LINE_COUNT_VALUE 31:0
#define NVC8B5_SET_SECURE_COPY_MODE (0x00000500)
#define NVC8B5_SET_SECURE_COPY_MODE_MODE 0:0
#define NVC8B5_SET_SECURE_COPY_MODE_MODE_ENCRYPT (0x00000000)
#define NVC8B5_SET_SECURE_COPY_MODE_MODE_DECRYPT (0x00000001)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET 20:19
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_LOCAL_FB (0x00000000)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_PEERMEM (0x00000003)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_SRC_PEER_ID 23:21
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_SRC_FLA 24:24
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET 26:25
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_LOCAL_FB (0x00000000)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_PEERMEM (0x00000003)
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_DST_PEER_ID 29:27
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_DST_FLA 30:30
#define NVC8B5_SET_SECURE_COPY_MODE_RESERVED_END_OF_COPY 31:31
#define NVC8B5_SET_DECRYPT_IV0 (0x00000504)
#define NVC8B5_SET_DECRYPT_IV0_VALUE 31:0
#define NVC8B5_SET_DECRYPT_IV1 (0x00000508)
#define NVC8B5_SET_DECRYPT_IV1_VALUE 31:0
#define NVC8B5_SET_DECRYPT_IV2 (0x0000050C)
#define NVC8B5_SET_DECRYPT_IV2_VALUE 31:0
#define NVC8B5_RESERVED_SET_AESCOUNTER (0x00000510)
#define NVC8B5_RESERVED_SET_AESCOUNTER_VALUE 31:0
#define NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER (0x00000514)
#define NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER_UPPER 24:0
#define NVC8B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER (0x00000518)
@@ -299,132 +116,18 @@ typedef volatile struct _clc8b5_tag0 {
#define NVC8B5_SET_MEMORY_SCRUB_PARAMETERS (0x000006FC)
#define NVC8B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE 0:0
#define NVC8B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE_FALSE (0x00000000)
#define NVC8B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE_TRUE (0x00000001)
#define NVC8B5_SET_REMAP_CONST_A (0x00000700)
#define NVC8B5_SET_REMAP_CONST_A_V 31:0
#define NVC8B5_SET_REMAP_CONST_B (0x00000704)
#define NVC8B5_SET_REMAP_CONST_B_V 31:0
#define NVC8B5_SET_REMAP_COMPONENTS (0x00000708)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X 2:0
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y 6:4
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z 10:8
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W 14:12
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005)
#define NVC8B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006)
#define NVC8B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16
#define NVC8B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000)
#define NVC8B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001)
#define NVC8B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002)
#define NVC8B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002)
#define NVC8B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003)
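//
// Illustrative sketch (not part of the class header): a 32-bit pattern fill
// through the copy engine, remapping every destination dword to the value in
// SET_REMAP_CONST_A. The copy would then be launched with
// NVC8B5_LAUNCH_DMA_REMAP_ENABLE_TRUE; exPushMethod() is the same
// hypothetical pushbuffer helper assumed in the semaphore sketch above.
//
static inline void exSetConstFill32(NvU32 pattern)
{
    exPushMethod(NVC8B5_SET_REMAP_CONST_A, pattern);
    exPushMethod(NVC8B5_SET_REMAP_COMPONENTS,
                 NVC8B5_SET_REMAP_COMPONENTS_DST_X_CONST_A |                      /* 2:0   */
                 (NVC8B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR    << 16) |     /* 17:16 */
                 (NVC8B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE << 20) |     /* 21:20 */
                 (NVC8B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE << 24));     /* 25:24 */
}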
#define NVC8B5_SET_DST_BLOCK_SIZE (0x0000070C)
#define NVC8B5_SET_DST_BLOCK_SIZE_WIDTH 3:0
#define NVC8B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000)
#define NVC8B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4
#define NVC8B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000)
#define NVC8B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001)
#define NVC8B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002)
#define NVC8B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003)
#define NVC8B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004)
#define NVC8B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005)
#define NVC8B5_SET_DST_BLOCK_SIZE_DEPTH 11:8
#define NVC8B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000)
#define NVC8B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001)
#define NVC8B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002)
#define NVC8B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003)
#define NVC8B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004)
#define NVC8B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005)
#define NVC8B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12
#define NVC8B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001)
#define NVC8B5_SET_DST_WIDTH (0x00000710)
#define NVC8B5_SET_DST_WIDTH_V 31:0
#define NVC8B5_SET_DST_HEIGHT (0x00000714)
#define NVC8B5_SET_DST_HEIGHT_V 31:0
#define NVC8B5_SET_DST_DEPTH (0x00000718)
#define NVC8B5_SET_DST_DEPTH_V 31:0
#define NVC8B5_SET_DST_LAYER (0x0000071C)
#define NVC8B5_SET_DST_LAYER_V 31:0
#define NVC8B5_SET_DST_ORIGIN (0x00000720)
#define NVC8B5_SET_DST_ORIGIN_X 15:0
#define NVC8B5_SET_DST_ORIGIN_Y 31:16
#define NVC8B5_SET_SRC_BLOCK_SIZE (0x00000728)
#define NVC8B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0
#define NVC8B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000)
#define NVC8B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4
#define NVC8B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000)
#define NVC8B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001)
#define NVC8B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002)
#define NVC8B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003)
#define NVC8B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004)
#define NVC8B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005)
#define NVC8B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8
#define NVC8B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000)
#define NVC8B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001)
#define NVC8B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002)
#define NVC8B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003)
#define NVC8B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004)
#define NVC8B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005)
#define NVC8B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12
#define NVC8B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001)
#define NVC8B5_SET_SRC_WIDTH (0x0000072C)
#define NVC8B5_SET_SRC_WIDTH_V 31:0
#define NVC8B5_SET_SRC_HEIGHT (0x00000730)
#define NVC8B5_SET_SRC_HEIGHT_V 31:0
#define NVC8B5_SET_SRC_DEPTH (0x00000734)
#define NVC8B5_SET_SRC_DEPTH_V 31:0
#define NVC8B5_SET_SRC_LAYER (0x00000738)
#define NVC8B5_SET_SRC_LAYER_V 31:0
#define NVC8B5_SET_SRC_ORIGIN (0x0000073C)
#define NVC8B5_SET_SRC_ORIGIN_X 15:0
#define NVC8B5_SET_SRC_ORIGIN_Y 31:16
#define NVC8B5_SRC_ORIGIN_X (0x00000744)
#define NVC8B5_SRC_ORIGIN_X_VALUE 31:0
#define NVC8B5_SRC_ORIGIN_Y (0x00000748)
#define NVC8B5_SRC_ORIGIN_Y_VALUE 31:0
#define NVC8B5_DST_ORIGIN_X (0x0000074C)
#define NVC8B5_DST_ORIGIN_X_VALUE 31:0
#define NVC8B5_DST_ORIGIN_Y (0x00000750)
#define NVC8B5_DST_ORIGIN_Y_VALUE 31:0
#define NVC8B5_PM_TRIGGER_END (0x00001114)
#define NVC8B5_PM_TRIGGER_END_V 31:0
#ifdef __cplusplus
}; /* extern "C" */
#endif
#endif // _clc8b5_h
#endif // __gh100_clc8b5_h__


@@ -1,84 +1,42 @@
/*******************************************************************************
Copyright (c) 2012-2015 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef _clc96f_h_
#define _clc96f_h_
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
/* class BLACKWELL_CHANNEL_GPFIFO */
/*
* Documentation for BLACKWELL_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
* chapter "User Control Registers". It is documented as device NV_UDMA.
* The GPFIFO format itself is also documented in dev_pbdma.ref,
* NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
* chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Note there is no .mfs file for this class.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __gb100_clc96f_h__
#define __gb100_clc96f_h__
#define BLACKWELL_CHANNEL_GPFIFO_A (0x0000C96F)
#define NVC96F_TYPEDEF BLACKWELL_CHANNELChannelGPFifoA
/* dma flow control data structure */
typedef volatile struct Nvc96fControl_struct {
NvU32 Ignored00[0x23]; /* 0000-008b*/
NvU32 GPPut; /* GP FIFO put offset 008c-008f*/
NvU32 Ignored01[0x5c];
} Nvc96fControl, BlackwellAControlGPFifo;
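//
// Illustrative sketch (not part of the class header): submitting work by
// advancing GPPut in the channel's USERD mapping after new GP entries have
// been written to the GPFIFO ring. The required memory barrier and doorbell
// notification are elided; numGpFifoEntries is a placeholder for the ring
// size used by the caller.
//
static inline void exAdvanceGpPut(BlackwellAControlGPFifo *userd,
                                  NvU32 gpPut, NvU32 numGpFifoEntries)
{
    /* The GPFIFO is a ring, so the put index wraps at the entry count. */
    userd->GPPut = (gpPut + 1) % numGpFifoEntries;
}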
/* fields and values */
#define NVC96F_NUMBER_OF_SUBCHANNELS (8)
#define NVC96F_SET_OBJECT (0x00000000)
#define NVC96F_SET_OBJECT_NVCLASS 15:0
#define NVC96F_SET_OBJECT_ENGINE 20:16
#define NVC96F_SET_OBJECT_ENGINE_SW 0x0000001f
#define NVC96F_NOP (0x00000008)
#define NVC96F_NOP_HANDLE 31:0
#define NVC96F_NON_STALL_INTERRUPT (0x00000020)
#define NVC96F_NON_STALL_INTERRUPT_HANDLE 31:0
#define NVC96F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR
#define NVC96F_FB_FLUSH_HANDLE 31:0
// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
// specifying the page address for a targeted TLB invalidate and the uTLB for
// a targeted REPLAY_CANCEL for UVM.
// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly
// rearranged fields.
#define NVC96F_MEM_OP_A (0x00000028)
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE 7:6 // only relevant for invalidates with NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE for invalidating link TLB only, or non-link TLB only or all TLBs
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_ALL_TLBS 0
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_LINK_TLBS 1
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_NON_LINK_TLBS 2
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_RSVRVD 3
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 8:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000
@@ -86,9 +44,6 @@ typedef volatile struct Nvc96fControl_struct {
#define NVC96F_MEM_OP_B (0x0000002c)
#define NVC96F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0
#define NVC96F_MEM_OP_C (0x00000030)
#define NVC96F_MEM_OP_C_MEMBAR_TYPE 2:0
#define NVC96F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000
#define NVC96F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED
@@ -97,130 +52,38 @@ typedef volatile struct Nvc96fControl_struct {
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE
#define NVC96F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0
// MEM_OP_D MUST be preceded by MEM_OPs A-C.
#define NVC96F_MEM_OP_D (0x00000034)
#define NVC96F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE
#define NVC96F_MEM_OP_D_OPERATION 31:27
#define NVC96F_MEM_OP_D_OPERATION_MEMBAR 0x00000005
#define NVC96F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009
#define NVC96F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a
#define NVC96F_MEM_OP_D_OPERATION_MMU_OPERATION 0x0000000b
#define NVC96F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d
#define NVC96F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e
// CLEAN_LINES is an alias for Tegra/GPU IP usage
#define NVC96F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
#define NVC96F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
#define NVC96F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010
#define NVC96F_MEM_OP_D_OPERATION_L2_SYSMEM_NCOH_INVALIDATE 0x00000011
#define NVC96F_MEM_OP_D_OPERATION_L2_SYSMEM_COH_INVALIDATE 0x00000012
#define NVC96F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015
#define NVC96F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3
#define NVC96F_MEM_OP_D_MMU_OPERATION_TYPE 23:20
#define NVC96F_MEM_OP_D_MMU_OPERATION_TYPE_RESERVED 0x00000000
#define NVC96F_MEM_OP_D_MMU_OPERATION_TYPE_VIDMEM_ACCESS_BIT_DUMP 0x00000001
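//
// Illustrative sketch (not part of the class header): issuing a host-level
// system membar with the MEM_OP_A..D method group. As noted above, MEM_OP_D
// must be preceded by MEM_OPs A-C. exPushMethod() is a hypothetical
// pushbuffer helper assumed for this sketch.
//
extern void exPushMethod(NvU32 method, NvU32 data);   /* hypothetical helper */
static inline void exPushSysMembar(void)
{
    exPushMethod(NVC96F_MEM_OP_A, 0);
    exPushMethod(NVC96F_MEM_OP_B, 0);
    exPushMethod(NVC96F_MEM_OP_C,
                 NVC96F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR);           /* field 2:0       */
    exPushMethod(NVC96F_MEM_OP_D,
                 NVC96F_MEM_OP_D_OPERATION_MEMBAR << 27);           /* OPERATION 31:27 */
}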
#define NVC96F_SEM_ADDR_LO (0x0000005c)
#define NVC96F_SEM_ADDR_LO_OFFSET 31:2
#define NVC96F_SEM_ADDR_HI (0x00000060)
#define NVC96F_SEM_ADDR_HI_OFFSET 24:0
#define NVC96F_SEM_PAYLOAD_LO (0x00000064)
#define NVC96F_SEM_PAYLOAD_LO_PAYLOAD 31:0
#define NVC96F_SEM_PAYLOAD_HI (0x00000068)
#define NVC96F_SEM_PAYLOAD_HI_PAYLOAD 31:0
#define NVC96F_SEM_EXECUTE (0x0000006c)
#define NVC96F_SEM_EXECUTE_OPERATION 2:0
#define NVC96F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
#define NVC96F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005
#define NVC96F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006
#define NVC96F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12
#define NVC96F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000
#define NVC96F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001
#define NVC96F_SEM_EXECUTE_ACQUIRE_RECHECK 18:18
#define NVC96F_SEM_EXECUTE_ACQUIRE_RECHECK_DIS 0x00000000
#define NVC96F_SEM_EXECUTE_ACQUIRE_RECHECK_EN 0x00000001
#define NVC96F_SEM_EXECUTE_RELEASE_WFI 20:20
#define NVC96F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
#define NVC96F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001
#define NVC96F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
#define NVC96F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000
#define NVC96F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001
#define NVC96F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25
#define NVC96F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000
#define NVC96F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001
#define NVC96F_SEM_EXECUTE_REDUCTION 30:27
#define NVC96F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000
#define NVC96F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001
#define NVC96F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002
#define NVC96F_SEM_EXECUTE_REDUCTION_IAND 0x00000003
#define NVC96F_SEM_EXECUTE_REDUCTION_IOR 0x00000004
#define NVC96F_SEM_EXECUTE_REDUCTION_IADD 0x00000005
#define NVC96F_SEM_EXECUTE_REDUCTION_INC 0x00000006
#define NVC96F_SEM_EXECUTE_REDUCTION_DEC 0x00000007
#define NVC96F_SEM_EXECUTE_REDUCTION_FORMAT 31:31
#define NVC96F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000
#define NVC96F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001
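//
// Illustrative sketch (not part of the class header): a 64-bit semaphore
// release with wait-for-idle. SEM_ADDR_LO carries address bits 31:2, so the
// semaphore VA must be at least 4-byte aligned. exPushMethod() is the
// hypothetical pushbuffer helper assumed in the MEM_OP sketch above.
//
static inline void exPushSemRelease64(NvU64 semGpuVa, NvU64 payload)
{
    exPushMethod(NVC96F_SEM_ADDR_LO, (NvU32)semGpuVa & ~0x3U);      /* _LO_OFFSET 31:2 */
    exPushMethod(NVC96F_SEM_ADDR_HI, (NvU32)(semGpuVa >> 32));      /* _HI_OFFSET 24:0 */
    exPushMethod(NVC96F_SEM_PAYLOAD_LO, (NvU32)payload);
    exPushMethod(NVC96F_SEM_PAYLOAD_HI, (NvU32)(payload >> 32));
    exPushMethod(NVC96F_SEM_EXECUTE,
                 NVC96F_SEM_EXECUTE_OPERATION_RELEASE |             /* 2:0   */
                 (NVC96F_SEM_EXECUTE_RELEASE_WFI_EN     << 20) |    /* 20:20 */
                 (NVC96F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT << 24));    /* 24:24 */
}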
#define NVC96F_WFI (0x00000078)
#define NVC96F_WFI_SCOPE 0:0
#define NVC96F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000
#define NVC96F_WFI_SCOPE_CURRENT_VEID 0x00000000
#define NVC96F_WFI_SCOPE_ALL 0x00000001
#define NVC96F_YIELD (0x00000080)
#define NVC96F_YIELD_OP 1:0
#define NVC96F_YIELD_OP_NOP 0x00000000
#define NVC96F_YIELD_OP_TSG 0x00000003
#define NVC96F_CLEAR_FAULTED (0x00000084)
// Note: RM provides the HANDLE as an opaque value; the internal detail fields
// are intentionally not exposed to the driver through these defines.
#define NVC96F_CLEAR_FAULTED_HANDLE 30:0
#define NVC96F_CLEAR_FAULTED_TYPE 31:31
#define NVC96F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000
#define NVC96F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001
/* GPFIFO entry format */
#define NVC96F_GP_ENTRY__SIZE 8
@@ -245,85 +108,4 @@ typedef volatile struct Nvc96fControl_struct {
#define NVC96F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003
#define NVC96F_GP_ENTRY1_OPCODE_SET_PB_SEGMENT_EXTENDED_BASE 0x00000004
/* dma method formats */
#define NVC96F_DMA_METHOD_ADDRESS_OLD 12:2
#define NVC96F_DMA_METHOD_ADDRESS 11:0
#define NVC96F_DMA_SUBDEVICE_MASK 15:4
#define NVC96F_DMA_METHOD_SUBCHANNEL 15:13
#define NVC96F_DMA_TERT_OP 17:16
#define NVC96F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000)
#define NVC96F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001)
#define NVC96F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002)
#define NVC96F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003)
#define NVC96F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000)
#define NVC96F_DMA_METHOD_COUNT_OLD 28:18
#define NVC96F_DMA_METHOD_COUNT 28:16
#define NVC96F_DMA_IMMD_DATA 28:16
#define NVC96F_DMA_SEC_OP 31:29
#define NVC96F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000)
#define NVC96F_DMA_SEC_OP_INC_METHOD (0x00000001)
#define NVC96F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002)
#define NVC96F_DMA_SEC_OP_NON_INC_METHOD (0x00000003)
#define NVC96F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004)
#define NVC96F_DMA_SEC_OP_ONE_INC (0x00000005)
#define NVC96F_DMA_SEC_OP_RESERVED6 (0x00000006)
#define NVC96F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007)
/* dma incrementing method format */
#define NVC96F_DMA_INCR_ADDRESS 11:0
#define NVC96F_DMA_INCR_SUBCHANNEL 15:13
#define NVC96F_DMA_INCR_COUNT 28:16
#define NVC96F_DMA_INCR_OPCODE 31:29
#define NVC96F_DMA_INCR_OPCODE_VALUE (0x00000001)
#define NVC96F_DMA_INCR_DATA 31:0
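//
// Illustrative sketch (not part of the class header): encoding an
// incrementing-method header word for the pushbuffer. The ADDRESS field
// holds the dword offset of the method (byte offset >> 2) and COUNT the
// number of data words that follow; e.g. exIncrMethodHeader(0,
// NVC96F_SEM_PAYLOAD_LO, 2) starts a two-word write at SEM_PAYLOAD_LO on
// subchannel 0.
//
static inline NvU32 exIncrMethodHeader(NvU32 subch, NvU32 methodByteAddr,
                                       NvU32 count)
{
    return ((NvU32)NVC96F_DMA_INCR_OPCODE_VALUE << 29) |   /* OPCODE     31:29 */
           ((count & 0x1FFFU)                   << 16) |   /* COUNT      28:16 */
           ((subch & 0x7U)                      << 13) |   /* SUBCHANNEL 15:13 */
           ((methodByteAddr >> 2) & 0xFFFU);               /* ADDRESS    11:0  */
}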
/* dma non-incrementing method format */
#define NVC96F_DMA_NONINCR_ADDRESS 11:0
#define NVC96F_DMA_NONINCR_SUBCHANNEL 15:13
#define NVC96F_DMA_NONINCR_COUNT 28:16
#define NVC96F_DMA_NONINCR_OPCODE 31:29
#define NVC96F_DMA_NONINCR_OPCODE_VALUE (0x00000003)
#define NVC96F_DMA_NONINCR_DATA 31:0
/* dma increment-once method format */
#define NVC96F_DMA_ONEINCR_ADDRESS 11:0
#define NVC96F_DMA_ONEINCR_SUBCHANNEL 15:13
#define NVC96F_DMA_ONEINCR_COUNT 28:16
#define NVC96F_DMA_ONEINCR_OPCODE 31:29
#define NVC96F_DMA_ONEINCR_OPCODE_VALUE (0x00000005)
#define NVC96F_DMA_ONEINCR_DATA 31:0
/* dma no-operation format */
#define NVC96F_DMA_NOP (0x00000000)
/* dma immediate-data format */
#define NVC96F_DMA_IMMD_ADDRESS 11:0
#define NVC96F_DMA_IMMD_SUBCHANNEL 15:13
#define NVC96F_DMA_IMMD_DATA 28:16
#define NVC96F_DMA_IMMD_OPCODE 31:29
#define NVC96F_DMA_IMMD_OPCODE_VALUE (0x00000004)
/* dma set sub-device mask format */
#define NVC96F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4
#define NVC96F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16
#define NVC96F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001)
/* dma store sub-device mask format */
#define NVC96F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4
#define NVC96F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16
#define NVC96F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002)
/* dma use sub-device mask format */
#define NVC96F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16
#define NVC96F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003)
/* dma end-segment format */
#define NVC96F_DMA_ENDSEG_OPCODE 31:29
#define NVC96F_DMA_ENDSEG_OPCODE_VALUE (0x00000007)
/* dma legacy incrementing/non-incrementing formats */
#define NVC96F_DMA_ADDRESS 12:2
#define NVC96F_DMA_SUBCH 15:13
#define NVC96F_DMA_OPCODE3 17:16
#define NVC96F_DMA_OPCODE3_NONE (0x00000000)
#define NVC96F_DMA_COUNT 28:18
#define NVC96F_DMA_OPCODE 31:29
#define NVC96F_DMA_OPCODE_METHOD (0x00000000)
#define NVC96F_DMA_OPCODE_NONINC_METHOD (0x00000002)
#define NVC96F_DMA_DATA 31:0
#ifdef __cplusplus
}; /* extern "C" */
#endif
#endif /* _clc96f_h_ */
#endif // __gb100_clc96f_h__


@@ -1,460 +1,29 @@
/*******************************************************************************
Copyright (c) 1993-2004 NVIDIA Corporation
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#include "nvtypes.h"
#ifndef _clc9b5_h_
#define _clc9b5_h_
#ifdef __cplusplus
extern "C" {
#endif
#ifndef __gb100_clc9b5_h__
#define __gb100_clc9b5_h__
#define BLACKWELL_DMA_COPY_A (0x0000C9B5)
typedef volatile struct _clc9b5_tag0 {
NvV32 Reserved00[0x40];
NvV32 Nop; // 0x00000100 - 0x00000103
NvV32 Reserved01[0xF];
NvV32 PmTrigger; // 0x00000140 - 0x00000143
NvV32 Reserved02[0x36];
NvV32 SetMonitoredFenceType; // 0x0000021C - 0x0000021F
NvV32 SetMonitoredFenceSignalAddrBaseUpper; // 0x00000220 - 0x00000223
NvV32 SetMonitoredFenceSignalAddrBaseLower; // 0x00000224 - 0x00000227
NvV32 Reserved03[0x6];
NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243
NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247
NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B
NvV32 SetSemaphorePayloadUpper; // 0x0000024C - 0x0000024F
NvV32 Reserved04[0x1];
NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257
NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B
NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F
NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263
NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267
NvV32 Reserved05[0x26];
NvV32 LaunchDma; // 0x00000300 - 0x00000303
NvV32 Reserved06[0x3F];
NvV32 OffsetInUpper; // 0x00000400 - 0x00000403
NvV32 OffsetInLower; // 0x00000404 - 0x00000407
NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B
NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F
NvV32 PitchIn; // 0x00000410 - 0x00000413
NvV32 PitchOut; // 0x00000414 - 0x00000417
NvV32 LineLengthIn; // 0x00000418 - 0x0000041B
NvV32 LineCount; // 0x0000041C - 0x0000041F
NvV32 Reserved07[0x38];
NvV32 SetSecureCopyMode; // 0x00000500 - 0x00000503
NvV32 SetDecryptIv0; // 0x00000504 - 0x00000507
NvV32 SetDecryptIv1; // 0x00000508 - 0x0000050B
NvV32 SetDecryptIv2; // 0x0000050C - 0x0000050F
NvV32 Reserved_SetAESCounter; // 0x00000510 - 0x00000513
NvV32 SetDecryptAuthTagCompareAddrUpper; // 0x00000514 - 0x00000517
NvV32 SetDecryptAuthTagCompareAddrLower; // 0x00000518 - 0x0000051B
NvV32 Reserved08[0x5];
NvV32 SetEncryptAuthTagAddrUpper; // 0x00000530 - 0x00000533
NvV32 SetEncryptAuthTagAddrLower; // 0x00000534 - 0x00000537
NvV32 SetEncryptIvAddrUpper; // 0x00000538 - 0x0000053B
NvV32 SetEncryptIvAddrLower; // 0x0000053C - 0x0000053F
NvV32 Reserved09[0x10];
NvV32 SetCompressionParameters; // 0x00000580 - 0x00000583
NvV32 SetDecompressOutLength; // 0x00000584 - 0x00000587
NvV32 SetDecompressOutLengthAddrUpper; // 0x00000588 - 0x0000058B
NvV32 SetDecompressOutLengthAddrLower; // 0x0000058C - 0x0000058F
NvV32 SetDecompressChecksum; // 0x00000590 - 0x00000593
NvV32 Reserved10[0x5A];
NvV32 SetMemoryScrubParameters; // 0x000006FC - 0x000006FF
NvV32 SetRemapConstA; // 0x00000700 - 0x00000703
NvV32 SetRemapConstB; // 0x00000704 - 0x00000707
NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B
NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F
NvV32 SetDstWidth; // 0x00000710 - 0x00000713
NvV32 SetDstHeight; // 0x00000714 - 0x00000717
NvV32 SetDstDepth; // 0x00000718 - 0x0000071B
NvV32 SetDstLayer; // 0x0000071C - 0x0000071F
NvV32 SetDstOrigin; // 0x00000720 - 0x00000723
NvV32 Reserved11[0x1];
NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B
NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F
NvV32 SetSrcHeight; // 0x00000730 - 0x00000733
NvV32 SetSrcDepth; // 0x00000734 - 0x00000737
NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B
NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F
NvV32 Reserved12[0x1];
NvV32 SrcOriginX; // 0x00000744 - 0x00000747
NvV32 SrcOriginY; // 0x00000748 - 0x0000074B
NvV32 DstOriginX; // 0x0000074C - 0x0000074F
NvV32 DstOriginY; // 0x00000750 - 0x00000753
NvV32 Reserved13[0x270];
NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117
NvV32 Reserved14[0x3BA];
} blackwell_dma_copy_aControlPio;
#define NVC9B5_NOP (0x00000100)
#define NVC9B5_NOP_PARAMETER 31:0
#define NVC9B5_PM_TRIGGER (0x00000140)
#define NVC9B5_PM_TRIGGER_V 31:0
#define NVC9B5_SET_MONITORED_FENCE_TYPE (0x0000021C)
#define NVC9B5_SET_MONITORED_FENCE_TYPE_TYPE 0:0
#define NVC9B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE (0x00000000)
#define NVC9B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE_EXT (0x00000001)
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER (0x00000220)
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER_UPPER 24:0
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER (0x00000224)
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER_LOWER 31:0
#define NVC9B5_SET_SEMAPHORE_A (0x00000240)
#define NVC9B5_SET_SEMAPHORE_A_UPPER 24:0
#define NVC9B5_SET_SEMAPHORE_B (0x00000244)
#define NVC9B5_SET_SEMAPHORE_B_LOWER 31:0
#define NVC9B5_SET_SEMAPHORE_PAYLOAD (0x00000248)
#define NVC9B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0
#define NVC9B5_SET_SEMAPHORE_PAYLOAD_UPPER (0x0000024C)
#define NVC9B5_SET_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD 31:0
#define NVC9B5_SET_RENDER_ENABLE_A (0x00000254)
#define NVC9B5_SET_RENDER_ENABLE_A_UPPER 24:0
#define NVC9B5_SET_RENDER_ENABLE_B (0x00000258)
#define NVC9B5_SET_RENDER_ENABLE_B_LOWER 31:0
#define NVC9B5_SET_RENDER_ENABLE_C (0x0000025C)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE 2:0
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004)
#define NVC9B5_SET_SRC_PHYS_MODE (0x00000260)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET 1:0
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM (0x00000003)
#define NVC9B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2
#define NVC9B5_SET_SRC_PHYS_MODE_PEER_ID 8:6
#define NVC9B5_SET_SRC_PHYS_MODE_FLA 9:9
#define NVC9B5_SET_DST_PHYS_MODE (0x00000264)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET 1:0
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_PEERMEM (0x00000003)
#define NVC9B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2
#define NVC9B5_SET_DST_PHYS_MODE_PEER_ID 8:6
#define NVC9B5_SET_DST_PHYS_MODE_FLA 9:9
#define NVC9B5_LAUNCH_DMA (0x00000300)
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001)
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002)
#define NVC9B5_LAUNCH_DMA_FLUSH_ENABLE 2:2
#define NVC9B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000)
#define NVC9B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001)
#define NVC9B5_LAUNCH_DMA_FLUSH_TYPE 25:25
#define NVC9B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000)
#define NVC9B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_NO_TIMESTAMP (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_WITH_TIMESTAMP (0x00000002)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003)
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000)
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001)
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002)
#define NVC9B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7
#define NVC9B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
#define NVC9B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001)
#define NVC9B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8
#define NVC9B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000)
#define NVC9B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001)
#define NVC9B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9
#define NVC9B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000)
#define NVC9B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001)
#define NVC9B5_LAUNCH_DMA_REMAP_ENABLE 10:10
#define NVC9B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000)
#define NVC9B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001)
#define NVC9B5_LAUNCH_DMA_COMPRESSION_ENABLE 11:11
#define NVC9B5_LAUNCH_DMA_COMPRESSION_ENABLE_FALSE (0x00000000)
#define NVC9B5_LAUNCH_DMA_COMPRESSION_ENABLE_TRUE (0x00000001)
#define NVC9B5_LAUNCH_DMA_SRC_TYPE 12:12
#define NVC9B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000)
#define NVC9B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001)
#define NVC9B5_LAUNCH_DMA_DST_TYPE 13:13
#define NVC9B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000)
#define NVC9B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDA (0x00000008)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDB (0x00000009)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDC (0x0000000D)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDD (0x0000000E)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDE (0x0000000F)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE 21:20
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_PROT2PROT (0x00000000)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_DEFAULT (0x00000000)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_SECURE (0x00000001)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_NONPROT2NONPROT (0x00000002)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_RESERVED (0x00000003)
#define NVC9B5_LAUNCH_DMA_VPRMODE 22:22
#define NVC9B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
#define NVC9B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
#define NVC9B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE 23:23
#define NVC9B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE_FALSE (0x00000000)
#define NVC9B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE_TRUE (0x00000001)
#define NVC9B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
#define NVC9B5_LAUNCH_DMA_DISABLE_PLC 26:26
#define NVC9B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)
#define NVC9B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE 27:27
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_ONE_WORD (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_TWO_WORD (0x00000001)
#define NVC9B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28
#define NVC9B5_OFFSET_IN_UPPER (0x00000400)
#define NVC9B5_OFFSET_IN_UPPER_UPPER 24:0
#define NVC9B5_OFFSET_IN_LOWER (0x00000404)
#define NVC9B5_OFFSET_IN_LOWER_VALUE 31:0
#define NVC9B5_OFFSET_OUT_UPPER (0x00000408)
#define NVC9B5_OFFSET_OUT_UPPER_UPPER 24:0
#define NVC9B5_OFFSET_OUT_LOWER (0x0000040C)
#define NVC9B5_OFFSET_OUT_LOWER_VALUE 31:0
#define NVC9B5_PITCH_IN (0x00000410)
#define NVC9B5_PITCH_IN_VALUE 31:0
#define NVC9B5_PITCH_OUT (0x00000414)
#define NVC9B5_PITCH_OUT_VALUE 31:0
#define NVC9B5_LINE_LENGTH_IN (0x00000418)
#define NVC9B5_LINE_LENGTH_IN_VALUE 31:0
#define NVC9B5_LINE_COUNT (0x0000041C)
#define NVC9B5_LINE_COUNT_VALUE 31:0
#define NVC9B5_SET_SECURE_COPY_MODE (0x00000500)
#define NVC9B5_SET_SECURE_COPY_MODE_MODE 0:0
#define NVC9B5_SET_SECURE_COPY_MODE_MODE_ENCRYPT (0x00000000)
#define NVC9B5_SET_SECURE_COPY_MODE_MODE_DECRYPT (0x00000001)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET 20:19
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_LOCAL_FB (0x00000000)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_PEERMEM (0x00000003)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_PEER_ID 23:21
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_FLA 24:24
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET 26:25
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_LOCAL_FB (0x00000000)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_COHERENT_SYSMEM (0x00000001)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_NONCOHERENT_SYSMEM (0x00000002)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_PEERMEM (0x00000003)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_PEER_ID 29:27
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_FLA 30:30
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_END_OF_COPY 31:31
#define NVC9B5_SET_DECRYPT_IV0 (0x00000504)
#define NVC9B5_SET_DECRYPT_IV0_VALUE 31:0
#define NVC9B5_SET_DECRYPT_IV1 (0x00000508)
#define NVC9B5_SET_DECRYPT_IV1_VALUE 31:0
#define NVC9B5_SET_DECRYPT_IV2 (0x0000050C)
#define NVC9B5_SET_DECRYPT_IV2_VALUE 31:0
#define NVC9B5_RESERVED_SET_AESCOUNTER (0x00000510)
#define NVC9B5_RESERVED_SET_AESCOUNTER_VALUE 31:0
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER (0x00000514)
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER_UPPER 24:0
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER (0x00000518)
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER_LOWER 31:0
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_UPPER (0x00000530)
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_UPPER_UPPER 24:0
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_LOWER (0x00000534)
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_LOWER_LOWER 31:0
#define NVC9B5_SET_ENCRYPT_IV_ADDR_UPPER (0x00000538)
#define NVC9B5_SET_ENCRYPT_IV_ADDR_UPPER_UPPER 24:0
#define NVC9B5_SET_ENCRYPT_IV_ADDR_LOWER (0x0000053C)
#define NVC9B5_SET_ENCRYPT_IV_ADDR_LOWER_LOWER 31:0
#define NVC9B5_SET_COMPRESSION_PARAMETERS (0x00000580)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_OPERATION 0:0
#define NVC9B5_SET_COMPRESSION_PARAMETERS_OPERATION_DECOMPRESS (0x00000000)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_OPERATION_COMPRESS (0x00000001)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO 3:1
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_SNAPPY (0x00000000)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_LZ4_DATA_ONLY (0x00000001)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_LZ4_BLOCK (0x00000002)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_LZ4_BLOCK_CHECKSUM (0x00000003)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_DEFLATE (0x00000004)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_SNAPPY_WITH_LONG_FETCH (0x00000005)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM 29:28
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_NONE (0x00000000)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_ADLER32 (0x00000001)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_CRC32 (0x00000002)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_SNAPPY_CRC (0x00000003)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH (0x00000584)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_V 31:0
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_UPPER (0x00000588)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_UPPER_UPPER 24:0
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_LOWER (0x0000058C)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_LOWER_LOWER 31:0
#define NVC9B5_SET_DECOMPRESS_CHECKSUM (0x00000590)
#define NVC9B5_SET_DECOMPRESS_CHECKSUM_V 31:0
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS (0x000006FC)
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE 0:0
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE_FALSE (0x00000000)
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE_TRUE (0x00000001)
#define NVC9B5_SET_REMAP_CONST_A (0x00000700)
#define NVC9B5_SET_REMAP_CONST_A_V 31:0
#define NVC9B5_SET_REMAP_CONST_B (0x00000704)
#define NVC9B5_SET_REMAP_CONST_B_V 31:0
#define NVC9B5_SET_REMAP_COMPONENTS (0x00000708)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X 2:0
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y 6:4
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z 10:8
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W 14:12
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003)
#define NVC9B5_SET_DST_BLOCK_SIZE (0x0000070C)
#define NVC9B5_SET_DST_BLOCK_SIZE_WIDTH 3:0
#define NVC9B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH 11:8
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005)
#define NVC9B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12
#define NVC9B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001)
#define NVC9B5_SET_DST_WIDTH (0x00000710)
#define NVC9B5_SET_DST_WIDTH_V 31:0
#define NVC9B5_SET_DST_HEIGHT (0x00000714)
#define NVC9B5_SET_DST_HEIGHT_V 31:0
#define NVC9B5_SET_DST_DEPTH (0x00000718)
#define NVC9B5_SET_DST_DEPTH_V 31:0
#define NVC9B5_SET_DST_LAYER (0x0000071C)
#define NVC9B5_SET_DST_LAYER_V 31:0
#define NVC9B5_SET_DST_ORIGIN (0x00000720)
#define NVC9B5_SET_DST_ORIGIN_X 15:0
#define NVC9B5_SET_DST_ORIGIN_Y 31:16
#define NVC9B5_SET_SRC_BLOCK_SIZE (0x00000728)
#define NVC9B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0
#define NVC9B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005)
#define NVC9B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12
#define NVC9B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001)
#define NVC9B5_SET_SRC_WIDTH (0x0000072C)
#define NVC9B5_SET_SRC_WIDTH_V 31:0
#define NVC9B5_SET_SRC_HEIGHT (0x00000730)
#define NVC9B5_SET_SRC_HEIGHT_V 31:0
#define NVC9B5_SET_SRC_DEPTH (0x00000734)
#define NVC9B5_SET_SRC_DEPTH_V 31:0
#define NVC9B5_SET_SRC_LAYER (0x00000738)
#define NVC9B5_SET_SRC_LAYER_V 31:0
#define NVC9B5_SET_SRC_ORIGIN (0x0000073C)
#define NVC9B5_SET_SRC_ORIGIN_X 15:0
#define NVC9B5_SET_SRC_ORIGIN_Y 31:16
#define NVC9B5_SRC_ORIGIN_X (0x00000744)
#define NVC9B5_SRC_ORIGIN_X_VALUE 31:0
#define NVC9B5_SRC_ORIGIN_Y (0x00000748)
#define NVC9B5_SRC_ORIGIN_Y_VALUE 31:0
#define NVC9B5_DST_ORIGIN_X (0x0000074C)
#define NVC9B5_DST_ORIGIN_X_VALUE 31:0
#define NVC9B5_DST_ORIGIN_Y (0x00000750)
#define NVC9B5_DST_ORIGIN_Y_VALUE 31:0
#define NVC9B5_PM_TRIGGER_END (0x00001114)
#define NVC9B5_PM_TRIGGER_END_V 31:0
#ifdef __cplusplus
}; /* extern "C" */
#endif
#endif // _clc9b5_h
#endif // __gb100_clc9b5_h__
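The MSB:LSB pairs in the header above (for example SEMAPHORE_REDUCTION_ENABLE 19:19 or COPY_TYPE 21:20) describe bit ranges inside a 32-bit method value. A minimal sketch of how such a value could be assembled follows; the FIELD_MASK/FIELD_VAL helpers and the function name are illustrative only, standing in for the driver's own DRF-style macros.

#include <stdint.h>

/* Build a mask / field value from an MSB:LSB range (hypothetical helpers). */
#define FIELD_MASK(msb, lsb)   ((uint32_t)((~0u >> (31 - (msb))) & (~0u << (lsb))))
#define FIELD_VAL(msb, lsb, v) (((uint32_t)(v) << (lsb)) & FIELD_MASK((msb), (lsb)))

static uint32_t example_launch_dma_value(void)
{
    uint32_t v = 0;
    v |= FIELD_VAL(19, 19, 0x1); /* SEMAPHORE_REDUCTION_ENABLE_TRUE (19:19)  */
    v |= FIELD_VAL(21, 20, 0x2); /* COPY_TYPE_NONPROT2NONPROT       (21:20)  */
    v |= FIELD_VAL(27, 27, 0x0); /* SEMAPHORE_PAYLOAD_SIZE_ONE_WORD (27:27)  */
    return v;
}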

View File

@@ -0,0 +1,74 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2023 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __gb202_clca6f_h__
#define __gb202_clca6f_h__
typedef volatile struct Nvca6fControl_struct {
NvU32 Ignored00[0x23]; /* 0000-008b*/
NvU32 GPPut; /* GP FIFO put offset 008c-008f*/
NvU32 Ignored01[0x5c];
} Nvca6fControl, BlackwellBControlGPFifo;
#define BLACKWELL_CHANNEL_GPFIFO_B (0x0000CA6F)
#define NVCA6F_SET_OBJECT (0x00000000)
#define NVCA6F_SEM_ADDR_LO (0x0000005c)
#define NVCA6F_SEM_ADDR_LO_OFFSET 31:2
#define NVCA6F_SEM_ADDR_HI (0x00000060)
#define NVCA6F_SEM_ADDR_HI_OFFSET 24:0
#define NVCA6F_SEM_PAYLOAD_LO (0x00000064)
#define NVCA6F_SEM_PAYLOAD_HI (0x00000068)
#define NVCA6F_SEM_EXECUTE (0x0000006c)
#define NVCA6F_SEM_EXECUTE_OPERATION 2:0
#define NVCA6F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
#define NVCA6F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
#define NVCA6F_SEM_EXECUTE_RELEASE_WFI 20:20
#define NVCA6F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
#define NVCA6F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
#define NVCA6F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000
/* GPFIFO entry format */
#define NVCA6F_GP_ENTRY__SIZE 8
#define NVCA6F_GP_ENTRY0_FETCH 0:0
#define NVCA6F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000
#define NVCA6F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001
#define NVCA6F_GP_ENTRY0_GET 31:2
#define NVCA6F_GP_ENTRY0_OPERAND 31:0
#define NVCA6F_GP_ENTRY0_PB_EXTENDED_BASE_OPERAND 24:8
#define NVCA6F_GP_ENTRY1_GET_HI 7:0
#define NVCA6F_GP_ENTRY1_LEVEL 9:9
#define NVCA6F_GP_ENTRY1_LEVEL_MAIN 0x00000000
#define NVCA6F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001
#define NVCA6F_GP_ENTRY1_LENGTH 30:10
#define NVCA6F_GP_ENTRY1_SYNC 31:31
#define NVCA6F_GP_ENTRY1_SYNC_PROCEED 0x00000000
#define NVCA6F_GP_ENTRY1_SYNC_WAIT 0x00000001
#define NVCA6F_GP_ENTRY1_OPCODE 7:0
#define NVCA6F_GP_ENTRY1_OPCODE_NOP 0x00000000
#define NVCA6F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001
#define NVCA6F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002
#define NVCA6F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003
#define NVCA6F_GP_ENTRY1_OPCODE_SET_PB_SEGMENT_EXTENDED_BASE 0x00000004
#endif // __gb202_clca6f_h__
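A GPFIFO entry for this class is two 32-bit words (NVCA6F_GP_ENTRY__SIZE is 8 bytes); GP_ENTRY0_GET carries pushbuffer address bits 31:2, GP_ENTRY1_GET_HI the next 8 address bits, and GP_ENTRY1_LENGTH the segment length. A sketch of packing one entry, assuming a 4-byte-aligned pushbuffer address and a length given in 32-bit words (function and parameter names are illustrative):

#include <stdint.h>

static void example_pack_gp_entry(uint64_t pushbuf_va, uint32_t length_in_words,
                                  uint32_t entry[2])
{
    /* GP_ENTRY0_GET (31:2): low 32 bits of a 4-byte-aligned pushbuffer address. */
    entry[0] = (uint32_t)(pushbuf_va & 0xFFFFFFFCu);
    /* GP_ENTRY1_GET_HI (7:0) holds address bits 39:32; GP_ENTRY1_LENGTH (30:10)
     * holds the pushbuffer length in 32-bit words. */
    entry[1] = (uint32_t)((pushbuf_va >> 32) & 0xFFu)
             | (length_in_words << 10);
}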

View File

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _clcab5_h_
#define _clcab5_h_
#define BLACKWELL_DMA_COPY_B (0x0000CAB5)
#define NVCAB5_LAUNCH_DMA (0x00000300)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PREFETCH (0x00000003)
#define NVCAB5_REQ_ATTR (0x00000754)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS 1:0
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_FIRST (0x00000000)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_NORMAL (0x00000001)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_LAST (0x00000002)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_DEMOTE (0x00000003)
#endif /* _clcab5_h_ */
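The new REQ_ATTR method lets a prefetch-type transfer choose an L2 eviction class. A minimal sketch, assuming a push() helper that writes one method/data pair to the channel (that helper and the call sequence are assumptions, not part of this header):

#include <stdint.h>

extern void push(uint32_t method, uint32_t data);   /* assumed host interface */

static void example_prefetch_launch(void)
{
    /* NVCAB5_REQ_ATTR: PREFETCH_L2_CLASS (1:0) = EVICT_LAST */
    push(0x00000754, 0x00000002);
    /* NVCAB5_LAUNCH_DMA: DATA_TRANSFER_TYPE (1:0) = PREFETCH */
    push(0x00000300, 0x00000003);
}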

View File

@@ -1,25 +1,25 @@
/*******************************************************************************
Copyright (c) 2021-2022 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvtypes.h"
@@ -32,6 +32,28 @@ extern "C" {
#define HOPPER_SEC2_WORK_LAUNCH_A (0x0000CBA2)
typedef volatile struct _clcba2_tag0 {
NvV32 Reserved00[0x100];
NvV32 DecryptCopySrcAddrHi; // 0x00000400 - 0x00000403
NvV32 DecryptCopySrcAddrLo; // 0x00000404 - 0x00000407
NvV32 DecryptCopyDstAddrHi; // 0x00000408 - 0x0000040B
NvV32 DecryptCopyDstAddrLo; // 0x0000040c - 0x0000040F
NvU32 DecryptCopySize; // 0x00000410 - 0x00000413
NvU32 DecryptCopyAuthTagAddrHi; // 0x00000414 - 0x00000417
NvU32 DecryptCopyAuthTagAddrLo; // 0x00000418 - 0x0000041B
NvV32 DigestAddrHi; // 0x0000041C - 0x0000041F
NvV32 DigestAddrLo; // 0x00000420 - 0x00000423
NvV32 Reserved01[0x7];
NvV32 SemaphoreA; // 0x00000440 - 0x00000443
NvV32 SemaphoreB; // 0x00000444 - 0x00000447
NvV32 SemaphoreSetPayloadLower; // 0x00000448 - 0x0000044B
NvV32 SemaphoreSetPayloadUppper; // 0x0000044C - 0x0000044F
NvV32 SemaphoreD; // 0x00000450 - 0x00000453
NvU32 Reserved02[0x7];
NvV32 Execute; // 0x00000470 - 0x00000473
NvV32 Reserved03[0x23];
} NVCBA2_HOPPER_SEC2_WORK_LAUNCH_AControlPio;
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_HI (0x00000400)
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_HI_DATA 24:0
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_LO (0x00000404)
@@ -90,6 +112,46 @@ extern "C" {
#define NVCBA2_EXECUTE_TIMESTAMP 5:5
#define NVCBA2_EXECUTE_TIMESTAMP_DISABLE (0x00000000)
#define NVCBA2_EXECUTE_TIMESTAMP_ENABLE (0x00000001)
#define NVCBA2_EXECUTE_PHYSICAL_SCRUBBER 6:6
#define NVCBA2_EXECUTE_PHYSICAL_SCRUBBER_DISABLE (0x00000000)
#define NVCBA2_EXECUTE_PHYSICAL_SCRUBBER_ENABLE (0x00000001)
// Class definitions
#define NVCBA2_DECRYPT_COPY_SIZE_MAX_BYTES (2*1024*1024)
#define NVCBA2_DECRYPT_SCRUB_SIZE_MAX_BYTES (1024*1024*1024)
// Errors
#define NVCBA2_ERROR_NONE (0x00000000)
#define NVCBA2_ERROR_DECRYPT_COPY_SRC_ADDR_MISALIGNED_POINTER (0x00000001)
#define NVCBA2_ERROR_DECRYPT_COPY_DEST_ADDR_MISALIGNED_POINTER (0x00000002)
#define NVCBA2_ERROR_DECRYPT_COPY_AUTH_TAG_ADDR_MISALIGNED_POINTER (0x00000003)
#define NVCBA2_ERROR_DECRYPT_COPY_DMA_NACK (0x00000004)
#define NVCBA2_ERROR_DECRYPT_COPY_AUTH_TAG_MISMATCH (0x00000005)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_MISALIGNED_POINTER (0x00000006)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_DMA_NACK (0x00000007)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_CHECK_FAILURE (0x00000008)
#define NVCBA2_ERROR_MISALIGNED_SIZE (0x00000009)
#define NVCBA2_ERROR_MISSING_METHODS (0x0000000A)
#define NVCBA2_ERROR_SEMAPHORE_RELEASE_DMA_NACK (0x0000000B)
#define NVCBA2_ERROR_DECRYPT_SIZE_MAX_EXCEEDED (0x0000000C)
#define NVCBA2_ERROR_OS_APPLICATION (0x0000000D)
#define NVCBA2_ERROR_INVALID_CTXSW_REQUEST (0x0000000E)
#define NVCBA2_ERROR_BUFFER_OVERFLOW (0x0000000F)
#define NVCBA2_ERROR_IV_OVERFLOW (0x00000010)
#define NVCBA2_ERROR_INTERNAL_SETUP_FAILURE (0x00000011)
#define NVCBA2_ERROR_DECRYPT_COPY_INTERNAL_DMA_FAILURE (0x00000012)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_INTERNAL_DMA_FAILURE (0x00000013)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_HMAC_CALC_FAILURE (0x00000014)
#define NVCBA2_ERROR_NONCE_OVERFLOW (0x00000015)
#define NVCBA2_ERROR_AES_GCM_DECRYPTION_FAILURE (0x00000016)
#define NVCBA2_ERROR_SEMAPHORE_RELEASE_INTERNAL_DMA_FAILURE (0x00000017)
#define NVCBA2_ERROR_KEY_DERIVATION_FAILURE (0x00000018)
#define NVCBA2_ERROR_SCRUBBER_FAILURE (0x00000019)
#define NVCBA2_ERROR_SCRUBBER_INVALD_ADDRESS (0x0000001a)
#define NVCBA2_ERROR_SCRUBBER_INSUFFICIENT_PERMISSIONS (0x0000001b)
#define NVCBA2_ERROR_SCRUBBER_MUTEX_ACQUIRE_FAILURE (0x0000001c)
#define NVCBA2_ERROR_SCRUB_SIZE_MAX_EXCEEDED (0x0000001d)
#define NVCBA2_ERROR_SIZE_ZERO (0x0000001e)
#ifdef __cplusplus
}; /* extern "C" */
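The error codes above imply the checks a client should perform before submitting a decrypt copy: a non-zero size, a size no larger than NVCBA2_DECRYPT_COPY_SIZE_MAX_BYTES, and aligned size and addresses. A sketch of those checks; the 16-byte granularity is an assumption (the header defines the misalignment errors but not the required alignment), and the simplified src|dst test reports only the source-address error code.

#include <stdint.h>

#define EXAMPLE_DECRYPT_COPY_SIZE_MAX_BYTES (2*1024*1024)

static int example_validate_decrypt_copy(uint64_t src, uint64_t dst, uint64_t size)
{
    const uint64_t assumed_alignment = 16;   /* assumption, see note above */

    if (size == 0)
        return 0x1e;                          /* ..._ERROR_SIZE_ZERO             */
    if (size > EXAMPLE_DECRYPT_COPY_SIZE_MAX_BYTES)
        return 0x0C;                          /* ..._DECRYPT_SIZE_MAX_EXCEEDED   */
    if (size % assumed_alignment)
        return 0x09;                          /* ..._ERROR_MISALIGNED_SIZE       */
    if ((src | dst) % assumed_alignment)
        return 0x01;                          /* ..._SRC_ADDR_MISALIGNED_POINTER */
    return 0x00;                              /* ..._ERROR_NONE                  */
}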

View File

@@ -35,6 +35,7 @@
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GH100 (0x00000180)
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_AD100 (0x00000190)
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GB100 (0x000001A0)
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GB200 (0x000001B0)
/* valid ARCHITECTURE_GP10x implementation values */
#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GP100 (0x00000000)
@@ -42,4 +43,7 @@
#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA100 (0x00000000)
#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA000 (0x00000001)
#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GB10B (0x0000000B)
#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GB20B (0x0000000B)
#endif /* _ctrl2080mc_h_ */

View File

@@ -1,560 +0,0 @@
/*******************************************************************************
Copyright (c) 2003-2016 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef __gb100_dev_mmu_h__
#define __gb100_dev_mmu_h__
/* This file is autogenerated. Do not edit */
#define NV_MMU_PDE /* ----G */
#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */
#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */
#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */
#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */
#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */
#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */
#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */
#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */
#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */
#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */
#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */
#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */
#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */
#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */
#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */
#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */
#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */
#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_PDE__SIZE 8
#define NV_MMU_PTE /* ----G */
#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */
#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */
#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */
#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */
#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */
#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */
#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */
#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */
#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */
#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */
#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */
#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */
#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */
#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */
#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */
#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */
#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */
#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */
#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */
#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */
#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */
#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */
#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */
#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */
#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */
#define NV_MMU_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */
#define NV_MMU_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */
#define NV_MMU_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */
#define NV_MMU_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */
#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */
#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */
#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */
#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */
#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */
#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */
#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_PTE__SIZE 8
#define NV_MMU_PTE_COMPTAGS_NONE 0x0 /* */
#define NV_MMU_PTE_COMPTAGS_1 0x1 /* */
#define NV_MMU_PTE_COMPTAGS_2 0x2 /* */
#define NV_MMU_PTE_KIND (1*32+7):(1*32+4) /* RWXVF */
#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */
#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */
#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x6 /* R---V */
#define NV_MMU_PTE_KIND_Z16 0x1 /* R---V */
#define NV_MMU_PTE_KIND_S8 0x2 /* R---V */
#define NV_MMU_PTE_KIND_S8Z24 0x3 /* R---V */
#define NV_MMU_PTE_KIND_ZF32_X24S8 0x4 /* R---V */
#define NV_MMU_PTE_KIND_Z24S8 0x5 /* R---V */
#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x8 /* R---V */
#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x9 /* R---V */
#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0xA /* R---V */
#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0xB /* R---V */
#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0xC /* R---V */
#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0xD /* R---V */
#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0xE /* R---V */
#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xF /* R---V */
#define NV_MMU_VER1_PDE /* ----G */
#define NV_MMU_VER1_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */
#define NV_MMU_VER1_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER1_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER1_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */
#define NV_MMU_VER1_PDE_SIZE_FULL 0x00000000 /* RW--V */
#define NV_MMU_VER1_PDE_SIZE_HALF 0x00000001 /* RW--V */
#define NV_MMU_VER1_PDE_SIZE_QUARTER 0x00000002 /* RW--V */
#define NV_MMU_VER1_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */
#define NV_MMU_VER1_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */
#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */
#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */
#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_VER1_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */
#define NV_MMU_VER1_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER1_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER1_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */
#define NV_MMU_VER1_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_VER1_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_VER1_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */
#define NV_MMU_VER1_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */
#define NV_MMU_VER1_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */
#define NV_MMU_VER1_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */
#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */
#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */
#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_VER1_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER1_PDE__SIZE 8
#define NV_MMU_VER1_PTE /* ----G */
#define NV_MMU_VER1_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */
#define NV_MMU_VER1_PTE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_VER1_PTE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_VER1_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */
#define NV_MMU_VER1_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER1_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER1_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */
#define NV_MMU_VER1_PTE_READ_ONLY_TRUE 0x1 /* RW--V */
#define NV_MMU_VER1_PTE_READ_ONLY_FALSE 0x0 /* RW--V */
#define NV_MMU_VER1_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */
#define NV_MMU_VER1_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */
#define NV_MMU_VER1_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */
#define NV_MMU_VER1_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */
#define NV_MMU_VER1_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */
#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */
#define NV_MMU_VER1_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */
#define NV_MMU_VER1_PTE_VOL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_VER1_PTE_VOL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_VER1_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */
#define NV_MMU_VER1_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
#define NV_MMU_VER1_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER1_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */
#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER1_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */
#define NV_MMU_VER1_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */
#define NV_MMU_VER1_PTE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER1_PTE__SIZE 8
#define NV_MMU_VER1_PTE_COMPTAGS_NONE 0x0 /* */
#define NV_MMU_VER1_PTE_COMPTAGS_1 0x1 /* */
#define NV_MMU_VER1_PTE_COMPTAGS_2 0x2 /* */
#define NV_MMU_NEW_PDE /* ----G */
#define NV_MMU_NEW_PDE_IS_PTE 0:0 /* RWXVF */
#define NV_MMU_NEW_PDE_IS_PTE_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_PDE_IS_PTE_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_PDE_IS_PDE 0:0 /* RWXVF */
#define NV_MMU_NEW_PDE_IS_PDE_TRUE 0x0 /* RW--V */
#define NV_MMU_NEW_PDE_IS_PDE_FALSE 0x1 /* RW--V */
#define NV_MMU_NEW_PDE_VALID 0:0 /* RWXVF */
#define NV_MMU_NEW_PDE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_PDE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_PDE_APERTURE 2:1 /* RWXVF */
#define NV_MMU_NEW_PDE_APERTURE_INVALID 0x00000000 /* RW--V */
#define NV_MMU_NEW_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_NEW_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_NEW_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_NEW_PDE_VOL 3:3 /* RWXVF */
#define NV_MMU_NEW_PDE_VOL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_NEW_PDE_VOL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_NEW_PDE_NO_ATS 5:5 /* RWXVF */
#define NV_MMU_NEW_PDE_NO_ATS_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_PDE_NO_ATS_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_PDE_ADDRESS_SYS 53:8 /* RWXVF */
#define NV_MMU_NEW_PDE_ADDRESS_VID (35-3):8 /* RWXVF */
#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */
#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_NEW_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_NEW_PDE__SIZE 8
#define NV_MMU_NEW_DUAL_PDE /* ----G */
#define NV_MMU_NEW_DUAL_PDE_IS_PTE 0:0 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_IS_PDE 0:0 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_VALID 0:0 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_NO_ATS 5:5 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_NO_ATS_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_NO_ATS_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */
#define NV_MMU_NEW_DUAL_PDE__SIZE 16
#define NV_MMU_NEW_PTE /* ----G */
#define NV_MMU_NEW_PTE_VALID 0:0 /* RWXVF */
#define NV_MMU_NEW_PTE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_PTE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_PTE_APERTURE 2:1 /* RWXVF */
#define NV_MMU_NEW_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
#define NV_MMU_NEW_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_NEW_PTE_VOL 3:3 /* RWXVF */
#define NV_MMU_NEW_PTE_VOL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_NEW_PTE_VOL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_NEW_PTE_ENCRYPTED 4:4 /* RWXVF */
#define NV_MMU_NEW_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */
#define NV_MMU_NEW_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */
#define NV_MMU_NEW_PTE_PRIVILEGE 5:5 /* RWXVF */
#define NV_MMU_NEW_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_PTE_READ_ONLY 6:6 /* RWXVF */
#define NV_MMU_NEW_PTE_READ_ONLY_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_PTE_READ_ONLY_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */
#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */
#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_SYS 53:8 /* RWXVF */
#define NV_MMU_NEW_PTE_ADDRESS_VID (35-3):8 /* RWXVF */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */
#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */
#define NV_MMU_NEW_PTE_COMPTAGLINE (20+35):36 /* RWXVF */
#define NV_MMU_NEW_PTE_KIND 63:56 /* RWXVF */
#define NV_MMU_NEW_PTE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_NEW_PTE__SIZE 8
#define NV_MMU_VER2_PDE /* ----G */
#define NV_MMU_VER2_PDE_IS_PTE 0:0 /* RWXVF */
#define NV_MMU_VER2_PDE_IS_PTE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_PDE_IS_PTE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_PDE_IS_PDE 0:0 /* RWXVF */
#define NV_MMU_VER2_PDE_IS_PDE_TRUE 0x0 /* RW--V */
#define NV_MMU_VER2_PDE_IS_PDE_FALSE 0x1 /* RW--V */
#define NV_MMU_VER2_PDE_VALID 0:0 /* RWXVF */
#define NV_MMU_VER2_PDE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_PDE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_PDE_APERTURE 2:1 /* RWXVF */
#define NV_MMU_VER2_PDE_APERTURE_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER2_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER2_PDE_VOL 3:3 /* RWXVF */
#define NV_MMU_VER2_PDE_VOL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_VER2_PDE_VOL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_VER2_PDE_NO_ATS 5:5 /* RWXVF */
#define NV_MMU_VER2_PDE_NO_ATS_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_PDE_NO_ATS_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_PDE_ADDRESS_SYS 53:8 /* RWXVF */
#define NV_MMU_VER2_PDE_ADDRESS_VID (35-3):8 /* RWXVF */
#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */
#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_VER2_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER2_PDE__SIZE 8
#define NV_MMU_VER2_DUAL_PDE /* ----G */
#define NV_MMU_VER2_DUAL_PDE_IS_PTE 0:0 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_IS_PDE 0:0 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_VALID 0:0 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_NO_ATS 5:5 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_NO_ATS_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_NO_ATS_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */
#define NV_MMU_VER2_DUAL_PDE__SIZE 16
#define NV_MMU_VER2_PTE /* ----G */
#define NV_MMU_VER2_PTE_VALID 0:0 /* RWXVF */
#define NV_MMU_VER2_PTE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_PTE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_PTE_APERTURE 2:1 /* RWXVF */
#define NV_MMU_VER2_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
#define NV_MMU_VER2_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER2_PTE_VOL 3:3 /* RWXVF */
#define NV_MMU_VER2_PTE_VOL_TRUE 0x00000001 /* RW--V */
#define NV_MMU_VER2_PTE_VOL_FALSE 0x00000000 /* RW--V */
#define NV_MMU_VER2_PTE_ENCRYPTED 4:4 /* RWXVF */
#define NV_MMU_VER2_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */
#define NV_MMU_VER2_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */
#define NV_MMU_VER2_PTE_PRIVILEGE 5:5 /* RWXVF */
#define NV_MMU_VER2_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_PTE_READ_ONLY 6:6 /* RWXVF */
#define NV_MMU_VER2_PTE_READ_ONLY_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_PTE_READ_ONLY_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */
#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_SYS 53:8 /* RWXVF */
#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */
#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */
#define NV_MMU_VER2_PTE_COMPTAGLINE (20+35):36 /* RWXVF */
#define NV_MMU_VER2_PTE_KIND 63:56 /* RWXVF */
#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER2_PTE__SIZE 8
#define NV_MMU_VER3_PDE /* ----G */
#define NV_MMU_VER3_PDE_IS_PTE 0:0 /* RWXVF */
#define NV_MMU_VER3_PDE_IS_PTE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER3_PDE_IS_PTE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER3_PDE_VALID 0:0 /* RWXVF */
#define NV_MMU_VER3_PDE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_VER3_PDE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_VER3_PDE_APERTURE 2:1 /* RWXVF */
#define NV_MMU_VER3_PDE_APERTURE_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER3_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER3_PDE_PCF 5:3 /* RWXVF */
#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_PDE_ADDRESS 51:12 /* RWXVF */
#define NV_MMU_VER3_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER3_PDE__SIZE 8
#define NV_MMU_VER3_DUAL_PDE /* ----G */
#define NV_MMU_VER3_DUAL_PDE_IS_PTE 0:0 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_VALID 0:0 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG 5:3 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG 51:8 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL 69:67 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SMALL 115:76 /* RWXVF */
#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */
#define NV_MMU_VER3_DUAL_PDE__SIZE 16
#define NV_MMU_VER3_PTE /* ----G */
#define NV_MMU_VER3_PTE_VALID 0:0 /* RWXVF */
#define NV_MMU_VER3_PTE_VALID_TRUE 0x1 /* RW--V */
#define NV_MMU_VER3_PTE_VALID_FALSE 0x0 /* RW--V */
#define NV_MMU_VER3_PTE_APERTURE 2:1 /* RWXVF */
#define NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
#define NV_MMU_VER3_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
#define NV_MMU_VER3_PTE_PCF 7:3 /* RWXVF */
#define NV_MMU_VER3_PTE_PCF_INVALID 0x00000000 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_SPARSE 0x00000001 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_MAPPING_NOWHERE 0x00000002 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_NO_VALID_4KB_PAGE 0x00000003 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000000 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000001 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000002 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000003 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000004 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x00000005 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACE 0x00000006 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x00000007 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000008 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000009 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x0000000A /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x0000000B /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x0000000C /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000D /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x0000000E /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000F /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000010 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000011 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000012 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000013 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000014 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x00000015 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD 0x00000016 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD 0x00000017 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000018 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000019 /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACD 0x0000001A /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACD 0x0000001B /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x0000001C /* RW--V */
#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001D /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACD 0x0000001E /* RW--V */
#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001F /* RW--V */
#define NV_MMU_VER3_PTE_KIND 11:8 /* RWXVF */
#define NV_MMU_VER3_PTE_ADDRESS 51:12 /* RWXVF */
#define NV_MMU_VER3_PTE_ADDRESS_SYS 51:12 /* RWXVF */
#define NV_MMU_VER3_PTE_ADDRESS_PEER 51:12 /* RWXVF */
#define NV_MMU_VER3_PTE_ADDRESS_VID 39:12 /* RWXVF */
#define NV_MMU_VER3_PTE_PEER_ID 63:(64-3) /* RWXVF */
#define NV_MMU_VER3_PTE_PEER_ID_0 0x00000000 /* RW--V */
#define NV_MMU_VER3_PTE_PEER_ID_1 0x00000001 /* RW--V */
#define NV_MMU_VER3_PTE_PEER_ID_2 0x00000002 /* RW--V */
#define NV_MMU_VER3_PTE_PEER_ID_3 0x00000003 /* RW--V */
#define NV_MMU_VER3_PTE_PEER_ID_4 0x00000004 /* RW--V */
#define NV_MMU_VER3_PTE_PEER_ID_5 0x00000005 /* RW--V */
#define NV_MMU_VER3_PTE_PEER_ID_6 0x00000006 /* RW--V */
#define NV_MMU_VER3_PTE_PEER_ID_7 0x00000007 /* RW--V */
#define NV_MMU_VER3_PTE_ADDRESS_SHIFT 0x0000000c /* */
#define NV_MMU_VER3_PTE__SIZE 8
#define NV_MMU_CLIENT /* ----G */
#define NV_MMU_CLIENT_KIND 2:0 /* RWXVF */
#define NV_MMU_CLIENT_KIND_Z16 0x1 /* R---V */
#define NV_MMU_CLIENT_KIND_S8 0x2 /* R---V */
#define NV_MMU_CLIENT_KIND_S8Z24 0x3 /* R---V */
#define NV_MMU_CLIENT_KIND_ZF32_X24S8 0x4 /* R---V */
#define NV_MMU_CLIENT_KIND_Z24S8 0x5 /* R---V */
#define NV_MMU_CLIENT_KIND_GENERIC_MEMORY 0x6 /* R---V */
#define NV_MMU_CLIENT_KIND_INVALID 0x7 /* R---V */
#endif // __gb100_dev_mmu_h__
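As a quick orientation to the field macros above: NV_MMU_VER3_PTE_ADDRESS_SYS occupies bits 51:12 and NV_MMU_VER3_PTE_KIND bits 11:8 of the 8-byte version-3 PTE. The following stand-alone sketch is illustrative only; the valid/PCF/peer-ID fields defined elsewhere in this header are omitted, and the driver's MMU HAL remains the authoritative encoder.

// Sketch only: place a 4 KiB-aligned system-memory physical address and a kind
// value into the ADDRESS (51:12) and KIND (11:8) fields of a version-3 PTE.
static inline NvU64 example_pack_ver3_pte(NvU64 sys_phys_addr, NvU64 kind)
{
    NvU64 pte = 0;

    // ADDRESS_SYS holds PA[51:12] starting at bit NV_MMU_VER3_PTE_ADDRESS_SHIFT (12).
    pte |= (sys_phys_addr >> NV_MMU_VER3_PTE_ADDRESS_SHIFT) << NV_MMU_VER3_PTE_ADDRESS_SHIFT;

    // KIND is a 4-bit field at bits 11:8.
    pte |= (kind & 0xFULL) << 8;

    return pte;
}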

View File

@@ -1,14 +1,6 @@
NVIDIA_UVM_SOURCES ?=
NVIDIA_UVM_SOURCES_CXX ?=
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_conf_computing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_sec2_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_common.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_linux.c
NVIDIA_UVM_SOURCES += nvidia-uvm/nvstatus.c
@@ -29,6 +21,7 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rm_mem.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_channel.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_lock.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hal.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_fd_type.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_processors.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree.c
@@ -53,6 +46,7 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_tracker.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_access_counter_buffer.c
@@ -66,7 +60,6 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_access_counter_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_access_counter_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_fault_buffer.c
@@ -81,8 +74,13 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ada.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_policy.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_utils.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_kvmalloc.c
@@ -98,9 +96,9 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_heuristics.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_thrashing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_prefetch.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_ibm.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_faults.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_sva.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_conf_computing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test_rng.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree_test.c
@@ -128,3 +126,5 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_block_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_group_tree_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_thread_context_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_sec2_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test_file.c

View File

@@ -50,7 +50,6 @@ NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_UVM_OBJECTS)
NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_empty
NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_replace_slot
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_npu2_init_context
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpumask_of_node
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get
@@ -61,7 +60,9 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_fault_to_errno
NV_CONFTEST_FUNCTION_COMPILE_TESTS += find_next_bit_wrap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_is_dma_domain
NV_CONFTEST_FUNCTION_COMPILE_TESTS += for_each_sgtable_dma_page
NV_CONFTEST_FUNCTION_COMPILE_TESTS += folio_test_swapcache
NV_CONFTEST_FUNCTION_COMPILE_TESTS += page_pgmap
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_invalidate_range
@@ -75,7 +76,10 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
NV_CONFTEST_TYPE_COMPILE_TESTS += mpol_preferred_many_present
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_interval_notifier
NV_CONFTEST_TYPE_COMPILE_TESTS += fault_flag_remote_present
NV_CONFTEST_TYPE_COMPILE_TESTS += sg_dma_page_iter
NV_CONFTEST_TYPE_COMPILE_TESTS += struct_page_has_zone_device_data
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_migrate_vma_setup
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___iowrite64_lo_hi
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_make_device_exclusive
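Each NV_CONFTEST_FUNCTION/TYPE/SYMBOL entry above names a probe that the build system compiles against the target kernel to detect whether the corresponding API exists. As a hedged illustration only (not the generated probe, whose exact shape differs), a hand-written equivalent of the page_pgmap function test might look like:

// Hypothetical probe: if this compiles against the target kernel headers,
// page_pgmap() is available and the matching feature define can be emitted.
// The include below is an assumption about where the accessor is declared.
#include <linux/memremap.h>

void conftest_page_pgmap(struct page *page)
{
    struct dev_pagemap *pgmap = page_pgmap(page);
    (void)pgmap;
}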

View File

@@ -33,10 +33,12 @@
#include "uvm_va_block.h"
#include "uvm_tools.h"
#include "uvm_common.h"
#include "uvm_fd_type.h"
#include "uvm_linux_ioctl.h"
#include "uvm_hmm.h"
#include "uvm_mem.h"
#include "uvm_kvmalloc.h"
#include "uvm_test_file.h"
#define NVIDIA_UVM_DEVICE_NAME "nvidia-uvm"
@@ -49,55 +51,9 @@ bool uvm_file_is_nvidia_uvm(struct file *filp)
return (filp != NULL) && (filp->f_op == &uvm_fops);
}
uvm_fd_type_t uvm_fd_type(struct file *filp, void **ptr_val)
bool uvm_file_is_nvidia_uvm_va_space(struct file *filp)
{
unsigned long uptr;
uvm_fd_type_t type;
void *ptr;
UVM_ASSERT(uvm_file_is_nvidia_uvm(filp));
uptr = atomic_long_read_acquire((atomic_long_t *) (&filp->private_data));
type = (uvm_fd_type_t)(uptr & UVM_FD_TYPE_MASK);
ptr = (void *)(uptr & ~UVM_FD_TYPE_MASK);
BUILD_BUG_ON(UVM_FD_COUNT > UVM_FD_TYPE_MASK + 1);
switch (type) {
case UVM_FD_UNINITIALIZED:
case UVM_FD_INITIALIZING:
UVM_ASSERT(!ptr);
break;
case UVM_FD_VA_SPACE:
UVM_ASSERT(ptr);
BUILD_BUG_ON(__alignof__(uvm_va_space_t) < (1UL << UVM_FD_TYPE_BITS));
break;
case UVM_FD_MM:
UVM_ASSERT(ptr);
BUILD_BUG_ON(__alignof__(struct file) < (1UL << UVM_FD_TYPE_BITS));
break;
default:
UVM_ASSERT(0);
}
if (ptr_val)
*ptr_val = ptr;
return type;
}
void *uvm_fd_get_type(struct file *filp, uvm_fd_type_t type)
{
void *ptr;
UVM_ASSERT(uvm_file_is_nvidia_uvm(filp));
if (uvm_fd_type(filp, &ptr) == type)
return ptr;
else
return NULL;
return uvm_file_is_nvidia_uvm(filp) && uvm_fd_type(filp, NULL) == UVM_FD_VA_SPACE;
}
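The fd-type logic above treats filp->private_data as a tagged pointer: the low UVM_FD_TYPE_BITS hold a uvm_fd_type_t and the remaining bits hold a suitably aligned pointer, which is what the BUILD_BUG_ON alignment checks guard. A minimal sketch of the setter side, reconstructed from the open-coded atomic_long_set_release() sites that this change replaces with uvm_fd_type_set() (the real helper lives in the new uvm_fd_type.c and may differ):

// Sketch only: publish a (type, pointer) pair into filp->private_data with
// release semantics, mirroring the former inline call sites.
static void example_fd_type_set(struct file *filp, uvm_fd_type_t type, void *ptr)
{
    // The pointer must not spill into the low type bits.
    UVM_ASSERT(((unsigned long)ptr & UVM_FD_TYPE_MASK) == 0);

    atomic_long_set_release((atomic_long_t *)&filp->private_data, (long)ptr | type);
}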
static NV_STATUS uvm_api_mm_initialize(UVM_MM_INITIALIZE_PARAMS *params, struct file *filp)
@@ -105,7 +61,6 @@ static NV_STATUS uvm_api_mm_initialize(UVM_MM_INITIALIZE_PARAMS *params, struct
uvm_va_space_t *va_space;
uvm_va_space_mm_t *va_space_mm;
struct file *uvm_file;
uvm_fd_type_t old_fd_type;
struct mm_struct *mm;
NV_STATUS status;
@@ -127,14 +82,9 @@ static NV_STATUS uvm_api_mm_initialize(UVM_MM_INITIALIZE_PARAMS *params, struct
goto err;
}
old_fd_type = atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
UVM_FD_UNINITIALIZED,
UVM_FD_INITIALIZING);
old_fd_type &= UVM_FD_TYPE_MASK;
if (old_fd_type != UVM_FD_UNINITIALIZED) {
status = NV_ERR_IN_USE;
status = uvm_fd_type_init(filp);
if (status != NV_OK)
goto err;
}
va_space_mm = &va_space->va_space_mm;
uvm_spin_lock(&va_space_mm->lock);
@@ -173,13 +123,13 @@ static NV_STATUS uvm_api_mm_initialize(UVM_MM_INITIALIZE_PARAMS *params, struct
break;
}
uvm_spin_unlock(&va_space_mm->lock);
atomic_long_set_release((atomic_long_t *)&filp->private_data, (long)uvm_file | UVM_FD_MM);
uvm_fd_type_set(filp, UVM_FD_MM, uvm_file);
return NV_OK;
err_release_unlock:
uvm_spin_unlock(&va_space_mm->lock);
atomic_long_set_release((atomic_long_t *)&filp->private_data, UVM_FD_UNINITIALIZED);
uvm_fd_type_set(filp, UVM_FD_UNINITIALIZED, NULL);
err:
if (uvm_file)
@@ -240,7 +190,7 @@ static void uvm_release_deferred(void *data)
// Since this function is only scheduled to run when uvm_release() fails
// to trylock-acquire the pm.lock, the following acquisition attempt
// is expected to block this thread, and cause it to remain blocked until
// uvm_resume() releases the lock. As a result, the deferred release
// kthread queue may stall for long periods of time.
uvm_down_read(&g_uvm_global.pm.lock);
@@ -249,12 +199,43 @@ static void uvm_release_deferred(void *data)
uvm_up_read(&g_uvm_global.pm.lock);
}
static void uvm_mm_release(struct file *filp, struct file *uvm_file)
static void uvm_release_va_space(struct file *filp, uvm_va_space_t *va_space)
{
int ret;
filp->private_data = NULL;
filp->f_mapping = NULL;
// Because the kernel discards the status code returned from this release
// callback, early exit in case of a pm.lock acquisition failure is not
// an option. Instead, the teardown work normally performed synchronously
// needs to be scheduled to run after uvm_resume() releases the lock.
if (uvm_down_read_trylock(&g_uvm_global.pm.lock)) {
uvm_va_space_destroy(va_space);
uvm_up_read(&g_uvm_global.pm.lock);
}
else {
// Remove references to this inode from the address_space. This isn't
// strictly necessary, as any CPU mappings of this file have already
// been destroyed, and va_space->mapping won't be used again. Still,
// the va_space survives the inode if its destruction is deferred, in
// which case the references are rendered stale.
address_space_init_once(va_space->mapping);
nv_kthread_q_item_init(&va_space->deferred_release_q_item, uvm_release_deferred, va_space);
ret = nv_kthread_q_schedule_q_item(&g_uvm_global.deferred_release_q, &va_space->deferred_release_q_item);
UVM_ASSERT(ret != 0);
}
}
static void uvm_release_mm(struct file *filp, struct file *uvm_file)
{
uvm_va_space_t *va_space = uvm_va_space_get(uvm_file);
uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm;
struct mm_struct *mm = va_space_mm->mm;
uvm_kvfree(filp->f_mapping);
if (uvm_va_space_mm_enabled(va_space)) {
uvm_va_space_mm_unregister(va_space);
@@ -269,46 +250,27 @@ static void uvm_mm_release(struct file *filp, struct file *uvm_file)
static int uvm_release(struct inode *inode, struct file *filp)
{
void *ptr;
uvm_va_space_t *va_space;
uvm_fd_type_t fd_type;
int ret;
uvm_fd_type_t fd_type = uvm_fd_type(filp, &ptr);
fd_type = uvm_fd_type(filp, &ptr);
UVM_ASSERT(fd_type != UVM_FD_INITIALIZING);
if (fd_type == UVM_FD_UNINITIALIZED) {
uvm_kvfree(filp->f_mapping);
return 0;
}
else if (fd_type == UVM_FD_MM) {
uvm_kvfree(filp->f_mapping);
uvm_mm_release(filp, (struct file *)ptr);
return 0;
}
switch (fd_type) {
case UVM_FD_UNINITIALIZED:
uvm_kvfree(filp->f_mapping);
break;
UVM_ASSERT(fd_type == UVM_FD_VA_SPACE);
va_space = (uvm_va_space_t *)ptr;
filp->private_data = NULL;
filp->f_mapping = NULL;
case UVM_FD_VA_SPACE:
uvm_release_va_space(filp, (uvm_va_space_t *)ptr);
break;
// Because the kernel discards the status code returned from this release
// callback, early exit in case of a pm.lock acquisition failure is not
// an option. Instead, the teardown work normally performed synchronously
// needs to be scheduled to run after uvm_resume() releases the lock.
if (uvm_down_read_trylock(&g_uvm_global.pm.lock)) {
uvm_va_space_destroy(va_space);
uvm_up_read(&g_uvm_global.pm.lock);
}
else {
// Remove references to this inode from the address_space. This isn't
// strictly necessary, as any CPU mappings of this file have already
// been destroyed, and va_space->mapping won't be used again. Still,
// the va_space survives the inode if its destruction is deferred, in
// which case the references are rendered stale.
address_space_init_once(va_space->mapping);
case UVM_FD_MM:
uvm_release_mm(filp, (struct file *)ptr);
break;
nv_kthread_q_item_init(&va_space->deferred_release_q_item, uvm_release_deferred, va_space);
ret = nv_kthread_q_schedule_q_item(&g_uvm_global.deferred_release_q, &va_space->deferred_release_q_item);
UVM_ASSERT(ret != 0);
case UVM_FD_TEST:
uvm_test_file_release(filp, (uvm_test_file_t *)ptr);
break;
default:
UVM_ASSERT_MSG(0, "Unexpected fd type: %d\n", fd_type);
}
return 0;
@@ -680,6 +642,9 @@ static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma)
// Semaphore pool vmas do not have vma wrappers, but some functions will
// assume vm_private_data is a wrapper.
vma->vm_private_data = NULL;
#if defined(VM_WIPEONFORK)
nv_vm_flags_set(vma, VM_WIPEONFORK);
#endif
if (is_fork) {
// If we forked, leave the parent vma alone.
@@ -772,6 +737,9 @@ static void uvm_vm_open_device_p2p(struct vm_area_struct *vma)
// Device P2P vmas do not have vma wrappers, but some functions will
// assume vm_private_data is a wrapper.
vma->vm_private_data = NULL;
#if defined(VM_WIPEONFORK)
nv_vm_flags_set(vma, VM_WIPEONFORK);
#endif
if (is_fork) {
// If we forked, leave the parent vma alone.
@@ -823,6 +791,7 @@ static struct vm_operations_struct uvm_vm_ops_device_p2p =
static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
{
void *fd_type_ptr;
uvm_va_space_t *va_space;
NV_STATUS status = uvm_global_get_status();
int ret = 0;
@@ -831,9 +800,17 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
if (status != NV_OK)
return -nv_status_to_errno(status);
va_space = uvm_fd_va_space(filp);
if (!va_space)
return -EBADFD;
switch (uvm_fd_type(filp, &fd_type_ptr)) {
case UVM_FD_VA_SPACE:
va_space = (uvm_va_space_t *)fd_type_ptr;
break;
case UVM_FD_TEST:
return uvm_test_file_mmap((uvm_test_file_t *)fd_type_ptr, vma);
default:
return -EBADFD;
}
// When the VA space is associated with an mm, all vmas under the VA space
// must come from that mm.
@@ -861,8 +838,8 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
}
// If the PM lock cannot be acquired, disable the VMA and report success
// to the caller. The caller is expected to determine whether the
// map operation succeeded via an ioctl() call. This is necessary to
// to the caller. The caller is expected to determine whether the
// map operation succeeded via an ioctl() call. This is necessary to
// safely handle MAP_FIXED, which needs to complete atomically to prevent
// the loss of the virtual address range.
if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) {
@@ -993,33 +970,40 @@ static NV_STATUS uvm_api_initialize(UVM_INITIALIZE_PARAMS *params, struct file *
// attempt to be made. This is safe because other threads will have only had
// a chance to observe UVM_FD_INITIALIZING and not UVM_FD_VA_SPACE in this
// case.
old_fd_type = atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
UVM_FD_UNINITIALIZED,
UVM_FD_INITIALIZING);
old_fd_type &= UVM_FD_TYPE_MASK;
if (old_fd_type == UVM_FD_UNINITIALIZED) {
status = uvm_va_space_create(filp->f_mapping, &va_space, params->flags);
if (status != NV_OK) {
atomic_long_set_release((atomic_long_t *)&filp->private_data, UVM_FD_UNINITIALIZED);
return status;
}
old_fd_type = uvm_fd_type_init_cas(filp);
switch (old_fd_type) {
case UVM_FD_UNINITIALIZED:
status = uvm_va_space_create(filp->f_mapping, &va_space, params->flags);
if (status != NV_OK) {
uvm_fd_type_set(filp, UVM_FD_UNINITIALIZED, NULL);
return status;
}
atomic_long_set_release((atomic_long_t *)&filp->private_data, (long)va_space | UVM_FD_VA_SPACE);
}
else if (old_fd_type == UVM_FD_VA_SPACE) {
va_space = uvm_va_space_get(filp);
uvm_fd_type_set(filp, UVM_FD_VA_SPACE, va_space);
break;
if (params->flags != va_space->initialization_flags)
case UVM_FD_VA_SPACE:
va_space = uvm_va_space_get(filp);
if (params->flags != va_space->initialization_flags)
status = NV_ERR_INVALID_ARGUMENT;
else
status = NV_OK;
break;
case UVM_FD_MM:
case UVM_FD_TEST:
status = NV_ERR_INVALID_ARGUMENT;
else
status = NV_OK;
}
else if (old_fd_type == UVM_FD_MM) {
status = NV_ERR_INVALID_ARGUMENT;
}
else {
UVM_ASSERT(old_fd_type == UVM_FD_INITIALIZING);
status = NV_ERR_BUSY_RETRY;
break;
case UVM_FD_INITIALIZING:
status = NV_ERR_BUSY_RETRY;
break;
default:
UVM_ASSERT(0);
status = NV_ERR_INVALID_STATE; // Quiet compiler warnings
break;
}
return status;
@@ -1227,19 +1211,8 @@ static int uvm_init(void)
goto error;
}
pr_info("Loaded the UVM driver, major device number %d.\n", MAJOR(g_uvm_base_dev));
if (uvm_enable_builtin_tests)
pr_info("Built-in UVM tests are enabled. This is a security risk.\n");
// After Open RM is released, both the enclosing "#if" and this comment
// block should be removed, because the uvm_hmm_is_enabled_system_wide()
// check is both necessary and sufficient for reporting functionality.
// Until that time, however, we need to avoid advertising UVM's ability to
// enable HMM functionality.
if (uvm_hmm_is_enabled_system_wide())
UVM_INFO_PRINT("HMM (Heterogeneous Memory Management) is enabled in the UVM driver.\n");
UVM_INFO_PRINT("Built-in UVM tests are enabled. This is a security risk.\n");
return 0;
@@ -1268,8 +1241,6 @@ static void uvm_exit(void)
uvm_global_exit();
uvm_test_unload_state_exit();
pr_info("Unloaded the UVM driver.\n");
}
static void __exit uvm_exit_entry(void)

View File

@@ -379,6 +379,17 @@ NV_STATUS UvmIsPageableMemoryAccessSupportedOnGpu(const NvProcessorUuid *gpuUuid
// OS state required to register the GPU is malformed, or the partition
// identified by the user handles or its configuration changed.
//
// NV_ERR_NVLINK_FABRIC_NOT_READY:
// (On NvSwitch-connected systems) Indicates that the fabric has not been
// configured yet. Caller must retry GPU registration.
//
// NV_ERR_NVLINK_FABRIC_FAILURE:
// (On NvSwitch-connected systems) Indicates that the NvLink fabric
// failed to be configured.
//
// NV_ERR_GPU_MEMORY_ONLINING_FAILURE:
// (On coherent systems) The GPU's memory onlining failed.
//
// NV_ERR_GENERIC:
// Unexpected error. We try hard to avoid returning this error code,
// because it is not very informative.
@@ -1317,9 +1328,8 @@ NV_STATUS UvmCleanUpZombieResources(void);
//
// NV_ERR_INVALID_ARGUMENT:
// perGpuAttribs is NULL but gpuAttribsCount is non-zero or vice-versa,
// or caching is requested on more than one GPU.
// The Confidential Computing feature is enabled and the perGpuAttribs
// list is empty.
// or caching is requested on more than one GPU, or (in Confidential
// Computing only) the perGpuAttribs list is empty.
//
// NV_ERR_NOT_SUPPORTED:
// The current process is not the one which called UvmInitialize, and
@@ -1420,9 +1430,9 @@ NV_STATUS UvmAllocDeviceP2P(NvProcessorUuid gpuUuid,
// UvmMigrate
//
// Migrates the backing of a given virtual address range to the specified
// destination processor. If any page in the VA range is unpopulated, it is
// populated at the destination processor. The migrated pages in the VA range
// are also mapped on the destination processor.
// destination processor's nearest memory. If any page in the VA range is
// unpopulated, it is populated at the destination processor. The migrated pages
// in the VA range are also mapped on the destination processor.
//
// Both base and length must be aligned to the smallest page size supported by
// the CPU. The VA range must lie within the largest possible virtual address
@@ -1469,7 +1479,9 @@ NV_STATUS UvmAllocDeviceP2P(NvProcessorUuid gpuUuid,
// If read duplication is enabled on any pages in the VA range, then those pages
// are read duplicated at the destination processor, leaving the source copy, if
// present, intact with only its mapping changed to read-only if it wasn't
// already mapped that way.
// already mapped that way. The exception to this behavior is migrating pages
// between different NUMA nodes, in which case the pages are migrated to the
// destination node and a read-only mapping is created to the migrated pages.
//
// Pages in the VA range are migrated even if their preferred location is set to
// a processor other than the destination processor.
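To make the migration semantics above concrete, here is a hedged user-space sketch. It assumes the UvmMigrate() prototype declared later in this header takes the base address, length, destination processor UUID, and a preferred CPU NUMA node (ignored when the destination is a GPU); consult the actual declaration for the authoritative signature.

// Hedged usage sketch, not an excerpt of this header: migrate a VA range to a
// GPU's nearest memory. Assumes UvmInitialize() and GPU registration have
// already succeeded, and that the four-argument UvmMigrate() form is correct.
NV_STATUS migrate_range_to_gpu(void *base, NvU64 length, const NvProcessorUuid *gpu_uuid)
{
    // -1: no preferred CPU NUMA node; the destination is the GPU itself.
    return UvmMigrate(base, length, gpu_uuid, -1);
}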
@@ -2195,9 +2207,9 @@ NV_STATUS UvmMapDynamicParallelismRegion(void *base,
// allocated via a call to either UvmAlloc or UvmMemMap, or be supported
// system-allocated pageable memory. If the input virtual range corresponds to
// system-allocated pageable memory and UvmIsPageableMemoryAccessSupported
// reports that pageable memory access is supported, the behavior described
// below does not take effect, and read duplication will not be enabled for
// the input range.
// reports that pageable memory access is supported, or if a memoryless
// processor is present, the behavior described below does not take effect, and
// read duplication will not be enabled for the input range.
//
// Both base and length must be aligned to the smallest page size supported by
// the CPU.
@@ -2212,7 +2224,9 @@ NV_STATUS UvmMapDynamicParallelismRegion(void *base,
//
// If UvmMigrate, UvmMigrateAsync or UvmMigrateRangeGroup is called on any pages
// in this VA range, then those pages will also be read duplicated on the
// destination processor for the migration.
// destination processor for the migration unless the migration is between CPU
// NUMA nodes, in which case the pages are migrated to the destination NUMA
// node and a read-only mapping to the migrated pages is created.
//
// Enabling read duplication on a VA range requires the CPU and all GPUs with
// registered VA spaces to be fault-capable. Otherwise, the migration and
@@ -2316,7 +2330,7 @@ NV_STATUS UvmDisableReadDuplication(void *base,
// UvmSetPreferredLocation
//
// Sets the preferred location for the given virtual address range to be the
// specified processor's memory.
// specified processor's nearest memory.
//
// Both base and length must be aligned to the smallest page size supported by
// the CPU. The VA range must lie within the largest possible virtual address
@@ -3945,9 +3959,7 @@ NV_STATUS UvmToolsDisableCounters(UvmToolsCountersHandle counters,
// In-process scenario when targetVa address + size overlaps with buffer + size.
//
// This is essentially a UVM version of RM ctrl call
// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY. For implementation constraints (and more
// information), please refer to the documentation:
// //sw/docs/resman/components/compute/UVM/subsystems/UVM_8_Tools_API_Design.docx
// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY.
//
// Arguments:
// session: (INPUT)
@@ -4001,9 +4013,7 @@ NV_STATUS UvmToolsReadProcessMemory(UvmToolsSessionHandle session,
// buffer + size.
//
// This is essentially a UVM version of RM ctrl call
// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY. For implementation constraints (and more
// information), please refer to the documentation:
// //sw/docs/resman/components/compute/UVM/subsystems/UVM_8_Tools_API_Design.docx
// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY.
//
// Arguments:
// session: (INPUT)

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2021-2023 NVIDIA Corporation
Copyright (c) 2021-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -38,12 +38,10 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->utlb_per_gpc_count = uvm_ada_get_utlbs_per_gpc(parent_gpu);
parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount *
parent_gpu->utlb_per_gpc_count;
parent_gpu->fault_buffer.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count;
{
uvm_fault_buffer_entry_t *dummy;
UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) *
8)));
UVM_ASSERT(parent_gpu->fault_buffer.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8)));
}
// A single top level PDE on Ada covers 128 TB and that's the minimum size
@@ -51,6 +49,9 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->rm_va_base = 0;
parent_gpu->rm_va_size = 128 * UVM_SIZE_1TB;
parent_gpu->peer_va_base = parent_gpu->rm_va_base + parent_gpu->rm_va_size;
parent_gpu->peer_va_size = NV_MAX_DEVICES * UVM_PEER_IDENTITY_VA_SIZE;
parent_gpu->uvm_mem_va_base = 384 * UVM_SIZE_1TB;
parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE;
@@ -77,10 +78,6 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->non_replayable_faults_supported = true;
parent_gpu->access_counters_supported = true;
parent_gpu->access_counters_can_use_physical_addresses = false;
parent_gpu->fault_cancel_va_supported = true;
parent_gpu->scoped_atomics_supported = true;
@@ -98,4 +95,6 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->plc_supported = true;
parent_gpu->no_ats_range_required = false;
parent_gpu->conf_computing.per_channel_key_rotation = false;
}

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2018-2023 NVIDIA Corporation
Copyright (c) 2018-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -38,12 +38,10 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->utlb_per_gpc_count = uvm_ampere_get_utlbs_per_gpc(parent_gpu);
parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount *
parent_gpu->utlb_per_gpc_count;
parent_gpu->fault_buffer.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count;
{
uvm_fault_buffer_entry_t *dummy;
UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 <<
(sizeof(dummy->fault_source.utlb_id) * 8)));
UVM_ASSERT(parent_gpu->fault_buffer.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8)));
}
// A single top level PDE on Ampere covers 128 TB and that's the minimum
@@ -51,6 +49,9 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->rm_va_base = 0;
parent_gpu->rm_va_size = 128 * UVM_SIZE_1TB;
parent_gpu->peer_va_base = parent_gpu->rm_va_base + parent_gpu->rm_va_size;
parent_gpu->peer_va_size = NV_MAX_DEVICES * UVM_PEER_IDENTITY_VA_SIZE;
parent_gpu->uvm_mem_va_base = 384 * UVM_SIZE_1TB;
parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE;
@@ -81,10 +82,6 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->non_replayable_faults_supported = true;
parent_gpu->access_counters_supported = true;
parent_gpu->access_counters_can_use_physical_addresses = false;
parent_gpu->fault_cancel_va_supported = true;
parent_gpu->scoped_atomics_supported = true;
@@ -107,4 +104,6 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->plc_supported = true;
parent_gpu->no_ats_range_required = false;
parent_gpu->conf_computing.per_channel_key_rotation = false;
}

View File

@@ -29,6 +29,8 @@
bool uvm_hal_ampere_ce_method_is_valid_c6b5(uvm_push_t *push, NvU32 method_address, NvU32 method_data)
{
UVM_ASSERT(push->channel);
if (!uvm_channel_is_proxy(push->channel))
return true;
@@ -116,6 +118,16 @@ bool uvm_hal_ampere_ce_memcopy_is_valid_c6b5(uvm_push_t *push, uvm_gpu_address_t
{
NvU64 push_begin_gpu_va;
uvm_gpu_t *gpu = uvm_push_get_gpu(push);
const bool peer_copy = uvm_gpu_address_is_peer(gpu, dst) || uvm_gpu_address_is_peer(gpu, src);
UVM_ASSERT(push->channel);
if (peer_copy && !uvm_channel_is_p2p(push->channel)) {
UVM_ERR_PRINT("Peer copy from address (0x%llx) to address (0x%llx) should use designated p2p channels!",
src.address,
dst.address);
return false;
}
if (!uvm_parent_gpu_is_virt_mode_sriov_heavy(gpu->parent))
return true;
@@ -182,6 +194,8 @@ void uvm_hal_ampere_ce_memcopy_patch_src_c6b5(uvm_push_t *push, uvm_gpu_address_
{
uvm_pushbuffer_t *pushbuffer;
UVM_ASSERT(push->channel);
if (!uvm_channel_is_proxy(push->channel))
return;

View File

@@ -36,6 +36,8 @@ bool uvm_hal_ampere_host_method_is_valid(uvm_push_t *push, NvU32 method_address,
if (!uvm_parent_gpu_is_virt_mode_sriov_heavy(gpu->parent))
return true;
UVM_ASSERT(push->channel);
if (uvm_channel_is_privileged(push->channel)) {
switch (method_address) {
case NVC56F_SET_OBJECT:
@@ -84,6 +86,8 @@ bool uvm_hal_ampere_host_method_is_valid(uvm_push_t *push, NvU32 method_address,
bool uvm_hal_ampere_host_sw_method_is_valid(uvm_push_t *push, NvU32 method_address, NvU32 method_data)
{
UVM_ASSERT(push->channel);
if (!uvm_channel_is_proxy(push->channel))
return true;

View File

@@ -189,7 +189,7 @@ static bool uvm_api_range_invalid(NvU64 base, NvU64 length)
}
// Some APIs can only enforce 4K alignment as it's the smallest GPU page size
// even when the smallest host page is larger (e.g. 64K on ppc64le).
// even when the smallest host page is larger.
static bool uvm_api_range_invalid_4k(NvU64 base, NvU64 length)
{
return uvm_api_range_invalid_aligned(base, length, UVM_PAGE_SIZE_4K);

View File

@@ -42,26 +42,11 @@ void uvm_ats_init(const UvmPlatformInfo *platform_info)
uvm_va_space_mm_enabled_system();
}
void uvm_ats_init_va_space(uvm_va_space_t *va_space)
{
uvm_init_rwsem(&va_space->ats.lock, UVM_LOCK_ORDER_LEAF);
if (UVM_ATS_IBM_SUPPORTED())
uvm_ats_ibm_init_va_space(va_space);
}
NV_STATUS uvm_ats_add_gpu(uvm_parent_gpu_t *parent_gpu)
{
if (UVM_ATS_IBM_SUPPORTED()) {
// uvm_ibm_add_gpu() needs to be called even if ATS is disabled since it
// sets parent_gpu->npu. Not setting parent_gpu->npu will result in
// incorrect NVLink addresses. See dma_addr_to_gpu_addr().
return uvm_ats_ibm_add_gpu(parent_gpu);
}
else if (UVM_ATS_SVA_SUPPORTED()) {
if (g_uvm_global.ats.enabled)
return uvm_ats_sva_add_gpu(parent_gpu);
if (g_uvm_global.ats.enabled) {
UVM_ASSERT(UVM_ATS_SVA_SUPPORTED());
return uvm_ats_sva_add_gpu(parent_gpu);
}
return NV_OK;
@@ -69,38 +54,25 @@ NV_STATUS uvm_ats_add_gpu(uvm_parent_gpu_t *parent_gpu)
void uvm_ats_remove_gpu(uvm_parent_gpu_t *parent_gpu)
{
if (UVM_ATS_IBM_SUPPORTED()) {
// uvm_ibm_remove_gpu() needs to be called even if ATS is disabled since
// uvm_ibm_add_gpu() is called even in that case and
// uvm_ibm_remove_gpu() needs to undo the work done by
// uvm_ats_add_gpu() (gpu retained_count etc.).
uvm_ats_ibm_remove_gpu(parent_gpu);
}
else if (UVM_ATS_SVA_SUPPORTED()) {
if (g_uvm_global.ats.enabled)
uvm_ats_sva_remove_gpu(parent_gpu);
if (g_uvm_global.ats.enabled) {
UVM_ASSERT(UVM_ATS_SVA_SUPPORTED());
uvm_ats_sva_remove_gpu(parent_gpu);
}
}
NV_STATUS uvm_ats_bind_gpu(uvm_gpu_va_space_t *gpu_va_space)
{
NV_STATUS status = NV_OK;
UVM_ASSERT(gpu_va_space);
if (!gpu_va_space->ats.enabled)
return status;
return NV_OK;
UVM_ASSERT(UVM_ATS_SVA_SUPPORTED());
uvm_assert_lockable_order(UVM_LOCK_ORDER_MMAP_LOCK);
uvm_assert_lockable_order(UVM_LOCK_ORDER_VA_SPACE);
if (UVM_ATS_IBM_SUPPORTED())
status = uvm_ats_ibm_bind_gpu(gpu_va_space);
else if (UVM_ATS_SVA_SUPPORTED())
status = uvm_ats_sva_bind_gpu(gpu_va_space);
return status;
return uvm_ats_sva_bind_gpu(gpu_va_space);
}
void uvm_ats_unbind_gpu(uvm_gpu_va_space_t *gpu_va_space)
@@ -110,10 +82,9 @@ void uvm_ats_unbind_gpu(uvm_gpu_va_space_t *gpu_va_space)
if (!gpu_va_space->ats.enabled)
return;
if (UVM_ATS_IBM_SUPPORTED())
uvm_ats_ibm_unbind_gpu(gpu_va_space);
else if (UVM_ATS_SVA_SUPPORTED())
uvm_ats_sva_unbind_gpu(gpu_va_space);
UVM_ASSERT(UVM_ATS_SVA_SUPPORTED());
uvm_ats_sva_unbind_gpu(gpu_va_space);
}
NV_STATUS uvm_ats_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
@@ -127,6 +98,8 @@ NV_STATUS uvm_ats_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
if (!gpu_va_space->ats.enabled)
return status;
UVM_ASSERT(UVM_ATS_SVA_SUPPORTED());
va_space = gpu_va_space->va_space;
UVM_ASSERT(va_space);
@@ -138,10 +111,7 @@ NV_STATUS uvm_ats_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
if (uvm_processor_mask_test(&va_space->ats.registered_gpu_va_spaces, gpu_id))
return NV_ERR_INVALID_DEVICE;
if (UVM_ATS_IBM_SUPPORTED())
status = uvm_ats_ibm_register_gpu_va_space(gpu_va_space);
else if (UVM_ATS_SVA_SUPPORTED())
status = uvm_ats_sva_register_gpu_va_space(gpu_va_space);
status = uvm_ats_sva_register_gpu_va_space(gpu_va_space);
if (status == NV_OK)
uvm_processor_mask_set(&va_space->ats.registered_gpu_va_spaces, gpu_id);
@@ -159,25 +129,14 @@ void uvm_ats_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
if (!gpu_va_space->ats.enabled)
return;
UVM_ASSERT(UVM_ATS_SVA_SUPPORTED());
va_space = gpu_va_space->va_space;
gpu_id = gpu_va_space->gpu->id;
if (UVM_ATS_IBM_SUPPORTED())
uvm_ats_ibm_unregister_gpu_va_space(gpu_va_space);
else if (UVM_ATS_SVA_SUPPORTED())
uvm_ats_sva_unregister_gpu_va_space(gpu_va_space);
uvm_ats_sva_unregister_gpu_va_space(gpu_va_space);
uvm_va_space_down_write(va_space);
uvm_processor_mask_clear(&va_space->ats.registered_gpu_va_spaces, gpu_id);
uvm_va_space_up_write(va_space);
}
void uvm_ats_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end)
{
// We can only reach here from the mmu_notifier callbacks and these callbacks
// wouldn't have been registered if ATS wasn't enabled.
UVM_ASSERT(g_uvm_global.ats.enabled);
if (UVM_ATS_IBM_SUPPORTED())
uvm_ats_ibm_invalidate(va_space, start, end);
}

View File

@@ -26,12 +26,11 @@
#include "uvm_linux.h"
#include "uvm_forward_decl.h"
#include "uvm_ats_ibm.h"
#include "nv_uvm_types.h"
#include "uvm_lock.h"
#include "uvm_ats_sva.h"
#define UVM_ATS_SUPPORTED() (UVM_ATS_IBM_SUPPORTED() || UVM_ATS_SVA_SUPPORTED())
#define UVM_ATS_SUPPORTED() UVM_ATS_SVA_SUPPORTED()
typedef struct
{
@@ -43,12 +42,7 @@ typedef struct
// being called in ats_compute_residency_mask().
uvm_rw_semaphore_t lock;
union
{
uvm_ibm_va_space_t ibm;
uvm_sva_va_space_t sva;
};
uvm_sva_va_space_t sva;
} uvm_ats_va_space_t;
typedef struct
@@ -61,12 +55,7 @@ typedef struct
NvU32 pasid;
union
{
uvm_ibm_gpu_va_space_t ibm;
uvm_sva_gpu_va_space_t sva;
};
uvm_sva_gpu_va_space_t sva;
} uvm_ats_gpu_va_space_t;
// Initializes driver-wide ATS state
@@ -74,11 +63,6 @@ typedef struct
// LOCKING: None
void uvm_ats_init(const UvmPlatformInfo *platform_info);
// Initializes ATS specific GPU state
//
// LOCKING: None
void uvm_ats_init_va_space(uvm_va_space_t *va_space);
// Enables ATS feature on the GPU.
//
// LOCKING: g_uvm_global.global lock mutex must be held.
@@ -115,8 +99,6 @@ void uvm_ats_unbind_gpu(uvm_gpu_va_space_t *gpu_va_space);
//
// LOCKING: The VA space lock must be held in write mode.
// mm has to be retained prior to calling this function.
// current->mm->mmap_lock must be held in write mode iff
// UVM_ATS_IBM_SUPPORTED_IN_KERNEL() is 1.
NV_STATUS uvm_ats_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space);
// Disables ATS access for the gpu_va_space. Prior to calling this function,
@@ -124,19 +106,8 @@ NV_STATUS uvm_ats_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space);
// accesses in this GPU VA space, and that no ATS fault handling for this
// GPU will be attempted.
//
// LOCKING: This function may block on mmap_lock and will acquire the VA space
// lock, so neither lock must be held.
// LOCKING: This function will acquire the VA space lock, so it must not be
// held.
void uvm_ats_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space);
// Synchronously invalidate ATS translations cached by GPU TLBs. The
// invalidate applies to all GPUs with active GPU VA spaces in va_space, and
// covers all pages touching any part of the given range. end is inclusive.
//
// GMMU translations in the given range are not guaranteed to be
// invalidated.
//
// LOCKING: No locks are required, but this function may be called with
// interrupts disabled.
void uvm_ats_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end);
#endif // __UVM_ATS_H__

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2023 NVIDIA Corporation
Copyright (c) 2024-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -29,6 +29,7 @@
#include <linux/nodemask.h>
#include <linux/mempolicy.h>
#include <linux/mmu_notifier.h>
#include <linux/topology.h>
#if UVM_HMM_RANGE_FAULT_SUPPORTED()
#include <linux/hmm.h>
@@ -55,38 +56,8 @@ static NV_STATUS service_ats_requests(uvm_gpu_va_space_t *gpu_va_space,
NvU64 user_space_start;
NvU64 user_space_length;
bool write = (access_type >= UVM_FAULT_ACCESS_TYPE_WRITE);
bool fault_service_type = (service_type == UVM_ATS_SERVICE_TYPE_FAULTS);
uvm_populate_permissions_t populate_permissions = fault_service_type ?
(write ? UVM_POPULATE_PERMISSIONS_WRITE : UVM_POPULATE_PERMISSIONS_ANY) :
UVM_POPULATE_PERMISSIONS_INHERIT;
// Request uvm_migrate_pageable() to touch the corresponding page after
// population.
// Under virtualization ATS provides two translations:
// 1) guest virtual -> guest physical
// 2) guest physical -> host physical
//
// The overall ATS translation will fault if either of those translations is
// invalid. The pin_user_pages() call within uvm_migrate_pageable() call
// below handles translation #1, but not #2. We don't know if we're running
// as a guest, but in case we are we can force that translation to be valid
// by touching the guest physical address from the CPU. If the translation
// is not valid then the access will cause a hypervisor fault. Note that
// dma_map_page() can't establish mappings used by GPU ATS SVA translations.
// GPU accesses to host physical addresses obtained as a result of the
// address translation request uses the CPU address space instead of the
// IOMMU address space since the translated host physical address isn't
// necessarily an IOMMU address. The only way to establish guest physical to
// host physical mapping in the CPU address space is to touch the page from
// the CPU.
//
// We assume that the hypervisor mappings are all VM_PFNMAP, VM_SHARED, and
// VM_WRITE, meaning that the mappings are all granted write access on any
// fault and that the kernel will never revoke them.
// drivers/vfio/pci/vfio_pci_nvlink2.c enforces this. Thus we can assume
// that a read fault is always sufficient to also enable write access on the
// guest translation.
bool is_fault_service_type = (service_type == UVM_ATS_SERVICE_TYPE_FAULTS);
bool is_prefetch_faults = (is_fault_service_type && (access_type == UVM_FAULT_ACCESS_TYPE_PREFETCH));
uvm_migrate_args_t uvm_migrate_args =
{
@@ -96,15 +67,30 @@ static NV_STATUS service_ats_requests(uvm_gpu_va_space_t *gpu_va_space,
.dst_node_id = ats_context->residency_node,
.start = start,
.length = length,
.populate_permissions = populate_permissions,
.touch = fault_service_type,
.skip_mapped = fault_service_type,
.populate_on_cpu_alloc_failures = fault_service_type,
.populate_on_migrate_vma_failures = fault_service_type,
.populate_permissions = UVM_POPULATE_PERMISSIONS_INHERIT,
.populate_flags = UVM_POPULATE_PAGEABLE_FLAG_SKIP_PROT_CHECK,
.skip_mapped = is_fault_service_type,
.populate_on_cpu_alloc_failures = is_fault_service_type,
.populate_on_migrate_vma_failures = is_fault_service_type,
.user_space_start = &user_space_start,
.user_space_length = &user_space_length,
// Potential STO NVLINK errors cannot be resolved in fault or access
// counter handlers. If there are GPUs to check for STO, it's either
// a) a false positive, and the migration went through ok, or
// b) a true positive, and the destination is all zeros, and the
// application will be terminated soon.
.gpus_to_check_for_nvlink_errors = NULL,
.fail_on_unresolved_sto_errors = !is_fault_service_type || is_prefetch_faults,
};
if (is_fault_service_type) {
uvm_migrate_args.populate_permissions = (write ? UVM_POPULATE_PERMISSIONS_WRITE : UVM_POPULATE_PERMISSIONS_ANY);
// If we're faulting, let the GPU access special vmas
uvm_migrate_args.populate_flags |= UVM_POPULATE_PAGEABLE_FLAG_ALLOW_SPECIAL;
}
UVM_ASSERT(uvm_ats_can_service_faults(gpu_va_space, mm));
// We are trying to use migrate_vma API in the kernel (if it exists) to
@@ -113,7 +99,7 @@ static NV_STATUS service_ats_requests(uvm_gpu_va_space_t *gpu_va_space,
// set skip_mapped to true. For pages already mapped, this will only handle
// PTE upgrades if needed.
status = uvm_migrate_pageable(&uvm_migrate_args);
if (fault_service_type && (status == NV_WARN_NOTHING_TO_DO))
if (is_fault_service_type && (status == NV_WARN_NOTHING_TO_DO))
status = NV_OK;
UVM_ASSERT(status != NV_ERR_MORE_PROCESSING_REQUIRED);
@@ -129,9 +115,9 @@ static void flush_tlb_va_region(uvm_gpu_va_space_t *gpu_va_space,
uvm_ats_fault_invalidate_t *ats_invalidate;
if (client_type == UVM_FAULT_CLIENT_TYPE_GPC)
ats_invalidate = &gpu_va_space->gpu->parent->fault_buffer_info.replayable.ats_invalidate;
ats_invalidate = &gpu_va_space->gpu->parent->fault_buffer.replayable.ats_invalidate;
else
ats_invalidate = &gpu_va_space->gpu->parent->fault_buffer_info.non_replayable.ats_invalidate;
ats_invalidate = &gpu_va_space->gpu->parent->fault_buffer.non_replayable.ats_invalidate;
if (!ats_invalidate->tlb_batch_pending) {
uvm_tlb_batch_begin(&gpu_va_space->page_tables, &ats_invalidate->tlb_batch);
@@ -146,7 +132,10 @@ static void ats_batch_select_residency(uvm_gpu_va_space_t *gpu_va_space,
uvm_ats_fault_context_t *ats_context)
{
uvm_gpu_t *gpu = gpu_va_space->gpu;
int residency = uvm_gpu_numa_node(gpu);
int residency;
UVM_ASSERT(gpu->mem_info.numa.enabled);
residency = uvm_gpu_numa_node(gpu);
#if defined(NV_MEMPOLICY_HAS_UNIFIED_NODES)
struct mempolicy *vma_policy = vma_policy(vma);
@@ -279,6 +268,27 @@ static const struct mmu_interval_notifier_ops uvm_ats_notifier_ops =
#endif
static bool resident_policy_match(struct vm_area_struct *vma, int dst_nid, int src_nid)
{
#if defined(NV_MEMPOLICY_HAS_UNIFIED_NODES)
struct mempolicy *vma_policy = vma_policy(vma);
// TODO: Bug 4981209: When migrations between CPU numa nodes are supported,
// add (dst_nid != closest_cpu_numa_node) to allow migrations between CPU
// NUMA nodes when destination is the closest_cpu_numa_node.
if (vma_policy &&
node_isset(src_nid, vma_policy->nodes) &&
node_isset(dst_nid, vma_policy->nodes) &&
!cpumask_empty(cpumask_of_node(src_nid)) &&
!cpumask_empty(cpumask_of_node(dst_nid))) {
return true;
}
#endif
return false;
}
static NV_STATUS ats_compute_residency_mask(uvm_gpu_va_space_t *gpu_va_space,
struct vm_area_struct *vma,
NvU64 base,
@@ -358,9 +368,23 @@ static NV_STATUS ats_compute_residency_mask(uvm_gpu_va_space_t *gpu_va_space,
if (pfn & HMM_PFN_VALID) {
struct page *page = hmm_pfn_to_page(pfn);
int resident_node = page_to_nid(page);
if (page_to_nid(page) == ats_context->residency_node)
// Set the residency_mask if:
// - The page is already resident at the intended destination.
// or
// - If both the source and destination nodes are CPU nodes and
// source node is already in the list of preferred nodes for
// the vma. On multi-CPU NUMA node architectures, this avoids
// unnecessary migrations between CPU nodes. Since the
// specific ats_context->residency_node selected by
// ats_batch_select_residency() is just a guess among the list
// of preferred nodes, paying the cost of migration across the
// CPU preferred nodes in this case can't be justified.
if ((resident_node == ats_context->residency_node) ||
resident_policy_match(vma, ats_context->residency_node, resident_node)) {
uvm_page_mask_set(residency_mask, page_index);
}
ats_context->prefetch_state.first_touch = false;
}
@@ -463,6 +487,77 @@ static NV_STATUS ats_compute_prefetch(uvm_gpu_va_space_t *gpu_va_space,
return status;
}
static NV_STATUS uvm_ats_service_faults_region(uvm_gpu_va_space_t *gpu_va_space,
struct vm_area_struct *vma,
NvU64 base,
uvm_va_block_region_t region,
uvm_fault_access_type_t access_type,
uvm_ats_fault_context_t *ats_context,
uvm_page_mask_t *faults_serviced_mask)
{
NvU64 start = base + (region.first * PAGE_SIZE);
size_t length = uvm_va_block_region_size(region);
NV_STATUS status;
UVM_ASSERT(start >= vma->vm_start);
UVM_ASSERT((start + length) <= vma->vm_end);
status = service_ats_requests(gpu_va_space,
vma,
start,
length,
access_type,
UVM_ATS_SERVICE_TYPE_FAULTS,
ats_context);
if (status != NV_OK) {
// This condition can occur if we unexpectedly fault on a vma that
// doesn't support faulting (or at least doesn't support
// pin_user_pages). This may be an incorrect mapping setup from the
// vma's owning driver, a hardware bug, or just that the owning driver
// didn't expect a device fault. Either way, we don't want to consider
// this a global error so don't propagate it, but also don't indicate
// that the faults were serviced. That way the caller knows to cancel
// them precisely.
if (status == NV_ERR_INVALID_ADDRESS)
return NV_OK;
return status;
}
uvm_page_mask_region_fill(faults_serviced_mask, region);
// WAR for older kernel versions missing an SMMU invalidate on RO -> RW
// transition. The SMMU and GPU could have the stale RO copy cached in their
// TLBs, which could have caused this write fault. This operation
// invalidates the SMMU TLBs but not the GPU TLBs. That will happen below as
// necessary.
if (access_type == UVM_FAULT_ACCESS_TYPE_WRITE)
uvm_ats_smmu_invalidate_tlbs(gpu_va_space, start, length);
// The Linux kernel does not invalidate TLB entries on an invalid to valid
// PTE transition. The GPU might have the invalid PTE cached in its TLB.
// The GPU will re-fetch an entry on access if the PTE is invalid and the
// page size is not 4K, but if the page size is 4K no re-fetch will happen
// and the GPU will fault despite the CPU PTE being valid. We don't know
// whether these faults happened due to stale entries after a transition,
// so use the hammer of always invalidating the GPU's TLB on each fault.
//
// The second case is similar and handles missing ATS invalidations on RO ->
// RW transitions for all page sizes. See the uvm_ats_smmu_invalidate_tlbs()
// call above.
if (PAGE_SIZE == UVM_PAGE_SIZE_4K || (UVM_ATS_SMMU_WAR_REQUIRED() && access_type == UVM_FAULT_ACCESS_TYPE_WRITE)) {
flush_tlb_va_region(gpu_va_space, start, length, ats_context->client_type);
}
else {
// ARM requires TLB invalidations on RO -> RW, but not all architectures
// do. If we implement ATS support on other architectures, we might need
// to issue GPU invalidates.
UVM_ASSERT(NVCPU_IS_AARCH64);
}
return NV_OK;
}
NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
struct vm_area_struct *vma,
NvU64 base,
@@ -471,12 +566,11 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
NV_STATUS status = NV_OK;
uvm_va_block_region_t subregion;
uvm_va_block_region_t region = uvm_va_block_region(0, PAGES_PER_UVM_VA_BLOCK);
uvm_page_mask_t *prefetch_only_fault_mask = &ats_context->faults.prefetch_only_fault_mask;
uvm_page_mask_t *read_fault_mask = &ats_context->faults.read_fault_mask;
uvm_page_mask_t *write_fault_mask = &ats_context->faults.write_fault_mask;
uvm_page_mask_t *faults_serviced_mask = &ats_context->faults.faults_serviced_mask;
uvm_page_mask_t *reads_serviced_mask = &ats_context->faults.reads_serviced_mask;
uvm_fault_client_type_t client_type = ats_context->client_type;
uvm_ats_service_type_t service_type = UVM_ATS_SERVICE_TYPE_FAULTS;
UVM_ASSERT(vma);
UVM_ASSERT(IS_ALIGNED(base, UVM_VA_BLOCK_SIZE));
@@ -492,7 +586,7 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
uvm_page_mask_zero(reads_serviced_mask);
if (!(vma->vm_flags & VM_READ))
return status;
return NV_OK;
if (!(vma->vm_flags & VM_WRITE)) {
// If VMA doesn't have write permissions, all write faults are fatal.
@@ -508,72 +602,65 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
// There are no pending faults beyond write faults to RO region.
if (uvm_page_mask_empty(read_fault_mask))
return status;
return NV_OK;
}
ats_batch_select_residency(gpu_va_space, vma, ats_context);
ats_compute_prefetch(gpu_va_space, vma, base, service_type, ats_context);
ats_compute_prefetch(gpu_va_space, vma, base, UVM_ATS_SERVICE_TYPE_FAULTS, ats_context);
for_each_va_block_subregion_in_mask(subregion, write_fault_mask, region) {
NvU64 start = base + (subregion.first * PAGE_SIZE);
size_t length = uvm_va_block_region_num_pages(subregion) * PAGE_SIZE;
uvm_fault_access_type_t access_type = (vma->vm_flags & VM_WRITE) ?
UVM_FAULT_ACCESS_TYPE_WRITE :
UVM_FAULT_ACCESS_TYPE_READ;
UVM_ASSERT(start >= vma->vm_start);
UVM_ASSERT((start + length) <= vma->vm_end);
status = service_ats_requests(gpu_va_space, vma, start, length, access_type, service_type, ats_context);
if (status != NV_OK)
return status;
uvm_fault_access_type_t access_type;
uvm_page_mask_t *serviced_mask;
if (vma->vm_flags & VM_WRITE) {
uvm_page_mask_region_fill(faults_serviced_mask, subregion);
uvm_ats_smmu_invalidate_tlbs(gpu_va_space, start, length);
// The Linux kernel never invalidates TLB entries on mapping
// permission upgrade. This is a problem if the GPU has cached
// entries with the old permission. The GPU will re-fetch the entry
// if the PTE is invalid and page size is not 4K (this is the case
// on P9). However, if a page gets upgraded from R/O to R/W and GPU
// has the PTEs cached with R/O permissions we will enter an
// infinite loop because we just forward the fault to the Linux
// kernel and it will see that the permissions in the page table are
// correct. Therefore, we flush TLB entries on ATS write faults.
flush_tlb_va_region(gpu_va_space, start, length, client_type);
access_type = UVM_FAULT_ACCESS_TYPE_WRITE;
serviced_mask = faults_serviced_mask;
}
else {
uvm_page_mask_region_fill(reads_serviced_mask, subregion);
// write_fault_mask contains just the addresses with both read and
// fatal write faults, so we need to service the read component.
access_type = UVM_FAULT_ACCESS_TYPE_READ;
serviced_mask = reads_serviced_mask;
}
status = uvm_ats_service_faults_region(gpu_va_space,
vma,
base,
subregion,
access_type,
ats_context,
serviced_mask);
if (status != NV_OK)
return status;
}
// Remove write faults from read_fault_mask
// Remove write faults from read_fault_mask to avoid double-service
uvm_page_mask_andnot(read_fault_mask, read_fault_mask, write_fault_mask);
for_each_va_block_subregion_in_mask(subregion, read_fault_mask, region) {
NvU64 start = base + (subregion.first * PAGE_SIZE);
size_t length = uvm_va_block_region_num_pages(subregion) * PAGE_SIZE;
uvm_fault_access_type_t access_type = UVM_FAULT_ACCESS_TYPE_READ;
UVM_ASSERT(start >= vma->vm_start);
UVM_ASSERT((start + length) <= vma->vm_end);
status = service_ats_requests(gpu_va_space, vma, start, length, access_type, service_type, ats_context);
status = uvm_ats_service_faults_region(gpu_va_space,
vma,
base,
subregion,
UVM_FAULT_ACCESS_TYPE_READ,
ats_context,
faults_serviced_mask);
if (status != NV_OK)
return status;
}
uvm_page_mask_region_fill(faults_serviced_mask, subregion);
// Similarly to permission upgrade scenario, discussed above, GPU
// will not re-fetch the entry if the PTE is invalid and page size
// is 4K. To avoid infinite faulting loop, invalidate TLB for every
// new translation written explicitly like in the case of permission
// upgrade.
if (PAGE_SIZE == UVM_PAGE_SIZE_4K)
flush_tlb_va_region(gpu_va_space, start, length, client_type);
// Handle HW prefetch only faults
for_each_va_block_subregion_in_mask(subregion, prefetch_only_fault_mask, region) {
status = uvm_ats_service_faults_region(gpu_va_space,
vma,
base,
subregion,
UVM_FAULT_ACCESS_TYPE_PREFETCH,
ats_context,
faults_serviced_mask);
if (status != NV_OK)
return status;
}
return status;
@@ -590,12 +677,14 @@ bool uvm_ats_check_in_gmmu_region(uvm_va_space_t *va_space, NvU64 address, uvm_v
if (next->node.start <= gmmu_region_base + UVM_GMMU_ATS_GRANULARITY - 1)
return true;
prev = uvm_va_range_container(uvm_range_tree_prev(&va_space->va_range_tree, &next->node));
prev = uvm_va_range_gmmu_mappable_prev(next);
}
else {
// No VA range exists after address, so check the last VA range in the
// tree.
prev = uvm_va_range_container(uvm_range_tree_last(&va_space->va_range_tree));
while (prev && !uvm_va_range_is_gmmu_mappable(prev))
prev = uvm_va_range_gmmu_mappable_prev(prev);
}
return prev && (prev->node.end >= gmmu_region_base);
@@ -668,6 +757,20 @@ NV_STATUS uvm_ats_service_access_counters(uvm_gpu_va_space_t *gpu_va_space,
&ats_context->access_counters.accessed_mask,
&ats_context->prefetch_state.residency_mask);
// Pretend that pages that are already resident at the destination GPU were
// migrated now. This makes sure that the access counter is cleared even if
// the accessed pages were already resident on the target.
// TODO: Bug 5296998: [uvm][ats] Not clearing stale access counter
// notifications can lead to missed migrations
// The same problem of stale notification exists for migration to other
// locations than local vidmem. However, stale notifications to data
// migrated to another remote location are identical to those triggered
// by accessing memory that cannot or should not be migrated.
if (uvm_id_equal(ats_context->residency_id, gpu_va_space->gpu->id)) {
uvm_page_mask_copy(&ats_context->access_counters.migrated_mask,
&ats_context->prefetch_state.residency_mask);
}
for_each_va_block_subregion_in_mask(subregion, &ats_context->access_counters.accessed_mask, region) {
NV_STATUS status;
NvU64 start = base + (subregion.first * PAGE_SIZE);
@@ -679,7 +782,10 @@ NV_STATUS uvm_ats_service_access_counters(uvm_gpu_va_space_t *gpu_va_space,
UVM_ASSERT((start + length) <= vma->vm_end);
status = service_ats_requests(gpu_va_space, vma, start, length, access_type, service_type, ats_context);
if (status == NV_OK)
// Clear access counters if pages were migrated or migration needs to
// be retried
if (status == NV_OK || status == NV_ERR_BUSY_RETRY)
uvm_page_mask_region_fill(migrated_mask, subregion);
else if (status != NV_WARN_NOTHING_TO_DO)
return status;

View File

@@ -29,12 +29,13 @@
// Service ATS faults in the range (base, base + UVM_VA_BLOCK_SIZE) with service
// type for individual pages in the range requested by page masks set in
// ats_context->fault.read_fault_mask/write_fault_mask. base must be aligned to
// UVM_VA_BLOCK_SIZE. The caller is responsible for ensuring that faulting
// addresses fall completely within the VMA. The caller is also responsible for
// ensuring that the faulting addresses don't overlap a GMMU region. (See
// uvm_ats_check_in_gmmu_region). The caller is also responsible for handling
// any errors returned by this function (fault cancellations etc.).
// ats_context->fault.read_fault_mask/write_fault_mask/prefetch_only_mask.
// base must be aligned to UVM_VA_BLOCK_SIZE. The caller is responsible for
// ensuring that faulting addresses fall completely within the VMA. The caller
// is also responsible for ensuring that the faulting addresses don't overlap
// a GMMU region. (See uvm_ats_check_in_gmmu_region). The caller is also
// responsible for handling any errors returned by this function (fault
// cancellations etc.).
//
// Returns the fault service status in ats_context->fault.faults_serviced_mask.
// In addition, ats_context->fault.reads_serviced_mask returns whether read

View File

@@ -1,715 +0,0 @@
/*******************************************************************************
Copyright (c) 2018-2019 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#include "uvm_api.h"
#include "uvm_lock.h"
#include "uvm_kvmalloc.h"
#include "uvm_global.h"
#include "uvm_va_space.h"
#include "uvm_va_space_mm.h"
#include "uvm_ats_ibm.h"
#include "uvm_common.h"
#include <linux/pci.h>
#if UVM_IBM_NPU_SUPPORTED()
#include <linux/of.h>
#include <linux/sizes.h>
#include <asm/pci-bridge.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#define NPU_ATSD_REG_MAP_SIZE 32
// There are three 8-byte registers in each ATSD mapping:
#define NPU_ATSD_REG_LAUNCH 0
#define NPU_ATSD_REG_AVA 1
#define NPU_ATSD_REG_STAT 2
// Fields within the NPU_ATSD_REG_LAUNCH register:
// "PRS" (process-scoped) bit. 1 means to limit invalidates to the specified
// PASID.
#define NPU_ATSD_REG_LAUNCH_PASID_ENABLE 13
// "PID" field. This specifies the PASID target of the invalidate.
#define NPU_ATSD_REG_LAUNCH_PASID_VAL 38
// "IS" bit. 0 means the specified virtual address range will be invalidated. 1
// means all entries will be invalidated.
#define NPU_ATSD_REG_LAUNCH_INVAL_ALL 12
// "AP" field. This encodes the size of a range-based invalidate.
#define NPU_ATSD_REG_LAUNCH_INVAL_SIZE 17
// "No flush" bit. 0 will trigger a flush (membar) from the GPU following the
// invalidate, 1 will not.
#define NPU_ATSD_REG_LAUNCH_FLUSH_DISABLE 39
// Helper to iterate over the active NPUs in the given VA space (all NPUs with
// GPUs that have GPU VA spaces registered in this VA space).
#define for_each_npu_index_in_va_space(npu_index, va_space) \
for (({uvm_assert_rwlock_locked(&(va_space)->ats.ibm.rwlock); \
(npu_index) = find_first_bit((va_space)->ats.ibm.npu_active_mask, NV_MAX_NPUS);}); \
(npu_index) < NV_MAX_NPUS; \
(npu_index) = find_next_bit((va_space)->ats.ibm.npu_active_mask, NV_MAX_NPUS, (npu_index) + 1))
// An invalidate requires operating on one set of registers in each NPU. This
// struct tracks which register set (id) is in use per NPU for a given
// operation.
typedef struct
{
NvU8 ids[NV_MAX_NPUS];
} uvm_atsd_regs_t;
// Get the index of the input npu pointer within UVM's global npus array
static size_t uvm_ibm_npu_index(uvm_ibm_npu_t *npu)
{
size_t npu_index = npu - &g_uvm_global.npus[0];
UVM_ASSERT(npu_index < ARRAY_SIZE(g_uvm_global.npus));
return npu_index;
}
// Find an existing NPU matching pci_domain, or return an empty NPU slot if none
// is found. Returns NULL if no slots are available.
static uvm_ibm_npu_t *uvm_ibm_npu_find(int pci_domain)
{
size_t i;
uvm_ibm_npu_t *npu, *first_free = NULL;
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
for (i = 0; i < ARRAY_SIZE(g_uvm_global.npus); i++) {
npu = &g_uvm_global.npus[i];
if (npu->num_retained_gpus == 0) {
if (!first_free)
first_free = npu;
}
else if (npu->pci_domain == pci_domain) {
return npu;
}
}
return first_free;
}
static void uvm_ibm_npu_destroy(uvm_ibm_npu_t *npu)
{
size_t i;
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
UVM_ASSERT(npu->num_retained_gpus == 0);
UVM_ASSERT(bitmap_empty(npu->atsd_regs.locks, UVM_MAX_ATSD_REGS));
for (i = 0; i < npu->atsd_regs.count; i++) {
UVM_ASSERT(npu->atsd_regs.io_addrs[i]);
iounmap(npu->atsd_regs.io_addrs[i]);
}
memset(npu, 0, sizeof(*npu));
}
static NV_STATUS uvm_ibm_npu_init(uvm_ibm_npu_t *npu, struct pci_dev *npu_dev)
{
struct pci_controller *hose;
size_t i, reg_count, reg_size = sizeof(npu->atsd_regs.io_addrs[0]);
int ret;
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
UVM_ASSERT(npu->num_retained_gpus == 0);
UVM_ASSERT(bitmap_empty(npu->atsd_regs.locks, UVM_MAX_ATSD_REGS));
npu->pci_domain = pci_domain_nr(npu_dev->bus);
if (!UVM_ATS_IBM_SUPPORTED_IN_DRIVER())
return NV_OK;
hose = pci_bus_to_host(npu_dev->bus);
ret = of_property_count_elems_of_size(hose->dn, "ibm,mmio-atsd", reg_size);
if (ret < 0) {
UVM_ERR_PRINT("Failed to query NPU %d ATSD register count: %d\n", npu->pci_domain, ret);
return errno_to_nv_status(ret);
}
// For ATS to be enabled globally, we must have NPU ATSD registers
reg_count = ret;
if (reg_count == 0 || reg_count > UVM_MAX_ATSD_REGS) {
UVM_ERR_PRINT("NPU %d has invalid ATSD register count: %zu\n", npu->pci_domain, reg_count);
return NV_ERR_INVALID_STATE;
}
// Map the ATSD registers
for (i = 0; i < reg_count; i++) {
u64 phys_addr;
__be64 __iomem *io_addr;
ret = of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", i, &phys_addr);
UVM_ASSERT(ret == 0);
io_addr = ioremap(phys_addr, NPU_ATSD_REG_MAP_SIZE);
if (!io_addr) {
uvm_ibm_npu_destroy(npu);
return NV_ERR_NO_MEMORY;
}
npu->atsd_regs.io_addrs[npu->atsd_regs.count++] = io_addr;
}
return NV_OK;
}
NV_STATUS uvm_ats_ibm_add_gpu(uvm_parent_gpu_t *parent_gpu)
{
struct pci_dev *npu_dev = pnv_pci_get_npu_dev(parent_gpu->pci_dev, 0);
uvm_ibm_npu_t *npu;
NV_STATUS status;
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
if (!npu_dev)
return NV_OK;
npu = uvm_ibm_npu_find(pci_domain_nr(npu_dev->bus));
if (!npu) {
// If this happens then we can't support the system configuration until
// NV_MAX_NPUS is updated. Return the same error as when the number of
// GPUs exceeds UVM_MAX_GPUS.
UVM_ERR_PRINT("No more NPU slots available, update NV_MAX_NPUS\n");
return NV_ERR_INSUFFICIENT_RESOURCES;
}
if (npu->num_retained_gpus == 0) {
status = uvm_ibm_npu_init(npu, npu_dev);
if (status != NV_OK)
return status;
}
// This npu field could be read concurrently by a thread in the ATSD
// invalidate path. We don't need to provide ordering with those threads
// because those invalidates won't apply to the GPU being added until a GPU
// VA space on this GPU is registered.
npu->atsd_regs.num_membars = max(npu->atsd_regs.num_membars, parent_gpu->num_hshub_tlb_invalidate_membars);
parent_gpu->npu = npu;
++npu->num_retained_gpus;
return NV_OK;
}
void uvm_ats_ibm_remove_gpu(uvm_parent_gpu_t *parent_gpu)
{
uvm_ibm_npu_t *npu = parent_gpu->npu;
uvm_parent_gpu_t *other_parent_gpu;
NvU32 num_membars_new = 0;
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
if (!npu)
return;
UVM_ASSERT(npu->num_retained_gpus > 0);
if (--npu->num_retained_gpus == 0) {
uvm_ibm_npu_destroy(npu);
}
else {
// Re-calculate the membar count
for_each_parent_gpu(other_parent_gpu) {
// The current GPU being removed should've already been removed from
// the global list.
UVM_ASSERT(other_parent_gpu != parent_gpu);
if (other_parent_gpu->npu == npu)
num_membars_new = max(num_membars_new, other_parent_gpu->num_hshub_tlb_invalidate_membars);
}
UVM_ASSERT(num_membars_new > 0);
npu->atsd_regs.num_membars = num_membars_new;
}
}
#if UVM_ATS_IBM_SUPPORTED()
void uvm_ats_ibm_init_va_space(uvm_va_space_t *va_space)
{
uvm_ibm_va_space_t *ibm_va_space;
UVM_ASSERT(va_space);
ibm_va_space = &va_space->ats.ibm;
uvm_rwlock_irqsave_init(&ibm_va_space->rwlock, UVM_LOCK_ORDER_LEAF);
}
#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL()
static void npu_release_dummy(struct npu_context *npu_context, void *va_mm)
{
// See the comment on the call to pnv_npu2_init_context()
}
static NV_STATUS uvm_ats_ibm_register_gpu_va_space_kernel(uvm_gpu_va_space_t *gpu_va_space)
{
uvm_va_space_t *va_space = gpu_va_space->va_space;
uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm;
struct npu_context *npu_context;
// pnv_npu2_init_context() registers current->mm with
// mmu_notifier_register(). We need that to match the mm we passed to our
// own mmu_notifier_register() for this VA space.
if (current->mm != va_space->va_space_mm.mm)
return NV_ERR_NOT_SUPPORTED;
uvm_assert_mmap_lock_locked_write(current->mm);
uvm_assert_rwsem_locked_write(&va_space->lock);
// pnv_npu2_init_context() doesn't handle being called multiple times for
// the same GPU under the same mm, which could happen if multiple VA spaces
// are created in this process. To handle that we pass the VA space pointer
// as the callback parameter: the callback values are shared by all devices
// under this mm, so pnv_npu2_init_context() enforces that the values match
// the ones already registered to the mm.
//
// Otherwise we don't use the callback, since we have our own callback
// registered under the va_space_mm that will be called at the same point
// (mmu_notifier release).
npu_context = pnv_npu2_init_context(gpu_va_space->gpu->parent->pci_dev,
(MSR_DR | MSR_PR | MSR_HV),
npu_release_dummy,
va_space);
if (IS_ERR(npu_context)) {
int err = PTR_ERR(npu_context);
// We'll get -EINVAL if the callback value (va_space) differs from the
// one already registered to the npu_context associated with this mm.
// That can only happen when multiple VA spaces attempt registration
// within the same process, which is disallowed and should return
// NV_ERR_NOT_SUPPORTED.
if (err == -EINVAL)
return NV_ERR_NOT_SUPPORTED;
return errno_to_nv_status(err);
}
ibm_gpu_va_space->npu_context = npu_context;
return NV_OK;
}
static void uvm_ats_ibm_unregister_gpu_va_space_kernel(uvm_gpu_va_space_t *gpu_va_space)
{
uvm_gpu_va_space_state_t state;
uvm_va_space_t *va_space = gpu_va_space->va_space;
uvm_ibm_va_space_t *ibm_va_space;
uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm;
if (!ibm_gpu_va_space->npu_context)
return;
// va_space is guaranteed to not be NULL if ibm_gpu_va_space->npu_context is
// not NULL.
UVM_ASSERT(va_space);
state = uvm_gpu_va_space_state(gpu_va_space);
UVM_ASSERT(state == UVM_GPU_VA_SPACE_STATE_INIT || state == UVM_GPU_VA_SPACE_STATE_DEAD);
ibm_va_space = &va_space->ats.ibm;
// pnv_npu2_destroy_context() may in turn call mmu_notifier_unregister().
// If uvm_va_space_mm_shutdown() is concurrently executing in another
// thread, mmu_notifier_unregister() will wait for
// uvm_va_space_mm_shutdown() to finish. uvm_va_space_mm_shutdown() takes
// mmap_lock and the VA space lock, so we can't be holding those locks on
// this path.
uvm_assert_unlocked_order(UVM_LOCK_ORDER_MMAP_LOCK);
uvm_assert_unlocked_order(UVM_LOCK_ORDER_VA_SPACE);
pnv_npu2_destroy_context(ibm_gpu_va_space->npu_context, gpu_va_space->gpu->parent->pci_dev);
ibm_gpu_va_space->npu_context = NULL;
}
#else
static void uvm_ats_ibm_register_gpu_va_space_driver(uvm_gpu_va_space_t *gpu_va_space)
{
uvm_va_space_t *va_space = gpu_va_space->va_space;
uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm;
uvm_gpu_t *gpu = gpu_va_space->gpu;
size_t npu_index = uvm_ibm_npu_index(gpu->parent->npu);
uvm_ibm_va_space_t *ibm_va_space;
UVM_ASSERT(va_space);
ibm_va_space = &va_space->ats.ibm;
uvm_assert_rwsem_locked_write(&va_space->lock);
uvm_write_lock_irqsave(&ibm_va_space->rwlock);
// If this is the first GPU VA space to use this NPU in the VA space, mark
// the NPU as active so invalidates are issued to it.
if (ibm_va_space->npu_ref_counts[npu_index] == 0) {
// If this is the first active NPU in the entire VA space, we have to
// tell the kernel to send TLB invalidations to the IOMMU. See kernel
// commit 03b8abedf4f4965e7e9e0d4f92877c42c07ce19f for background.
//
// This is safe to do without holding mm_users high or mmap_lock.
if (bitmap_empty(ibm_va_space->npu_active_mask, NV_MAX_NPUS))
mm_context_add_copro(va_space->va_space_mm.mm);
UVM_ASSERT(!test_bit(npu_index, ibm_va_space->npu_active_mask));
__set_bit(npu_index, ibm_va_space->npu_active_mask);
}
else {
UVM_ASSERT(test_bit(npu_index, ibm_va_space->npu_active_mask));
}
++ibm_va_space->npu_ref_counts[npu_index];
// As soon as this lock is dropped, invalidates on this VA space's mm may
// begin issuing ATSDs to this NPU.
uvm_write_unlock_irqrestore(&ibm_va_space->rwlock);
ibm_gpu_va_space->did_ibm_driver_init = true;
}
static void uvm_ats_ibm_unregister_gpu_va_space_driver(uvm_gpu_va_space_t *gpu_va_space)
{
uvm_va_space_t *va_space = gpu_va_space->va_space;
uvm_gpu_t *gpu = gpu_va_space->gpu;
size_t npu_index = uvm_ibm_npu_index(gpu->parent->npu);
bool do_remove = false;
uvm_ibm_va_space_t *ibm_va_space;
uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm;
if (!ibm_gpu_va_space->did_ibm_driver_init)
return;
UVM_ASSERT(va_space);
ibm_va_space = &va_space->ats.ibm;
// Note that we aren't holding the VA space lock here, so another thread
// could be in uvm_ats_ibm_register_gpu_va_space() for this same GPU right
// now. The write lock and ref counts below will handle that case.
// Once we return from this function with a bit cleared in the
// npu_active_mask, we have to guarantee that this VA space no longer
// accesses that NPU's ATSD registers. This is needed in case GPU unregister
// needs to unmap those registers. We use the reader/writer lock to
// guarantee this, which means that invalidations must not access the ATSD
// registers outside of the lock.
//
// Future work: if we could synchronize_srcu() on the mmu_notifier SRCU we
// might do that here instead to flush out all invalidates. That would allow
// us to avoid taking a read lock in the invalidate path, though we'd have
// to be careful when clearing the mask bit relative to the synchronize, and
// we'd have to be careful in cases where this thread doesn't hold a
// reference to mm_users.
uvm_write_lock_irqsave(&ibm_va_space->rwlock);
UVM_ASSERT(ibm_va_space->npu_ref_counts[npu_index] > 0);
UVM_ASSERT(test_bit(npu_index, ibm_va_space->npu_active_mask));
--ibm_va_space->npu_ref_counts[npu_index];
if (ibm_va_space->npu_ref_counts[npu_index] == 0) {
__clear_bit(npu_index, ibm_va_space->npu_active_mask);
if (bitmap_empty(ibm_va_space->npu_active_mask, NV_MAX_NPUS))
do_remove = true;
}
uvm_write_unlock_irqrestore(&ibm_va_space->rwlock);
if (do_remove) {
// mm_context_remove_copro() must be called outside of the spinlock
// because it may issue invalidates across CPUs in this mm. The
// coprocessor count is atomically refcounted by that function, so it's
// safe to call here even if another thread jumps in with a register and
// calls mm_context_add_copro() between this thread's unlock and this
// call.
UVM_ASSERT(va_space->va_space_mm.mm);
mm_context_remove_copro(va_space->va_space_mm.mm);
}
}
#endif // UVM_ATS_IBM_SUPPORTED_IN_KERNEL()
static mm_context_id_t va_space_pasid(uvm_va_space_t *va_space)
{
struct mm_struct *mm = va_space->va_space_mm.mm;
UVM_ASSERT(mm);
return mm->context.id;
}
NV_STATUS uvm_ats_ibm_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
{
uvm_va_space_t *va_space = gpu_va_space->va_space;
NV_STATUS status = NV_OK;
UVM_ASSERT(gpu_va_space->ats.enabled);
UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_INIT);
UVM_ASSERT(va_space->va_space_mm.mm);
uvm_assert_rwsem_locked_write(&va_space->lock);
#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL()
status = uvm_ats_ibm_register_gpu_va_space_kernel(gpu_va_space);
#else
uvm_ats_ibm_register_gpu_va_space_driver(gpu_va_space);
#endif
gpu_va_space->ats.pasid = (NvU32) va_space_pasid(gpu_va_space->va_space);
return status;
}
void uvm_ats_ibm_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
{
#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL()
uvm_ats_ibm_unregister_gpu_va_space_kernel(gpu_va_space);
#else
uvm_ats_ibm_unregister_gpu_va_space_driver(gpu_va_space);
#endif
gpu_va_space->ats.pasid = -1U;
}
#if UVM_ATS_IBM_SUPPORTED_IN_DRIVER()
// Find any available ATSD register set in this NPU and return that index. This
// will busy wait until a register set is free.
static NvU8 atsd_reg_acquire(uvm_ibm_npu_t *npu)
{
uvm_spin_loop_t spin;
size_t i;
bool first = true;
while (1) {
// Using for_each_clear_bit is racy, since the bits could change at any
// point. That's ok since we'll either just retry or use a real atomic
// to lock the bit. Checking for clear bits first avoids spamming
// atomics in the contended case.
for_each_clear_bit(i, npu->atsd_regs.locks, npu->atsd_regs.count) {
if (!test_and_set_bit_lock(i, npu->atsd_regs.locks))
return (NvU8)i;
}
// Back off and try again, avoiding the overhead of initializing the
// tracking timers unless we need them.
if (first) {
uvm_spin_loop_init(&spin);
first = false;
}
else {
UVM_SPIN_LOOP(&spin);
}
}
}
static void atsd_reg_release(uvm_ibm_npu_t *npu, NvU8 reg)
{
UVM_ASSERT(reg < npu->atsd_regs.count);
UVM_ASSERT(test_bit(reg, npu->atsd_regs.locks));
clear_bit_unlock(reg, npu->atsd_regs.locks);
}
static __be64 atsd_reg_read(uvm_ibm_npu_t *npu, NvU8 reg, size_t offset)
{
__be64 __iomem *io_addr = npu->atsd_regs.io_addrs[reg] + offset;
UVM_ASSERT(reg < npu->atsd_regs.count);
return __raw_readq(io_addr);
}
static void atsd_reg_write(uvm_ibm_npu_t *npu, NvU8 reg, size_t offset, NvU64 val)
{
__be64 __iomem *io_addr = npu->atsd_regs.io_addrs[reg] + offset;
UVM_ASSERT(reg < npu->atsd_regs.count);
__raw_writeq_be(val, io_addr);
}
// Acquire a set of registers in each NPU which is active in va_space
static void atsd_regs_acquire(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs)
{
size_t i;
for_each_npu_index_in_va_space(i, va_space)
regs->ids[i] = atsd_reg_acquire(&g_uvm_global.npus[i]);
}
static void atsd_regs_release(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs)
{
size_t i;
for_each_npu_index_in_va_space(i, va_space)
atsd_reg_release(&g_uvm_global.npus[i], regs->ids[i]);
}
// Write the provided value to each NPU active in va_space at the provided
// register offset.
static void atsd_regs_write(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs, size_t offset, NvU64 val)
{
size_t i;
for_each_npu_index_in_va_space(i, va_space)
atsd_reg_write(&g_uvm_global.npus[i], regs->ids[i], offset, val);
}
// Wait for all prior operations issued to active NPUs in va_space on the given
// registers to finish.
static void atsd_regs_wait(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs)
{
uvm_spin_loop_t spin;
size_t i;
for_each_npu_index_in_va_space(i, va_space) {
UVM_SPIN_WHILE(atsd_reg_read(&g_uvm_global.npus[i], regs->ids[i], NPU_ATSD_REG_STAT), &spin)
;
}
}
// Encode an invalidate targeting the given pasid and the given size for the
// NPU_ATSD_REG_LAUNCH register. The target address is encoded separately.
//
// psize must be one of the MMU_PAGE_* values defined in powerpc's asm/mmu.h. A
// psize of MMU_PAGE_COUNT means to invalidate the entire address space.
static NvU64 atsd_get_launch_val(mm_context_id_t pasid, int psize)
{
NvU64 val = 0;
val |= PPC_BIT(NPU_ATSD_REG_LAUNCH_PASID_ENABLE);
val |= pasid << PPC_BITLSHIFT(NPU_ATSD_REG_LAUNCH_PASID_VAL);
if (psize == MMU_PAGE_COUNT) {
val |= PPC_BIT(NPU_ATSD_REG_LAUNCH_INVAL_ALL);
}
else {
// The NPU registers do not support arbitrary sizes
UVM_ASSERT(psize == MMU_PAGE_64K || psize == MMU_PAGE_2M || psize == MMU_PAGE_1G);
val |= (NvU64)mmu_get_ap(psize) << PPC_BITLSHIFT(NPU_ATSD_REG_LAUNCH_INVAL_SIZE);
}
return val;
}
// Return the encoded size to use for an ATSD targeting the given range, in one
// of the MMU_PAGE_* values defined in powerpc's asm/mmu.h. A return value of
// MMU_PAGE_COUNT means the entire address space must be invalidated.
//
// start is an in/out parameter. On return start will be set to the aligned
// starting address to use for the ATSD. end is inclusive.
static int atsd_calc_size(NvU64 *start, NvU64 end)
{
// ATSDs have high latency, so we prefer to over-invalidate rather than
// issue multiple precise invalidates. Supported sizes are only 64K, 2M, and
// 1G.
*start = UVM_ALIGN_DOWN(*start, SZ_64K);
end = UVM_ALIGN_DOWN(end, SZ_64K);
if (*start == end)
return MMU_PAGE_64K;
*start = UVM_ALIGN_DOWN(*start, SZ_2M);
end = UVM_ALIGN_DOWN(end, SZ_2M);
if (*start == end)
return MMU_PAGE_2M;
*start = UVM_ALIGN_DOWN(*start, SZ_1G);
end = UVM_ALIGN_DOWN(end, SZ_1G);
if (*start == end)
return MMU_PAGE_1G;
return MMU_PAGE_COUNT;
}
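For illustration only, a minimal userspace re-derivation of the size-selection logic above (the MMU_PAGE_* values and UVM_ALIGN_DOWN are replaced with local stand-ins; this is a sketch of the same arithmetic, not the driver code):

#include <stdint.h>
#include <stdio.h>

#define SZ_64K  (64ull * 1024)
#define SZ_2M   (2ull * 1024 * 1024)
#define SZ_1G   (1024ull * 1024 * 1024)
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

enum inval_size { INVAL_64K, INVAL_2M, INVAL_1G, INVAL_ALL };

/* Mirrors the selection above: end is inclusive, *start is rounded down to the
 * chosen granule so a single invalidate covers [start, end]. */
static enum inval_size calc_size(uint64_t *start, uint64_t end)
{
    *start = ALIGN_DOWN(*start, SZ_64K);
    if (*start == ALIGN_DOWN(end, SZ_64K))
        return INVAL_64K;
    *start = ALIGN_DOWN(*start, SZ_2M);
    if (*start == ALIGN_DOWN(end, SZ_2M))
        return INVAL_2M;
    *start = ALIGN_DOWN(*start, SZ_1G);
    if (*start == ALIGN_DOWN(end, SZ_1G))
        return INVAL_1G;
    return INVAL_ALL;
}

int main(void)
{
    /* 1 MB..2 MB-1 spans two 64K granules but fits in one 2M granule,
     * so the range is over-invalidated with a single 2M ATSD at 0. */
    uint64_t start = 0x100000, end = 0x1fffff;
    enum inval_size s = calc_size(&start, end);
    printf("size enum %d, start 0x%llx\n", (int)s, (unsigned long long)start);
    return 0;
}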
// Issue an ATSD to all NPUs and wait for completion
static void atsd_launch_wait(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs, NvU64 val)
{
atsd_regs_write(va_space, regs, NPU_ATSD_REG_LAUNCH, val);
atsd_regs_wait(va_space, regs);
}
// Issue and wait for the required membars following an invalidate
static void atsd_issue_membars(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs)
{
size_t i;
NvU32 num_membars = 0;
// These membars are issued using ATSDs which target a reserved PASID of 0.
// That PASID is valid on the GPU in order for the membar to be valid, but
// 0 will never be used by the kernel for an actual address space so the
// ATSD won't actually invalidate any entries.
NvU64 val = atsd_get_launch_val(0, MMU_PAGE_COUNT);
for_each_npu_index_in_va_space(i, va_space) {
uvm_ibm_npu_t *npu = &g_uvm_global.npus[i];
num_membars = max(num_membars, npu->atsd_regs.num_membars);
}
for (i = 0; i < num_membars; i++)
atsd_launch_wait(va_space, regs, val);
}
static void uvm_ats_ibm_invalidate_all(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs)
{
NvU64 val = atsd_get_launch_val(va_space_pasid(va_space), MMU_PAGE_COUNT);
atsd_launch_wait(va_space, regs, val);
atsd_issue_membars(va_space, regs);
}
static void uvm_ats_ibm_invalidate_range(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs, NvU64 start, int psize)
{
NvU64 val = atsd_get_launch_val(va_space_pasid(va_space), psize);
// Barriers are expensive, so write all address registers first then do a
// single barrier for all of them.
atsd_regs_write(va_space, regs, NPU_ATSD_REG_AVA, start);
eieio();
atsd_launch_wait(va_space, regs, val);
atsd_issue_membars(va_space, regs);
}
#endif // UVM_ATS_IBM_SUPPORTED_IN_DRIVER()
void uvm_ats_ibm_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end)
{
#if UVM_ATS_IBM_SUPPORTED_IN_DRIVER()
unsigned long irq_flags;
uvm_atsd_regs_t regs;
NvU64 atsd_start = start;
int psize = atsd_calc_size(&atsd_start, end);
uvm_ibm_va_space_t *ibm_va_space = &va_space->ats.ibm;
BUILD_BUG_ON(order_base_2(UVM_MAX_ATSD_REGS) > 8*sizeof(regs.ids[0]));
// We must hold this lock in at least read mode when accessing NPU
// registers. See the comment in uvm_ats_ibm_unregister_gpu_va_space_driver.
uvm_read_lock_irqsave(&ibm_va_space->rwlock, irq_flags);
if (!bitmap_empty(ibm_va_space->npu_active_mask, NV_MAX_NPUS)) {
atsd_regs_acquire(va_space, &regs);
if (psize == MMU_PAGE_COUNT)
uvm_ats_ibm_invalidate_all(va_space, &regs);
else
uvm_ats_ibm_invalidate_range(va_space, &regs, atsd_start, psize);
atsd_regs_release(va_space, &regs);
}
uvm_read_unlock_irqrestore(&ibm_va_space->rwlock, irq_flags);
#else
UVM_ASSERT_MSG(0, "This function should not be called on this kernel version\n");
#endif // UVM_ATS_IBM_SUPPORTED_IN_DRIVER()
}
#endif // UVM_ATS_IBM_SUPPORTED
#endif // UVM_IBM_NPU_SUPPORTED

View File

@@ -1,266 +0,0 @@
/*******************************************************************************
Copyright (c) 2018-2019 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef __UVM_ATS_IBM_H__
#define __UVM_ATS_IBM_H__
#include "uvm_linux.h"
#include "uvm_forward_decl.h"
#include "uvm_hal_types.h"
#if defined(NVCPU_PPC64LE) && defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT)
#include <asm/mmu.h>
#if defined(NV_MAX_NPUS)
#define UVM_IBM_NPU_SUPPORTED() 1
#else
#define UVM_IBM_NPU_SUPPORTED() 0
#endif
#else
#define UVM_IBM_NPU_SUPPORTED() 0
#endif
#if defined(NV_ASM_OPAL_API_H_PRESENT)
// For OPAL_NPU_INIT_CONTEXT
#include <asm/opal-api.h>
#endif
// Timeline of kernel changes:
//
// 0) Before 1ab66d1fbadad86b1f4a9c7857e193af0ee0022c
// - No NPU-ATS code existed, nor did the OPAL_NPU_INIT_CONTEXT firmware
// call.
// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Not defined
// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Not defined
// - OPAL_NPU_INIT_CONTEXT Not defined
// - ATS support type None
//
// 1) NPU ATS code added: 1ab66d1fbadad86b1f4a9c7857e193af0ee0022c, v4.12
// (2017-04-03)
// - This commit added initial support for NPU ATS, including the necessary
// OPAL firmware calls. This support was developmental and required
// several bug fixes before it could be used in production.
// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Defined
// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Not defined
// - OPAL_NPU_INIT_CONTEXT Defined
// - ATS support type None
//
// 2) NPU ATS code fixed: a1409adac748f0db655e096521bbe6904aadeb98, v4.17
// (2018-04-11)
// - This commit changed the function signature for pnv_npu2_init_context's
// callback parameter. Since all required bug fixes went in prior to this
// change, we can use the callback signature as a flag to indicate
// whether the PPC arch layer in the kernel supports ATS in production.
// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Defined
// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Defined
// - OPAL_NPU_INIT_CONTEXT Defined
// - ATS support type Kernel
//
// 3) NPU ATS code removed: 7eb3cf761927b2687164e182efa675e6c09cfe44, v5.3
// (2019-06-25)
// - This commit removed NPU-ATS support from the PPC arch layer, so the
// driver needs to handle things instead. pnv_npu2_init_context is no
// longer present, so we use OPAL_NPU_INIT_CONTEXT to differentiate
// between this state and scenario #0.
// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Not defined
// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Not defined
// - OPAL_NPU_INIT_CONTEXT Defined
// - ATS support type Driver
//
#if defined(NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID)
#define UVM_ATS_IBM_SUPPORTED_IN_KERNEL() 1
#define UVM_ATS_IBM_SUPPORTED_IN_DRIVER() 0
#elif !defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT) && defined(OPAL_NPU_INIT_CONTEXT) && UVM_CAN_USE_MMU_NOTIFIERS()
#define UVM_ATS_IBM_SUPPORTED_IN_KERNEL() 0
#define UVM_ATS_IBM_SUPPORTED_IN_DRIVER() 1
#else
#define UVM_ATS_IBM_SUPPORTED_IN_KERNEL() 0
#define UVM_ATS_IBM_SUPPORTED_IN_DRIVER() 0
#endif
#define UVM_ATS_IBM_SUPPORTED() (UVM_ATS_IBM_SUPPORTED_IN_KERNEL() || UVM_ATS_IBM_SUPPORTED_IN_DRIVER())
// Maximum number of parallel ATSD register sets per NPU
#define UVM_MAX_ATSD_REGS 16
typedef struct
{
#if UVM_IBM_NPU_SUPPORTED()
// These are the active NPUs in this VA space, that is, all NPUs with
// GPUs that have GPU VA spaces registered in this VA space.
//
// If a bit is clear in npu_active_mask then the corresponding entry of
// npu_ref_counts is 0. If a bit is set then the corresponding entry of
// npu_ref_counts is greater than 0.
NvU32 npu_ref_counts[NV_MAX_NPUS];
DECLARE_BITMAP(npu_active_mask, NV_MAX_NPUS);
#endif
// Lock protecting npu_ref_counts and npu_active_mask. Invalidations
// take this lock for read. GPU VA space register and unregister take
// this lock for write. Since all invalidations take the lock for read
// for the duration of the invalidate, taking the lock for write also
// flushes all invalidates.
//
// This is a spinlock because the invalidation code paths may be called
// with interrupts disabled, so those paths can't take the VA space
// lock. We could use a normal exclusive spinlock instead, but a reader/
// writer lock is preferred to allow concurrent invalidates in the same
// VA space.
uvm_rwlock_irqsave_t rwlock;
} uvm_ibm_va_space_t;
typedef struct
{
#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL()
struct npu_context *npu_context;
#endif
// Used on the teardown path to know what to clean up. npu_context acts
// as the equivalent flag for kernel-provided support.
bool did_ibm_driver_init;
} uvm_ibm_gpu_va_space_t;
struct uvm_ibm_npu_struct
{
// Number of retained GPUs under this NPU. The other fields in this struct
// are only valid if this is non-zero.
unsigned int num_retained_gpus;
// PCI domain containing this NPU. This acts as a unique system-wide ID for
// this UVM NPU.
int pci_domain;
// The ATS-related fields are only valid when ATS support is enabled and
// UVM_ATS_IBM_SUPPORTED_IN_DRIVER() is 1.
struct
{
// Mapped addresses of the ATSD trigger registers. There may be more
// than one set of identical registers per NPU to enable concurrent
// invalidates.
//
// These will not be accessed unless there is a GPU VA space registered
// on a GPU under this NPU. They are protected by bit locks in the locks
// field.
__be64 __iomem *io_addrs[UVM_MAX_ATSD_REGS];
// Actual number of registers in the io_addrs array
size_t count;
// Bitmask for allocation and locking of the registers. Bit index n
// corresponds to io_addrs[n]. A set bit means that index is in use
// (locked).
DECLARE_BITMAP(locks, UVM_MAX_ATSD_REGS);
// Max value of any uvm_parent_gpu_t::num_hshub_tlb_invalidate_membars
// for all retained GPUs under this NPU.
NvU32 num_membars;
} atsd_regs;
};
#if UVM_IBM_NPU_SUPPORTED()
NV_STATUS uvm_ats_ibm_add_gpu(uvm_parent_gpu_t *parent_gpu);
void uvm_ats_ibm_remove_gpu(uvm_parent_gpu_t *parent_gpu);
#else
static NV_STATUS uvm_ats_ibm_add_gpu(uvm_parent_gpu_t *parent_gpu)
{
return NV_OK;
}
static void uvm_ats_ibm_remove_gpu(uvm_parent_gpu_t *parent_gpu)
{
}
#endif // UVM_IBM_NPU_SUPPORTED
#if UVM_ATS_IBM_SUPPORTED()
// Initializes IBM specific GPU state.
//
// LOCKING: None
void uvm_ats_ibm_init_va_space(uvm_va_space_t *va_space);
// Enables ATS access for the gpu_va_space on the mm_struct associated with
// the VA space (va_space_mm).
//
// If UVM_ATS_IBM_SUPPORTED_IN_KERNEL() is 1, NV_ERR_NOT_SUPPORTED is
// returned if current->mm does not match va_space_mm.mm or if a GPU VA
// space within another VA space has already called this function on the
// same mm.
//
// If UVM_ATS_IBM_SUPPORTED_IN_DRIVER() is 1 there are no such restrictions.
//
// LOCKING: The VA space lock must be held in write mode.
// current->mm->mmap_lock must be held in write mode iff
// UVM_ATS_IBM_SUPPORTED_IN_KERNEL() is 1.
NV_STATUS uvm_ats_ibm_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space);
// Disables ATS access for the gpu_va_space. Prior to calling this function,
// the caller must guarantee that the GPU will no longer make any ATS
// accesses in this GPU VA space, and that no ATS fault handling for this
// GPU will be attempted.
//
// LOCKING: This function may block on mmap_lock and the VA space lock, so
// neither must be held.
void uvm_ats_ibm_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space);
// Synchronously invalidate ATS translations cached by GPU TLBs. The
// invalidate applies to all GPUs with active GPU VA spaces in va_space, and
// covers all pages touching any part of the given range. end is inclusive.
//
// GMMU translations in the given range are not guaranteed to be
// invalidated.
//
// LOCKING: No locks are required, but this function may be called with
// interrupts disabled.
void uvm_ats_ibm_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end);
#else
static void uvm_ats_ibm_init_va_space(uvm_va_space_t *va_space)
{
}
static NV_STATUS uvm_ats_ibm_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
{
return NV_OK;
}
static void uvm_ats_ibm_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space)
{
}
static void uvm_ats_ibm_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end)
{
}
#endif // UVM_ATS_IBM_SUPPORTED
static NV_STATUS uvm_ats_ibm_bind_gpu(uvm_gpu_va_space_t *gpu_va_space)
{
return NV_OK;
}
static void uvm_ats_ibm_unbind_gpu(uvm_gpu_va_space_t *gpu_va_space)
{
}
#endif // __UVM_ATS_IBM_H__
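A minimal sketch of the ref-count/active-mask invariant documented above (a bit in npu_active_mask is set exactly when the matching npu_ref_counts entry is non-zero, and copro tracking toggles when the mask becomes non-empty or empty); the names, fixed NPU count, and return-value convention here are illustrative assumptions, not the driver's interface:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NPUS 8

static unsigned ref_counts[MAX_NPUS];
static unsigned long active_mask;            /* bit n set iff ref_counts[n] > 0 */

/* Register: the first user of an NPU marks it active so invalidates target it.
 * Returns true when this is the first active NPU in the whole VA space, which
 * is when the caller would add the coprocessor context. */
static bool npu_register(unsigned idx)
{
    bool first_in_va_space = (active_mask == 0);
    if (ref_counts[idx]++ == 0)
        active_mask |= 1ul << idx;
    return first_in_va_space;
}

/* Unregister: the last user clears the bit; an empty mask means the caller
 * would remove the coprocessor context. Calls must balance npu_register(). */
static bool npu_unregister(unsigned idx)
{
    if (--ref_counts[idx] == 0)
        active_mask &= ~(1ul << idx);
    return active_mask == 0;
}

int main(void)
{
    printf("first register: %d\n", npu_register(2));    /* 1: add copro    */
    printf("second register: %d\n", npu_register(2));   /* 0               */
    printf("unregister: %d\n", npu_unregister(2));      /* 0               */
    printf("last unregister: %d\n", npu_unregister(2)); /* 1: remove copro */
    return 0;
}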

View File

@@ -139,7 +139,11 @@ static NvU32 smmu_vcmdq_read32(void __iomem *smmu_cmdqv_base, int reg)
static void smmu_vcmdq_write64(void __iomem *smmu_cmdqv_base, int reg, NvU64 val)
{
#if NV_IS_EXPORT_SYMBOL_PRESENT___iowrite64_lo_hi
__iowrite64_lo_hi(val, SMMU_VCMDQ_BASE_ADDR(smmu_cmdqv_base, VCMDQ) + reg);
#else
iowrite64(val, SMMU_VCMDQ_BASE_ADDR(smmu_cmdqv_base, VCMDQ) + reg);
#endif
}
// Fix for Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2022-2023 NVIDIA Corporation
Copyright (c) 2022-2024 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -38,12 +38,10 @@ void uvm_hal_blackwell_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->utlb_per_gpc_count = uvm_blackwell_get_utlbs_per_gpc(parent_gpu);
parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount *
parent_gpu->utlb_per_gpc_count;
parent_gpu->fault_buffer.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count;
{
uvm_fault_buffer_entry_t *dummy;
UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 <<
(sizeof(dummy->fault_source.utlb_id) * 8)));
UVM_ASSERT(parent_gpu->fault_buffer.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8)));
}
// A single top level PDE on Blackwell covers 64 PB and that's the minimum
@@ -51,14 +49,16 @@ void uvm_hal_blackwell_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->rm_va_base = 0;
parent_gpu->rm_va_size = 64 * UVM_SIZE_1PB;
parent_gpu->peer_va_base = parent_gpu->rm_va_base + parent_gpu->rm_va_size;
parent_gpu->peer_va_size = NV_MAX_DEVICES * UVM_PEER_IDENTITY_VA_SIZE;
parent_gpu->uvm_mem_va_base = parent_gpu->rm_va_size + 384 * UVM_SIZE_1TB;
parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE;
// See uvm_mmu.h for mapping placement
parent_gpu->flat_vidmem_va_base = (64 * UVM_SIZE_1PB) + (32 * UVM_SIZE_1TB);
// TODO: Bug 3953852: Set this to true pending Blackwell changes
parent_gpu->ce_phys_vidmem_write_supported = !uvm_parent_gpu_is_coherent(parent_gpu);
parent_gpu->ce_phys_vidmem_write_supported = true;
parent_gpu->peer_copy_mode = g_uvm_global.peer_copy_mode;
@@ -81,10 +81,6 @@ void uvm_hal_blackwell_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->non_replayable_faults_supported = true;
parent_gpu->access_counters_supported = true;
parent_gpu->access_counters_can_use_physical_addresses = false;
parent_gpu->fault_cancel_va_supported = true;
parent_gpu->scoped_atomics_supported = true;
@@ -102,4 +98,17 @@ void uvm_hal_blackwell_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
parent_gpu->plc_supported = true;
parent_gpu->no_ats_range_required = true;
parent_gpu->conf_computing.per_channel_key_rotation = true;
// TODO: Bug 5023085: this should be queried from RM instead of determined
// by UVM.
if (parent_gpu->rm_info.gpuArch == NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GB100 &&
parent_gpu->rm_info.gpuImplementation ==
NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GB10B)
parent_gpu->is_integrated_gpu = true;
if (parent_gpu->rm_info.gpuArch == NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GB200 &&
parent_gpu->rm_info.gpuImplementation ==
NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GB20B)
parent_gpu->is_integrated_gpu = true;
}

View File

@@ -254,3 +254,31 @@ void uvm_hal_blackwell_host_tlb_invalidate_test(uvm_push_t *push,
HWVALUE(C96F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi));
}
}
uvm_access_counter_clear_op_t
uvm_hal_blackwell_access_counter_query_clear_op_gb100(uvm_parent_gpu_t *parent_gpu,
uvm_access_counter_buffer_entry_t **buffer_entries,
NvU32 num_entries)
{
if (parent_gpu->rm_info.accessCntrBufferCount > 1) {
NvU32 i;
for (i = 0; i < num_entries; i++) {
const uvm_access_counter_buffer_entry_t *entry = buffer_entries[i];
// The LSb identifies the die ID.
if ((entry->tag & 0x1) == 1)
return UVM_ACCESS_COUNTER_CLEAR_OP_ALL;
}
}
return UVM_ACCESS_COUNTER_CLEAR_OP_TARGETED;
}
uvm_access_counter_clear_op_t
uvm_hal_blackwell_access_counter_query_clear_op_gb20x(uvm_parent_gpu_t *parent_gpu,
uvm_access_counter_buffer_entry_t **buffer_entries,
NvU32 num_entries)
{
return UVM_ACCESS_COUNTER_CLEAR_OP_TARGETED;
}

View File

@@ -37,9 +37,9 @@
#include "uvm_hal_types.h"
#include "uvm_blackwell_fault_buffer.h"
#include "hwref/blackwell/gb100/dev_fault.h"
#include "hwref/blackwell/gb100/dev_mmu.h"
static uvm_mmu_mode_hal_t blackwell_mmu_mode_hal;
static uvm_mmu_mode_hal_t blackwell_integrated_mmu_mode_hal;
static NvU32 page_table_depth_blackwell(NvU64 page_size)
{
@@ -60,35 +60,71 @@ static NvU64 page_sizes_blackwell(void)
return UVM_PAGE_SIZE_256G | UVM_PAGE_SIZE_512M | UVM_PAGE_SIZE_2M | UVM_PAGE_SIZE_64K | UVM_PAGE_SIZE_4K;
}
static NvU64 page_sizes_blackwell_integrated(void)
{
return UVM_PAGE_SIZE_2M | UVM_PAGE_SIZE_64K | UVM_PAGE_SIZE_4K;
}
static uvm_mmu_mode_hal_t *__uvm_hal_mmu_mode_blackwell(uvm_mmu_mode_hal_t *mmu_mode_hal,
NvU64 big_page_size)
{
uvm_mmu_mode_hal_t *hopper_mmu_mode_hal;
UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K);
hopper_mmu_mode_hal = uvm_hal_mmu_mode_hopper(big_page_size);
UVM_ASSERT(hopper_mmu_mode_hal);
// The assumption made is that arch_hal->mmu_mode_hal() will be called
// under the global lock the first time, so check it here.
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
*mmu_mode_hal = *hopper_mmu_mode_hal;
mmu_mode_hal->page_table_depth = page_table_depth_blackwell;
return mmu_mode_hal;
}
uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_blackwell(NvU64 big_page_size)
{
static bool initialized = false;
UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K);
// TODO: Bug 1789555: RM should reject the creation of GPU VA spaces with
// 128K big page size for Pascal+ GPUs
if (big_page_size == UVM_PAGE_SIZE_128K)
return NULL;
if (!initialized) {
uvm_mmu_mode_hal_t *hopper_mmu_mode_hal = uvm_hal_mmu_mode_hopper(big_page_size);
UVM_ASSERT(hopper_mmu_mode_hal);
// The assumption made is that arch_hal->mmu_mode_hal() will be called
// under the global lock the first time, so check it here.
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
blackwell_mmu_mode_hal = *hopper_mmu_mode_hal;
blackwell_mmu_mode_hal.page_table_depth = page_table_depth_blackwell;
blackwell_mmu_mode_hal.page_sizes = page_sizes_blackwell;
uvm_mmu_mode_hal_t *mmu_mode_hal;
mmu_mode_hal = __uvm_hal_mmu_mode_blackwell(&blackwell_mmu_mode_hal, big_page_size);
mmu_mode_hal->page_sizes = page_sizes_blackwell;
initialized = true;
}
return &blackwell_mmu_mode_hal;
}
uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_blackwell_integrated(NvU64 big_page_size)
{
static bool initialized = false;
// TODO: Bug 1789555: RM should reject the creation of GPU VA spaces with
// 128K big page size for Pascal+ GPUs
if (big_page_size == UVM_PAGE_SIZE_128K)
return NULL;
if (!initialized) {
uvm_mmu_mode_hal_t *mmu_mode_hal;
mmu_mode_hal = __uvm_hal_mmu_mode_blackwell(&blackwell_integrated_mmu_mode_hal, big_page_size);
mmu_mode_hal->page_sizes = page_sizes_blackwell_integrated;
initialized = true;
}
return &blackwell_integrated_mmu_mode_hal;
}
NvU16 uvm_hal_blackwell_mmu_client_id_to_utlb_id(NvU16 client_id)
{
switch (client_id) {

View File

@@ -65,7 +65,10 @@ static NV_STATUS test_non_pipelined(uvm_gpu_t *gpu)
memset(host_ptr, 0, CE_TEST_MEM_SIZE);
for (i = 0; i < CE_TEST_MEM_COUNT; ++i) {
status = uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_GPU, CE_TEST_MEM_SIZE, 0, &mem[i]);
uvm_rm_mem_type_t type;
type = gpu->mem_info.size ? UVM_RM_MEM_TYPE_GPU : UVM_RM_MEM_TYPE_SYS;
status = uvm_rm_mem_alloc(gpu, type, CE_TEST_MEM_SIZE, 0, &mem[i]);
TEST_CHECK_GOTO(status == NV_OK, done);
}
@@ -405,6 +408,7 @@ static NV_STATUS test_memcpy_and_memset(uvm_gpu_t *gpu)
uvm_rm_mem_t *sys_rm_mem = NULL;
uvm_rm_mem_t *gpu_rm_mem = NULL;
uvm_gpu_address_t gpu_addresses[4] = {0};
size_t gpu_addresses_length = 0;
size_t size = gpu->big_page.internal_size;
static const size_t element_sizes[] = {1, 4, 8};
const size_t iterations = 4;
@@ -435,7 +439,7 @@ static NV_STATUS test_memcpy_and_memset(uvm_gpu_t *gpu)
// Virtual address (in UVM's internal address space) backed by sysmem
TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_SYS, size, 0, &sys_rm_mem), done);
gpu_addresses[0] = uvm_rm_mem_get_gpu_va(sys_rm_mem, gpu, is_proxy_va_space);
gpu_addresses[gpu_addresses_length++] = uvm_rm_mem_get_gpu_va(sys_rm_mem, gpu, is_proxy_va_space);
if (g_uvm_global.conf_computing_enabled) {
for (i = 0; i < iterations; ++i) {
@@ -472,21 +476,23 @@ static NV_STATUS test_memcpy_and_memset(uvm_gpu_t *gpu)
// Physical address in sysmem
TEST_NV_CHECK_GOTO(uvm_mem_alloc(&mem_params, &sys_uvm_mem), done);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_phys(sys_uvm_mem, gpu), done);
gpu_addresses[1] = uvm_mem_gpu_address_physical(sys_uvm_mem, gpu, 0, size);
gpu_addresses[gpu_addresses_length++] = uvm_mem_gpu_address_physical(sys_uvm_mem, gpu, 0, size);
// Physical address in vidmem
mem_params.backing_gpu = gpu;
TEST_NV_CHECK_GOTO(uvm_mem_alloc(&mem_params, &gpu_uvm_mem), done);
gpu_addresses[2] = uvm_mem_gpu_address_physical(gpu_uvm_mem, gpu, 0, size);
if (gpu->mem_info.size > 0) {
// Physical address in vidmem
mem_params.backing_gpu = gpu;
TEST_NV_CHECK_GOTO(uvm_mem_alloc(&mem_params, &gpu_uvm_mem), done);
gpu_addresses[gpu_addresses_length++] = uvm_mem_gpu_address_physical(gpu_uvm_mem, gpu, 0, size);
// Virtual address (in UVM's internal address space) backed by vidmem
TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_GPU, size, 0, &gpu_rm_mem), done);
gpu_addresses[3] = uvm_rm_mem_get_gpu_va(gpu_rm_mem, gpu, is_proxy_va_space);
// Virtual address (in UVM's internal address space) backed by vidmem
TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_GPU, size, 0, &gpu_rm_mem), done);
gpu_addresses[gpu_addresses_length++] = uvm_rm_mem_get_gpu_va(gpu_rm_mem, gpu, is_proxy_va_space);
}
for (i = 0; i < iterations; ++i) {
for (j = 0; j < ARRAY_SIZE(gpu_addresses); ++j) {
for (k = 0; k < ARRAY_SIZE(gpu_addresses); ++k) {
for (j = 0; j < gpu_addresses_length; ++j) {
for (k = 0; k < gpu_addresses_length; ++k) {
for (s = 0; s < ARRAY_SIZE(element_sizes); s++) {
TEST_NV_CHECK_GOTO(test_memcpy_and_memset_inner(gpu,
gpu_addresses[k],

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2023 NVIDIA Corporation
Copyright (c) 2015-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -38,6 +38,7 @@
#include "clb06f.h"
#include "uvm_conf_computing.h"
// WLC push is decrypted by SEC2 or CE (in WLC schedule).
// In sysmem it's followed by auth tag.
#define WLC_PUSHBUFFER_ALIGNMENT max3(UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT, \
@@ -97,6 +98,37 @@ typedef enum
UVM_CHANNEL_UPDATE_MODE_FORCE_ALL
} uvm_channel_update_mode_t;
typedef enum
{
// Reserve an entry that is expected to use p2p operations.
UVM_CHANNEL_RESERVE_WITH_P2P,
// Reserve an entry that is not expected to use p2p operations.
UVM_CHANNEL_RESERVE_NO_P2P,
} uvm_channel_reserve_type_t;
bool uvm_channel_pool_is_p2p(uvm_channel_pool_t *pool)
{
uvm_channel_manager_t *manager = pool->manager;
uvm_gpu_t *gpu = manager->gpu;
uvm_gpu_id_t id;
if (manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_GPU] == pool)
return true;
uvm_spin_lock(&gpu->peer_info.peer_gpu_lock);
for_each_gpu_id_in_mask(id, &gpu->peer_info.peer_gpu_mask) {
if (manager->pool_to_use.gpu_to_gpu[uvm_id_gpu_index(id)] == pool) {
uvm_spin_unlock(&gpu->peer_info.peer_gpu_lock);
return true;
}
}
uvm_spin_unlock(&gpu->peer_info.peer_gpu_lock);
return false;
}
bool uvm_channel_pool_uses_mutex(uvm_channel_pool_t *pool)
{
// Work submission to proxy channels in SR-IOV heavy entails calling RM API
@@ -119,10 +151,12 @@ bool uvm_channel_pool_uses_mutex(uvm_channel_pool_t *pool)
static void channel_pool_lock_init(uvm_channel_pool_t *pool)
{
uvm_lock_order_t order = UVM_LOCK_ORDER_CHANNEL;
uvm_lock_order_t order;
if (g_uvm_global.conf_computing_enabled && uvm_channel_pool_is_wlc(pool))
order = UVM_LOCK_ORDER_WLC_CHANNEL;
else
order = UVM_LOCK_ORDER_CHANNEL;
if (uvm_channel_pool_uses_mutex(pool))
uvm_mutex_init(&pool->mutex, order);
@@ -274,7 +308,9 @@ NvU32 uvm_channel_get_available_gpfifo_entries(uvm_channel_t *channel)
return available;
}
static bool try_claim_channel_locked(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
static bool try_claim_channel_locked(uvm_channel_t *channel,
NvU32 num_gpfifo_entries,
uvm_channel_reserve_type_t reserve_type)
{
bool claimed = false;
@@ -283,6 +319,9 @@ static bool try_claim_channel_locked(uvm_channel_t *channel, NvU32 num_gpfifo_en
uvm_channel_pool_assert_locked(channel->pool);
if (reserve_type == UVM_CHANNEL_RESERVE_WITH_P2P && channel->suspended_p2p)
return false;
if (channel_get_available_gpfifo_entries(channel) >= num_gpfifo_entries) {
channel->current_gpfifo_count += num_gpfifo_entries;
claimed = true;
@@ -291,12 +330,14 @@ static bool try_claim_channel_locked(uvm_channel_t *channel, NvU32 num_gpfifo_en
return claimed;
}
static bool try_claim_channel(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
static bool try_claim_channel(uvm_channel_t *channel,
NvU32 num_gpfifo_entries,
uvm_channel_reserve_type_t reserve_type)
{
bool claimed;
channel_pool_lock(channel->pool);
claimed = try_claim_channel_locked(channel, num_gpfifo_entries);
claimed = try_claim_channel_locked(channel, num_gpfifo_entries, reserve_type);
channel_pool_unlock(channel->pool);
return claimed;
@@ -349,7 +390,8 @@ static bool test_claim_and_lock_channel(uvm_channel_t *channel, NvU32 num_gpfifo
if (uvm_channel_is_locked_for_push(channel))
return false;
if (try_claim_channel_locked(channel, num_gpfifo_entries)) {
// Confidential compute is not using p2p ops, reserve without p2p
if (try_claim_channel_locked(channel, num_gpfifo_entries, UVM_CHANNEL_RESERVE_NO_P2P)) {
lock_channel_for_push(channel);
return true;
}
@@ -490,7 +532,9 @@ static NV_STATUS channel_reserve_and_lock_in_pool(uvm_channel_pool_t *pool, uvm_
for_each_clear_bit(index, pool->conf_computing.push_locks, pool->num_channels) {
channel = &pool->channels[index];
if (try_claim_channel_locked(channel, 1)) {
// Confidential compute is not using p2p ops, reserve without p2p
if (try_claim_channel_locked(channel, 1, UVM_CHANNEL_RESERVE_NO_P2P)) {
lock_channel_for_push(channel);
goto done;
}
@@ -529,7 +573,9 @@ done:
}
// Reserve a channel in the specified pool
static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool, uvm_channel_t **channel_out)
static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool,
uvm_channel_reserve_type_t reserve_type,
uvm_channel_t **channel_out)
{
uvm_channel_t *channel;
uvm_spin_loop_t spin;
@@ -541,7 +587,7 @@ static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool, uvm_channel_t
uvm_for_each_channel_in_pool(channel, pool) {
// TODO: Bug 1764953: Prefer idle/less busy channels
if (try_claim_channel(channel, 1)) {
if (try_claim_channel(channel, 1, reserve_type)) {
*channel_out = channel;
return NV_OK;
}
@@ -554,7 +600,7 @@ static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool, uvm_channel_t
uvm_channel_update_progress(channel);
if (try_claim_channel(channel, 1)) {
if (try_claim_channel(channel, 1, reserve_type)) {
*channel_out = channel;
return NV_OK;
@@ -564,6 +610,9 @@ static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool, uvm_channel_t
if (status != NV_OK)
return status;
if (reserve_type == UVM_CHANNEL_RESERVE_WITH_P2P && channel->suspended_p2p)
return NV_ERR_BUSY_RETRY;
UVM_SPIN_LOOP(&spin);
}
}
@@ -575,12 +624,18 @@ static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool, uvm_channel_t
NV_STATUS uvm_channel_reserve_type(uvm_channel_manager_t *manager, uvm_channel_type_t type, uvm_channel_t **channel_out)
{
uvm_channel_reserve_type_t reserve_type;
uvm_channel_pool_t *pool = manager->pool_to_use.default_for_type[type];
UVM_ASSERT(pool != NULL);
UVM_ASSERT(type < UVM_CHANNEL_TYPE_COUNT);
return channel_reserve_in_pool(pool, channel_out);
if (type == UVM_CHANNEL_TYPE_GPU_TO_GPU)
reserve_type = UVM_CHANNEL_RESERVE_WITH_P2P;
else
reserve_type = UVM_CHANNEL_RESERVE_NO_P2P;
return channel_reserve_in_pool(pool, reserve_type, channel_out);
}
NV_STATUS uvm_channel_reserve_gpu_to_gpu(uvm_channel_manager_t *manager,
@@ -596,7 +651,7 @@ NV_STATUS uvm_channel_reserve_gpu_to_gpu(uvm_channel_manager_t *manager,
UVM_ASSERT(pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE);
return channel_reserve_in_pool(pool, channel_out);
return channel_reserve_in_pool(pool, UVM_CHANNEL_RESERVE_WITH_P2P, channel_out);
}
NV_STATUS uvm_channel_manager_wait(uvm_channel_manager_t *manager)
@@ -1441,7 +1496,6 @@ void uvm_channel_end_push(uvm_push_t *push)
bool needs_sec2_work_submit = false;
channel_pool_lock(channel->pool);
encrypt_push(push);
new_tracking_value = ++channel->tracking_sem.queued_value;
@@ -1523,6 +1577,7 @@ void uvm_channel_end_push(uvm_push_t *push)
// push must be updated before that. Notably uvm_pushbuffer_end_push() has
// to be called first.
unlock_channel_for_push(channel);
channel_pool_unlock(channel->pool);
// This memory barrier is borrowed from CUDA, as it supposedly fixes perf
@@ -1805,13 +1860,14 @@ NV_STATUS uvm_channel_reserve(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
if (g_uvm_global.conf_computing_enabled)
return channel_reserve_and_lock(channel, num_gpfifo_entries);
if (try_claim_channel(channel, num_gpfifo_entries))
// Direct channel reservations don't use p2p
if (try_claim_channel(channel, num_gpfifo_entries, UVM_CHANNEL_RESERVE_NO_P2P))
return NV_OK;
uvm_channel_update_progress(channel);
uvm_spin_loop_init(&spin);
while (!try_claim_channel(channel, num_gpfifo_entries) && status == NV_OK) {
while (!try_claim_channel(channel, num_gpfifo_entries, UVM_CHANNEL_RESERVE_NO_P2P) && status == NV_OK) {
UVM_SPIN_LOOP(&spin);
status = uvm_channel_check_errors(channel);
uvm_channel_update_progress(channel);
@@ -1825,9 +1881,11 @@ void uvm_channel_release(uvm_channel_t *channel, NvU32 num_gpfifo_entries)
channel_pool_lock(channel->pool);
UVM_ASSERT(uvm_channel_is_locked_for_push(channel));
unlock_channel_for_push(channel);
UVM_ASSERT(channel->current_gpfifo_count >= num_gpfifo_entries);
channel->current_gpfifo_count -= num_gpfifo_entries;
channel_pool_unlock(channel->pool);
}
@@ -1852,6 +1910,140 @@ static uvm_gpfifo_entry_t *uvm_channel_get_first_pending_entry(uvm_channel_t *ch
return entry;
}
static NV_STATUS channel_suspend_p2p(uvm_channel_t *channel)
{
NV_STATUS status = NV_OK;
UVM_ASSERT(channel);
UVM_ASSERT(!channel->suspended_p2p);
// Reserve all entries to block traffic.
// Each channel needs 1 entry as sentinel.
status = uvm_channel_reserve(channel, channel->num_gpfifo_entries - 1);
// Prevent p2p traffic from reserving entries
if (status == NV_OK)
channel->suspended_p2p = true;
// Release the entries reserved above to allow non-p2p traffic
uvm_channel_release(channel, channel->num_gpfifo_entries - 1);
return status;
}
static void channel_resume_p2p(uvm_channel_t *channel)
{
UVM_ASSERT(channel);
UVM_ASSERT(channel->suspended_p2p);
channel->suspended_p2p = false;
}
static NV_STATUS channel_pool_suspend_p2p(uvm_channel_pool_t *pool)
{
NV_STATUS status = NV_OK;
NvU32 i;
UVM_ASSERT(pool);
UVM_ASSERT(!uvm_channel_pool_is_wlc(pool));
UVM_ASSERT(!uvm_channel_pool_is_lcic(pool));
for (i = 0; i < pool->num_channels; ++i) {
status = channel_suspend_p2p(pool->channels + i);
if (status != NV_OK)
break;
}
// Resume suspended channels in case of error
while ((status != NV_OK) && (i-- > 0))
channel_resume_p2p(pool->channels + i);
for (i = 0; i < pool->num_channels; ++i)
UVM_ASSERT(pool->channels[i].suspended_p2p == (status == NV_OK));
return status;
}
static void channel_pool_resume_p2p(uvm_channel_pool_t *pool)
{
NvU32 i;
UVM_ASSERT(pool);
UVM_ASSERT(!uvm_channel_pool_is_wlc(pool));
UVM_ASSERT(!uvm_channel_pool_is_lcic(pool));
for (i = 0; i < pool->num_channels; ++i)
channel_resume_p2p(pool->channels + i);
}
NV_STATUS uvm_channel_manager_suspend_p2p(uvm_channel_manager_t *channel_manager)
{
uvm_channel_pool_t *pool;
NV_STATUS status = NV_OK;
uvm_gpu_t *gpu = channel_manager->gpu;
uvm_gpu_id_t gpu_id;
DECLARE_BITMAP(suspended_pools, UVM_COPY_ENGINE_COUNT_MAX);
// Pools can be assigned to multiple 'pool_to_use' locations
// Use bitmap to track which were suspended.
bitmap_zero(suspended_pools, channel_manager->num_channel_pools);
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
for_each_gpu_id_in_mask(gpu_id, &gpu->peer_info.peer_gpu_mask) {
pool = channel_manager->pool_to_use.gpu_to_gpu[uvm_id_gpu_index(gpu_id)];
if (pool && !test_bit(uvm_channel_pool_index_in_channel_manager(pool), suspended_pools)) {
status = channel_pool_suspend_p2p(pool);
if (status != NV_OK)
break;
__set_bit(uvm_channel_pool_index_in_channel_manager(pool), suspended_pools);
}
}
pool = channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_GPU];
if (status == NV_OK && !test_bit(uvm_channel_pool_index_in_channel_manager(pool), suspended_pools)) {
status = channel_pool_suspend_p2p(pool);
// Do not set the suspended_pools bit here. If status is NV_OK it's not
// needed, otherwise it should not be set anyway.
}
// Resume suspended pools in case of error
if (status != NV_OK) {
unsigned i;
for_each_set_bit(i, suspended_pools, channel_manager->num_channel_pools)
channel_pool_resume_p2p(channel_manager->channel_pools + i);
}
return status;
}
void uvm_channel_manager_resume_p2p(uvm_channel_manager_t *channel_manager)
{
uvm_channel_pool_t *pool;
uvm_gpu_t *gpu = channel_manager->gpu;
uvm_gpu_id_t gpu_id;
DECLARE_BITMAP(resumed_pools, UVM_COPY_ENGINE_COUNT_MAX);
// Pools can be assigned to multiple 'pool_to_use' locations
// Use bitmap to track which were resumed.
bitmap_zero(resumed_pools, channel_manager->num_channel_pools);
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
for_each_gpu_id_in_mask(gpu_id, &gpu->peer_info.peer_gpu_mask) {
pool = channel_manager->pool_to_use.gpu_to_gpu[uvm_id_gpu_index(gpu_id)];
if (pool && !test_and_set_bit(uvm_channel_pool_index_in_channel_manager(pool), resumed_pools))
channel_pool_resume_p2p(pool);
}
pool = channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_GPU];
if (!test_and_set_bit(uvm_channel_pool_index_in_channel_manager(pool), resumed_pools))
channel_pool_resume_p2p(pool);
}
NV_STATUS uvm_channel_get_status(uvm_channel_t *channel)
{
uvm_gpu_t *gpu;
@@ -1891,7 +2083,7 @@ NV_STATUS uvm_channel_check_errors(uvm_channel_t *channel)
NV_STATUS status = uvm_channel_get_status(channel);
if (status == NV_OK)
return NV_OK;
return status;
UVM_ERR_PRINT("Detected a channel error, channel %s GPU %s\n",
channel->name,
@@ -2134,6 +2326,7 @@ static uvmGpuTsgHandle channel_get_tsg(uvm_channel_t *channel)
tsg_index = uvm_channel_index_in_pool(channel);
}
UVM_ASSERT(tsg_index < pool->num_tsgs);
return pool->tsg_handles[tsg_index];
@@ -2251,6 +2444,7 @@ static NV_STATUS channel_create(uvm_channel_pool_t *pool, uvm_channel_t *channel
if (status != NV_OK)
goto error;
channel->suspended_p2p = false;
channel->num_gpfifo_entries = channel_pool_num_gpfifo_entries(pool);
channel->gpfifo_entries = uvm_kvmalloc_zero(sizeof(*channel->gpfifo_entries) * channel->num_gpfifo_entries);
if (channel->gpfifo_entries == NULL) {
@@ -2444,6 +2638,7 @@ static UVM_GPU_CHANNEL_ENGINE_TYPE pool_type_to_engine_type(uvm_channel_pool_typ
{
if (pool_type == UVM_CHANNEL_POOL_TYPE_SEC2)
return UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2;
return UVM_GPU_CHANNEL_ENGINE_TYPE_CE;
}
@@ -2706,6 +2901,13 @@ static NV_STATUS channel_pool_add(uvm_channel_manager_t *channel_manager,
static bool ce_is_usable(const UvmGpuCopyEngineCaps *cap)
{
// When Confidential Computing is enabled, all Copy Engines must support
// encryption / decryption, tracked by 'secure' flag. This holds even for
// non-CPU-GPU transactions because each channel has an associated semaphore,
// and semaphore release must be observable by all processing units.
if (g_uvm_global.conf_computing_enabled && !cap->secure)
return false;
return cap->supported && !cap->grce;
}
@@ -2862,10 +3064,6 @@ static void pick_ces_for_channel_types(uvm_channel_manager_t *manager,
{
unsigned i;
// In Confidential Computing, do not mark all usable CEs, only the preferred
// ones, because non-preferred CE channels are guaranteed to not be used.
bool mark_all_usable_ces = !g_uvm_global.conf_computing_enabled;
for (i = 0; i < num_channel_types; ++i) {
unsigned ce;
unsigned best_ce = UVM_COPY_ENGINE_COUNT_MAX;
@@ -2875,7 +3073,10 @@ static void pick_ces_for_channel_types(uvm_channel_manager_t *manager,
if (!ce_is_usable(ce_caps + ce))
continue;
if (mark_all_usable_ces)
// In Confidential Computing, do not mark all usable CEs, only the
// preferred ones, because non-preferred CE channels are guaranteed
// to not be used.
if (!g_uvm_global.conf_computing_enabled)
__set_bit(ce, manager->ce_mask);
if (best_ce == UVM_COPY_ENGINE_COUNT_MAX) {
@@ -2919,6 +3120,7 @@ static void pick_ces_conf_computing(uvm_channel_manager_t *manager,
unsigned *preferred_ce)
{
unsigned best_wlc_ce;
uvm_gpu_t *gpu = manager->gpu;
// The WLC type must go last so an unused CE is chosen, if available
uvm_channel_type_t types[] = {UVM_CHANNEL_TYPE_CPU_TO_GPU,
@@ -2937,9 +3139,10 @@ static void pick_ces_conf_computing(uvm_channel_manager_t *manager,
best_wlc_ce = preferred_ce[UVM_CHANNEL_TYPE_WLC];
// TODO: Bug 4576908: in HCC, the WLC type should not share a CE with any
// channel type other than LCIC. The assertion should be a check instead.
UVM_ASSERT(ce_usage_count(best_wlc_ce, preferred_ce) == 0);
// The implementation of engine-wide key rotation depends on using a
// dedicated CE for the WLC and LCIC pools.
if (uvm_conf_computing_is_key_rotation_enabled(gpu) && !gpu->parent->conf_computing.per_channel_key_rotation)
UVM_ASSERT(ce_usage_count(best_wlc_ce, preferred_ce) == 0);
}
static NV_STATUS channel_manager_pick_ces(uvm_channel_manager_t *manager, unsigned *preferred_ce)
@@ -2967,6 +3170,7 @@ static NV_STATUS channel_manager_pick_ces(uvm_channel_manager_t *manager, unsign
pick_ces_conf_computing(manager, ces_caps->copyEngineCaps, preferred_ce);
else
pick_ces(manager, ces_caps->copyEngineCaps, preferred_ce);
out:
uvm_kvfree(ces_caps);
@@ -3000,6 +3204,8 @@ void uvm_channel_manager_set_p2p_ce(uvm_channel_manager_t *manager, uvm_gpu_t *p
UVM_ASSERT(manager->gpu != peer);
UVM_ASSERT(optimal_ce < UVM_COPY_ENGINE_COUNT_MAX);
UVM_ASSERT(manager->gpu->parent->peer_copy_mode != UVM_GPU_PEER_COPY_MODE_UNSUPPORTED);
UVM_ASSERT(peer->parent->peer_copy_mode != UVM_GPU_PEER_COPY_MODE_UNSUPPORTED);
manager->pool_to_use.gpu_to_gpu[peer_gpu_index] = channel_manager_ce_pool(manager, optimal_ce);
}
@@ -3056,14 +3262,14 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
manager->conf.num_gpfifo_entries = UVM_CHANNEL_NUM_GPFIFO_ENTRIES_DEFAULT;
if (manager->conf.num_gpfifo_entries != uvm_channel_num_gpfifo_entries) {
pr_info("Invalid value for uvm_channel_num_gpfifo_entries = %u, using %u instead\n",
uvm_channel_num_gpfifo_entries,
manager->conf.num_gpfifo_entries);
UVM_INFO_PRINT("Invalid value for uvm_channel_num_gpfifo_entries = %u, using %u instead\n",
uvm_channel_num_gpfifo_entries,
manager->conf.num_gpfifo_entries);
}
// 2- Allocation locations
if (uvm_conf_computing_mode_is_hcc(gpu)) {
if (g_uvm_global.conf_computing_enabled) {
UVM_ASSERT(gpu->mem_info.size > 0);
// When the Confidential Computing feature is enabled, the GPU is
@@ -3098,9 +3304,9 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
pushbuffer_loc_value = uvm_channel_pushbuffer_loc;
if (!is_string_valid_location(pushbuffer_loc_value)) {
pushbuffer_loc_value = UVM_CHANNEL_PUSHBUFFER_LOC_DEFAULT;
pr_info("Invalid value for uvm_channel_pushbuffer_loc = %s, using %s instead\n",
uvm_channel_pushbuffer_loc,
pushbuffer_loc_value);
UVM_INFO_PRINT("Invalid value for uvm_channel_pushbuffer_loc = %s, using %s instead\n",
uvm_channel_pushbuffer_loc,
pushbuffer_loc_value);
}
// Override the default value if requested by the user
@@ -3110,8 +3316,8 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
// so force the location to sys for now.
// TODO: Bug 2904133: Remove the following "if" after the bug is fixed.
if (NVCPU_IS_AARCH64) {
pr_info("uvm_channel_pushbuffer_loc = %s is not supported on AARCH64, using sys instead\n",
pushbuffer_loc_value);
UVM_INFO_PRINT("uvm_channel_pushbuffer_loc = %s is not supported on AARCH64, using sys instead\n",
pushbuffer_loc_value);
manager->conf.pushbuffer_loc = UVM_BUFFER_LOCATION_SYS;
}
else {
@@ -3123,8 +3329,9 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
// Only support the knobs for GPFIFO/GPPut on Volta+
if (!gpu->parent->gpfifo_in_vidmem_supported) {
if (manager->conf.gpput_loc == UVM_BUFFER_LOCATION_SYS) {
pr_info("CAUTION: allocating GPPut in sysmem is NOT supported and may crash the system, using %s instead\n",
buffer_location_to_string(UVM_BUFFER_LOCATION_DEFAULT));
UVM_INFO_PRINT("CAUTION: allocating GPPut in sysmem is NOT supported and may crash the system, using %s "
"instead\n",
buffer_location_to_string(UVM_BUFFER_LOCATION_DEFAULT));
}
manager->conf.gpfifo_loc = UVM_BUFFER_LOCATION_DEFAULT;
@@ -3136,17 +3343,17 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
gpfifo_loc_value = uvm_channel_gpfifo_loc;
if (!is_string_valid_location(gpfifo_loc_value)) {
gpfifo_loc_value = UVM_CHANNEL_GPFIFO_LOC_DEFAULT;
pr_info("Invalid value for uvm_channel_gpfifo_loc = %s, using %s instead\n",
uvm_channel_gpfifo_loc,
gpfifo_loc_value);
UVM_INFO_PRINT("Invalid value for uvm_channel_gpfifo_loc = %s, using %s instead\n",
uvm_channel_gpfifo_loc,
gpfifo_loc_value);
}
gpput_loc_value = uvm_channel_gpput_loc;
if (!is_string_valid_location(gpput_loc_value)) {
gpput_loc_value = UVM_CHANNEL_GPPUT_LOC_DEFAULT;
pr_info("Invalid value for uvm_channel_gpput_loc = %s, using %s instead\n",
uvm_channel_gpput_loc,
gpput_loc_value);
UVM_INFO_PRINT("Invalid value for uvm_channel_gpput_loc = %s, using %s instead\n",
uvm_channel_gpput_loc,
gpput_loc_value);
}
// On coherent platforms where the GPU does not cache sysmem but the CPU
@@ -3290,7 +3497,7 @@ static NV_STATUS setup_wlc_schedule(uvm_channel_t *wlc)
// WLC can only process one job at a time.
// Prune any initialization entries and block all but one (+1 for sentinel)
uvm_channel_update_progress(wlc);
if (!try_claim_channel(wlc, wlc->num_gpfifo_entries - 2)) {
if (!try_claim_channel(wlc, wlc->num_gpfifo_entries - 2, UVM_CHANNEL_RESERVE_NO_P2P)) {
status = NV_ERR_INVALID_STATE;
goto free_gpfifo_entries;
}
@@ -3437,7 +3644,7 @@ static NV_STATUS setup_lcic_schedule(uvm_channel_t *paired_wlc, uvm_channel_t *l
// Prune any initialization entries and
// block all gpfifo entries (-1 for sentinel)
uvm_channel_update_progress(lcic);
if (!try_claim_channel(lcic, lcic->num_gpfifo_entries - 1)) {
if (!try_claim_channel(lcic, lcic->num_gpfifo_entries - 1, UVM_CHANNEL_RESERVE_NO_P2P)) {
status = NV_ERR_INVALID_STATE;
goto free_gpfifo_entries;
}
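A hedged reading of the claim arithmetic here and in the WLC case above: with N GPFIFO entries, one slot is always kept as a sentinel so a full ring can be told apart from an empty one, leaving N - 1 claimable entries. Claiming N - 2 for the WLC channel therefore leaves exactly one entry free for its single in-flight job, while claiming N - 1 here leaves the LCIC channel with no free entries, consistent with LCIC never accepting direct pushes.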
@@ -3700,6 +3907,7 @@ static void channel_manager_destroy_pools(uvm_channel_manager_t *manager)
{
uvm_rm_mem_free(manager->gpu->conf_computing.iv_rm_mem);
manager->gpu->conf_computing.iv_rm_mem = NULL;
while (manager->num_channel_pools > 0)
channel_pool_destroy(manager->channel_pools + manager->num_channel_pools - 1);
@@ -3856,6 +4064,7 @@ const char *uvm_channel_type_to_string(uvm_channel_type_t channel_type)
const char *uvm_channel_pool_type_to_string(uvm_channel_pool_type_t channel_pool_type)
{
BUILD_BUG_ON(UVM_CHANNEL_POOL_TYPE_COUNT != 5);
switch (channel_pool_type) {
@@ -3870,17 +4079,21 @@ const char *uvm_channel_pool_type_to_string(uvm_channel_pool_type_t channel_pool
static const char *get_gpfifo_location_string(uvm_channel_t *channel)
{
// SEC2 channels override the channel manager location for GPFIFO.
if (uvm_channel_is_sec2(channel))
return buffer_location_to_string(UVM_BUFFER_LOCATION_SYS);
return buffer_location_to_string(channel->pool->manager->conf.gpfifo_loc);
}
static const char *get_gpput_location_string(uvm_channel_t *channel)
{
// SEC2 channels override the channel manager location for GPPUT.
if (uvm_channel_is_sec2(channel))
return buffer_location_to_string(UVM_BUFFER_LOCATION_SYS);
return buffer_location_to_string(channel->pool->manager->conf.gpput_loc);
}

View File

@@ -200,6 +200,7 @@ typedef struct
// num_tsgs is 1. Pre-Volta GPUs also have a single TSG object, but since HW
// does not support TSG for CE engines, a HW TSG is not created; a TSG
// object is still required to allocate channels.
//
// When Confidential Computing mode is enabled, the WLC and LCIC channel
// types require one TSG for each WLC/LCIC pair of channels. In this case,
// we do not use a TSG per channel pool, but instead a TSG per WLC/LCIC
@@ -416,6 +417,8 @@ struct uvm_channel_struct
struct list_head channel_list_node;
NvU32 pending_event_count;
} tools;
bool suspended_p2p;
};
struct uvm_channel_manager_struct
@@ -478,6 +481,12 @@ struct uvm_channel_manager_struct
} conf_computing;
};
// Index of a channel pool within the manager
static unsigned uvm_channel_pool_index_in_channel_manager(const uvm_channel_pool_t *pool)
{
return pool - pool->manager->channel_pools;
}
// Create a channel manager for the GPU
NV_STATUS uvm_channel_manager_create(uvm_gpu_t *gpu, uvm_channel_manager_t **manager_out);
@@ -532,6 +541,8 @@ NvU64 uvm_channel_get_static_pb_unprotected_sysmem_gpu_va(uvm_channel_t *channel
char* uvm_channel_get_static_pb_unprotected_sysmem_cpu(uvm_channel_t *channel);
bool uvm_channel_pool_is_p2p(uvm_channel_pool_t *pool);
static bool uvm_channel_pool_is_proxy(uvm_channel_pool_t *pool)
{
UVM_ASSERT(uvm_pool_type_is_valid(pool->pool_type));
@@ -549,6 +560,11 @@ static bool uvm_channel_pool_is_ce(uvm_channel_pool_t *pool)
return !uvm_channel_pool_is_sec2(pool);
}
static bool uvm_channel_is_p2p(uvm_channel_t *channel)
{
return uvm_channel_pool_is_p2p(channel->pool);
}
static bool uvm_channel_is_ce(uvm_channel_t *channel)
{
return uvm_channel_pool_is_ce(channel->pool);
@@ -584,14 +600,25 @@ bool uvm_channel_is_privileged(uvm_channel_t *channel);
// Destroy the channel manager
void uvm_channel_manager_destroy(uvm_channel_manager_t *channel_manager);
// Suspend p2p traffic on channels used for p2p operations.
// This is used in STO recovery sequence to quiet nvlink traffic before the
// links can be restored.
NV_STATUS uvm_channel_manager_suspend_p2p(uvm_channel_manager_t *channel_manager);
// Resume p2p traffic on channels used for p2p operations.
// This is used at the end of the STO recovery sequence to resume suspended p2p
// traffic on p2p channels.
void uvm_channel_manager_resume_p2p(uvm_channel_manager_t *channel_manager);
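As an illustrative aside (not part of this change), the intended pairing of these two entry points around the link-recovery step could look roughly like the sketch below. restore_nvlink_connections() is a hypothetical placeholder for whatever call actually retrains the links, and error handling is simplified.

// Sketch only: bracket link recovery with the suspend/resume pair above.
// restore_nvlink_connections() is hypothetical, not a real driver function.
static NV_STATUS recover_p2p_after_sto(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1)
{
    NV_STATUS status;

    // Quiesce P2P traffic on both ends before touching the links.
    status = uvm_channel_manager_suspend_p2p(gpu0->channel_manager);
    if (status != NV_OK)
        return status;

    status = uvm_channel_manager_suspend_p2p(gpu1->channel_manager);
    if (status != NV_OK)
        goto resume_gpu0;

    status = restore_nvlink_connections(gpu0, gpu1);

    uvm_channel_manager_resume_p2p(gpu1->channel_manager);

resume_gpu0:
    uvm_channel_manager_resume_p2p(gpu0->channel_manager);
    return status;
}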
// Get the current status of the channel
// Returns NV_OK if the channel is in a good state and NV_ERR_RC_ERROR
// otherwise. Notably this never sets the global fatal error.
// Returns NV_OK if the channel is in a good state,
// NV_ERR_RC_ERROR otherwise.
// Notably this never sets the global fatal error.
NV_STATUS uvm_channel_get_status(uvm_channel_t *channel);
// Check for channel errors
// Checks for channel errors by calling uvm_channel_get_status(). If an error
// occurred, sets the global fatal error and prints errors.
// Checks for channel errors by calling uvm_channel_get_status().
// If a fatal error occurred, sets the global fatal error and prints errors.
NV_STATUS uvm_channel_check_errors(uvm_channel_t *channel);
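A minimal sketch of the intended division of labor between the two queries (illustrative only; error handling simplified):

// uvm_channel_get_status() is a passive query with no side effects;
// uvm_channel_check_errors() re-checks and, on a fatal channel error,
// prints diagnostics and sets the global fatal error.
if (uvm_channel_get_status(channel) != NV_OK)
    status = uvm_channel_check_errors(channel);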
// Check errors on all channels in the channel manager
@@ -625,6 +652,7 @@ static bool uvm_channel_manager_is_wlc_ready(uvm_channel_manager_t *manager)
{
return manager->conf_computing.wlc_ready;
}
// Get the GPU VA of semaphore_channel's tracking semaphore within the VA space
// associated with access_channel.
//

View File

@@ -206,9 +206,10 @@ static NV_STATUS uvm_test_rc_for_gpu(uvm_gpu_t *gpu)
uvm_for_each_pool(pool, manager) {
uvm_channel_t *channel;
// Skip LCIC channels as those can't accept any pushes
if (uvm_channel_pool_is_lcic(pool))
continue;
// Skip LCIC channels as those can't accept any pushes
if (uvm_channel_pool_is_lcic(pool))
continue;
uvm_for_each_channel_in_pool(channel, pool) {
NvU32 i;
for (i = 0; i < 512; ++i) {
@@ -1292,6 +1293,7 @@ static NV_STATUS test_write_ctrl_gpfifo_noop(uvm_va_space_t *va_space)
// after their schedule is set up
if (uvm_channel_pool_is_wlc(pool))
continue;
uvm_for_each_channel_in_pool(channel, pool) {
NvU32 i;
@@ -1331,6 +1333,7 @@ static NV_STATUS test_write_ctrl_gpfifo_and_pushes(uvm_va_space_t *va_space)
// after their schedule is set up
if (uvm_channel_pool_is_wlc(pool))
continue;
uvm_for_each_channel_in_pool(channel, pool) {
NvU32 i;
uvm_push_t push;
@@ -1473,6 +1476,7 @@ static NV_STATUS test_channel_pushbuffer_extension_base(uvm_va_space_t *va_space
// Skip LCIC channels as those can't accept any pushes
if (uvm_channel_pool_is_lcic(pool))
continue;
uvm_for_each_channel_in_pool(channel, pool) {
NvU32 i;
uvm_push_t push;

View File

@@ -57,6 +57,7 @@ enum {
// NULL.
void uvm_uuid_string(char *buffer, const NvProcessorUuid *uuid);
// Long prefix - typically for debugging and tests.
#define UVM_PRINT_FUNC_PREFIX(func, prefix, fmt, ...) \
func(prefix "%s:%u %s[pid:%d]" fmt, \
kbasename(__FILE__), \
@@ -65,10 +66,15 @@ void uvm_uuid_string(char *buffer, const NvProcessorUuid *uuid);
current->pid, \
##__VA_ARGS__)
// Short prefix - typically for information.
#define UVM_PRINT_FUNC_SHORT_PREFIX(func, prefix, fmt, ...) \
func(prefix fmt, ##__VA_ARGS__)
// No prefix - used by kernel panic messages.
#define UVM_PRINT_FUNC(func, fmt, ...) \
UVM_PRINT_FUNC_PREFIX(func, "", fmt, ##__VA_ARGS__)
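For orientation, a hedged example of what the two prefix styles produce, with made-up file, line, function, and pid values: with the long prefix, a message such as "channel timed out" comes out roughly as

    <prefix>uvm_channel.c:2083 uvm_channel_check_errors[pid:1234] channel timed out

where <prefix> stands for whatever NVIDIA_UVM_PRETTY_PRINTING_PREFIX expands to, while the short-prefix variant emits only <prefix> followed by the message.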
// Check whether UVM_{ERR,DBG,INFO}_PRINT* should be enabled
// Check whether UVM_{ERR,DBG}_PRINT* should be enabled.
bool uvm_debug_prints_enabled(void);
// A printing helper like UVM_PRINT_FUNC_PREFIX that only prints if
@@ -80,10 +86,10 @@ bool uvm_debug_prints_enabled(void);
} \
} while (0)
#define UVM_ASSERT_PRINT(fmt, ...) \
#define UVM_ERR_PRINT_ALWAYS(fmt, ...) \
UVM_PRINT_FUNC_PREFIX(printk, KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__)
#define UVM_ASSERT_PRINT_RL(fmt, ...) \
#define UVM_ERR_PRINT_ALWAYS_RL(fmt, ...) \
UVM_PRINT_FUNC_PREFIX(printk_ratelimited, KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__)
#define UVM_ERR_PRINT(fmt, ...) \
@@ -95,13 +101,16 @@ bool uvm_debug_prints_enabled(void);
#define UVM_DBG_PRINT(fmt, ...) \
UVM_PRINT_FUNC_PREFIX_CHECK(printk, KERN_DEBUG NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__)
#define UVM_DBG_PRINT_RL(fmt, ...) \
#define UVM_DBG_PRINT_RL(fmt, ...) \
UVM_PRINT_FUNC_PREFIX_CHECK(printk_ratelimited, KERN_DEBUG NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__)
// UVM_INFO_PRINT prints in all modes (including release builds). It is used
// for relaying driver-level information rather than detailed debugging
// information; therefore, it does not add the long prefix.
#define UVM_INFO_PRINT(fmt, ...) \
UVM_PRINT_FUNC_PREFIX_CHECK(printk, KERN_INFO NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__)
UVM_PRINT_FUNC_SHORT_PREFIX(printk, KERN_INFO NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__)
#define UVM_ERR_PRINT_NV_STATUS(msg, rmStatus, ...) \
#define UVM_ERR_PRINT_NV_STATUS(msg, rmStatus, ...) \
UVM_ERR_PRINT("ERROR: %s : " msg "\n", nvstatusToString(rmStatus), ##__VA_ARGS__)
#define UVM_PANIC() UVM_PRINT_FUNC(panic, "\n")
@@ -134,13 +143,13 @@ void on_uvm_test_fail(void);
// Unlike on_uvm_test_fail it provides 'panic' coverity semantics
void on_uvm_assert(void);
#define _UVM_ASSERT_MSG(expr, cond, fmt, ...) \
do { \
if (unlikely(!(expr))) { \
UVM_ASSERT_PRINT("Assert failed, condition %s not true" fmt, cond, ##__VA_ARGS__); \
dump_stack(); \
on_uvm_assert(); \
} \
#define _UVM_ASSERT_MSG(expr, cond, fmt, ...) \
do { \
if (unlikely(!(expr))) { \
UVM_ERR_PRINT_ALWAYS("Assert failed, condition %s not true" fmt, cond, ##__VA_ARGS__); \
dump_stack(); \
on_uvm_assert(); \
} \
} while (0)
// Prevent function calls in expr and the print argument list from being
@@ -151,7 +160,8 @@ void on_uvm_assert(void);
UVM_NO_PRINT(fmt, ##__VA_ARGS__); \
} while (0)
// UVM_ASSERT and UVM_ASSERT_MSG are only enabled on non-release and Coverity builds
// UVM_ASSERT and UVM_ASSERT_MSG are only enabled on non-release and Coverity
// builds.
#if UVM_IS_DEBUG() || defined __COVERITY__
#define UVM_ASSERT_MSG(expr, fmt, ...) _UVM_ASSERT_MSG(expr, #expr, ": " fmt, ##__VA_ARGS__)
#define UVM_ASSERT(expr) _UVM_ASSERT_MSG(expr, #expr, "\n")
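For reference, a hedged usage sketch of the two assertion flavors; the first condition appears earlier in this diff, the second condition and message are made up for illustration:

    UVM_ASSERT(tsg_index < pool->num_tsgs);
    UVM_ASSERT_MSG(count <= max_count, "count %u max %u\n", count, max_count);

On debug and Coverity builds these expand to the full check above; the release-build variants that follow are gated separately by the uvm_release_asserts module parameters.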
@@ -174,16 +184,16 @@ extern bool uvm_release_asserts_set_global_error_for_tests;
// Given these are enabled for release builds, we need to be more cautious than
// in UVM_ASSERT(). Use a ratelimited print and only dump the stack if a module
// param is enabled.
#define _UVM_ASSERT_MSG_RELEASE(expr, cond, fmt, ...) \
do { \
if (uvm_release_asserts && unlikely(!(expr))) { \
UVM_ASSERT_PRINT_RL("Assert failed, condition %s not true" fmt, cond, ##__VA_ARGS__); \
if (uvm_release_asserts_set_global_error || uvm_release_asserts_set_global_error_for_tests) \
uvm_global_set_fatal_error(NV_ERR_INVALID_STATE); \
if (uvm_release_asserts_dump_stack) \
dump_stack(); \
on_uvm_assert(); \
} \
#define _UVM_ASSERT_MSG_RELEASE(expr, cond, fmt, ...) \
do { \
if (uvm_release_asserts && unlikely(!(expr))) { \
UVM_ERR_PRINT_ALWAYS_RL("Assert failed, condition %s not true" fmt, cond, ##__VA_ARGS__); \
if (uvm_release_asserts_set_global_error || uvm_release_asserts_set_global_error_for_tests) \
uvm_global_set_fatal_error(NV_ERR_INVALID_STATE); \
if (uvm_release_asserts_dump_stack) \
dump_stack(); \
on_uvm_assert(); \
} \
} while (0)
#define UVM_ASSERT_MSG_RELEASE(expr, fmt, ...) _UVM_ASSERT_MSG_RELEASE(expr, #expr, ": " fmt, ##__VA_ARGS__)
@@ -240,15 +250,6 @@ static inline NvBool uvm_ranges_overlap(NvU64 a_start, NvU64 a_end, NvU64 b_star
return a_end >= b_start && b_end >= a_start;
}
static int debug_mode(void)
{
#ifdef DEBUG
return 1;
#else
return 0;
#endif
}
static inline void kmem_cache_destroy_safe(struct kmem_cache **ppCache)
{
if (ppCache)
@@ -326,22 +327,6 @@ typedef struct
NvHandle user_object;
} uvm_rm_user_object_t;
typedef enum
{
UVM_FD_UNINITIALIZED,
UVM_FD_INITIALIZING,
UVM_FD_VA_SPACE,
UVM_FD_MM,
UVM_FD_COUNT
} uvm_fd_type_t;
// This should be large enough to fit the valid values from uvm_fd_type_t above.
// Note we can't use order_base_2(UVM_FD_COUNT) to define this because our code
// coverage tool fails when the preprocessor expands that to a huge mess of
// ternary operators.
#define UVM_FD_TYPE_BITS 2
#define UVM_FD_TYPE_MASK ((1UL << UVM_FD_TYPE_BITS) - 1)
// Macro used to compare two values for types that support the less-than operator.
// It returns -1 if a < b, 1 if a > b, and 0 if a == b
#define UVM_CMP_DEFAULT(a,b) \
@@ -364,37 +349,13 @@ typedef enum
// file. A NULL input returns false.
bool uvm_file_is_nvidia_uvm(struct file *filp);
// Returns the type of data filp->private_data contains to and if ptr_val !=
// NULL returns the value of the pointer.
uvm_fd_type_t uvm_fd_type(struct file *filp, void **ptr_val);
// Returns the pointer stored in filp->private_data if the type
// matches, otherwise returns NULL.
void *uvm_fd_get_type(struct file *filp, uvm_fd_type_t type);
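As a sketch of one plausible encoding consistent with the UVM_FD_TYPE_BITS/UVM_FD_TYPE_MASK scheme shown above (illustrative only; the real decoding lives in the .c file and may differ), the type tag is assumed to occupy the low bits of filp->private_data, with the remaining bits holding the object pointer:

// Hypothetical decoder, mirroring the mask definitions above; not copied
// from the driver.
static uvm_fd_type_t example_fd_decode(struct file *filp, void **ptr_val)
{
    unsigned long v = (unsigned long)filp->private_data;

    if (ptr_val)
        *ptr_val = (void *)(v & ~UVM_FD_TYPE_MASK);

    return (uvm_fd_type_t)(v & UVM_FD_TYPE_MASK);
}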
// Reads the first word in the supplied struct page.
static inline void uvm_touch_page(struct page *page)
{
char *mapping;
UVM_ASSERT(page);
mapping = (char *) kmap(page);
(void)READ_ONCE(*mapping);
kunmap(page);
}
// Like uvm_file_is_nvidia_uvm(), but further requires that the input file
// represent a UVM VA space (has fd type UVM_FD_VA_SPACE).
bool uvm_file_is_nvidia_uvm_va_space(struct file *filp);
// Return true if the VMA is one used by UVM managed allocations.
bool uvm_vma_is_managed(struct vm_area_struct *vma);
static bool uvm_platform_uses_canonical_form_address(void)
{
if (NVCPU_IS_PPC64LE)
return false;
return true;
}
// Similar to the GPU MMU HAL num_va_bits(), it returns the CPU's num_va_bits().
static NvU32 uvm_cpu_num_va_bits(void)
{
@@ -410,7 +371,7 @@ static void uvm_get_unaddressable_range(NvU32 num_va_bits, NvU64 *first, NvU64 *
// Maxwell GPUs (num_va_bits == 40b) do not support canonical form address
// even when plugged into platforms using it.
if (uvm_platform_uses_canonical_form_address() && num_va_bits > 40) {
if (num_va_bits > 40) {
*first = 1ULL << (num_va_bits - 1);
*outer = (NvU64)((NvS64)(1ULL << 63) >> (64 - num_va_bits));
}
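A hedged worked example of the computation above, for num_va_bits = 48 (a common CPU VA width): first = 1ULL << 47 = 0x0000800000000000, and outer = (NvU64)((NvS64)(1ULL << 63) >> 16) = 0xFFFF800000000000, i.e. the unaddressable range is exactly the non-canonical hole between the bottom and top halves of a 48-bit canonical address space. For num_va_bits <= 40 (e.g. Maxwell GPUs, per the comment above) the branch is simply skipped.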

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2021-2023 NVIDIA Corporation
Copyright (c) 2021-2025 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -65,32 +65,13 @@ static ulong uvm_conf_computing_channel_iv_rotation_limit = UVM_CONF_COMPUTING_I
module_param(uvm_conf_computing_channel_iv_rotation_limit, ulong, S_IRUGO);
static UvmGpuConfComputeMode uvm_conf_computing_get_mode(const uvm_parent_gpu_t *parent)
{
return parent->rm_info.gpuConfComputeCaps.mode;
}
bool uvm_conf_computing_mode_is_hcc(const uvm_gpu_t *gpu)
{
return uvm_conf_computing_get_mode(gpu->parent) == UVM_GPU_CONF_COMPUTE_MODE_HCC;
}
void uvm_conf_computing_check_parent_gpu(const uvm_parent_gpu_t *parent)
{
uvm_parent_gpu_t *other_parent;
UvmGpuConfComputeMode parent_mode = uvm_conf_computing_get_mode(parent);
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
// The Confidential Computing state of the GPU should match that of the
// system.
UVM_ASSERT((parent_mode != UVM_GPU_CONF_COMPUTE_MODE_NONE) == g_uvm_global.conf_computing_enabled);
// All GPUs derive Confidential Computing status from their parent. By
// current policy all parent GPUs have identical Confidential Computing
// status.
for_each_parent_gpu(other_parent)
UVM_ASSERT(parent_mode == uvm_conf_computing_get_mode(other_parent));
// Confidential Computing enablement on the system should match enablement
// on the GPU.
UVM_ASSERT(parent->rm_info.gpuConfComputeCaps.bConfComputingEnabled == g_uvm_global.conf_computing_enabled);
}
static void dma_buffer_destroy_locked(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,
@@ -343,9 +324,6 @@ static NV_STATUS dummy_iv_mem_init(uvm_gpu_t *gpu)
{
NV_STATUS status;
if (!uvm_conf_computing_mode_is_hcc(gpu))
return NV_OK;
status = uvm_mem_alloc_sysmem_dma(sizeof(UvmCslIv), gpu, NULL, &gpu->conf_computing.iv_mem);
if (status != NV_OK)
return status;
@@ -554,7 +532,7 @@ NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
{
NV_STATUS status;
NvU32 fault_entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
UvmCslContext *csl_context = &parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx;
UvmCslContext *csl_context = &parent_gpu->fault_buffer.rm_info.replayable.cslCtx;
// There is no dedicated lock for the CSL context associated with replayable
// faults. The mutual exclusion required by the RM CSL API is enforced by
@@ -593,7 +571,7 @@ void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu)
{
NV_STATUS status;
NvU32 fault_entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
UvmCslContext *csl_context = &parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx;
UvmCslContext *csl_context = &parent_gpu->fault_buffer.rm_info.replayable.cslCtx;
// See comment in uvm_conf_computing_fault_decrypt
UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));
@@ -730,7 +708,12 @@ void uvm_conf_computing_disable_key_rotation(uvm_gpu_t *gpu)
bool uvm_conf_computing_is_key_rotation_enabled(uvm_gpu_t *gpu)
{
return gpu->channel_manager->conf_computing.key_rotation_enabled;
UVM_ASSERT(gpu);
// If the channel_manager is not set, we're in channel manager destroy
// path after the pointer was NULL-ed. Chances are that other key rotation
// infrastructure is not available either. Disallow the key rotation.
return gpu->channel_manager && gpu->channel_manager->conf_computing.key_rotation_enabled;
}
bool uvm_conf_computing_is_key_rotation_enabled_in_pool(uvm_channel_pool_t *pool)

Some files were not shown because too many files have changed in this diff.