mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
synced 2026-01-30 13:09:47 +00:00

Compare commits: 570.144 ... 535.216.01 (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 60d85c464b | |
| | c588c3877f | |
| | 4459285b60 | |
| | f4bdce9a0a | |
| | c042c7903d | |
| | 044f70bbb8 | |
| | 6d33efe502 | |
| | ee55481a49 | |
| | 7165299dee | |
| | e573018659 | |
.github/ISSUE_TEMPLATE/20_build_bug.yml vendored (8 changes)
@@ -32,14 +32,6 @@ body:
       description: "Which kernel are you running? (output of `uname -a`, say if you built it yourself)."
     validations:
       required: true
-  - type: checkboxes
-    id: sw_host_kernel_stable
-    attributes:
-      label: "Please confirm you are running a stable release kernel (e.g. not a -rc). We do not accept bug reports for unreleased kernels."
-      options:
-        - label: "I am running on a stable kernel release."
-    validations:
-      required: true
   - type: textarea
     id: bug_description
     attributes:
@@ -57,20 +57,6 @@ ifeq ($(NV_UNDEF_BEHAVIOR_SANITIZER),1)
 UBSAN_SANITIZE := y
 endif
 
-#
-# Command to create a symbolic link, explicitly resolving the symlink target
-# to an absolute path to abstract away the difference between Linux < 6.13,
-# where the CWD is the Linux kernel source tree for Kbuild extmod builds, and
-# Linux >= 6.13, where the CWD is the external module source tree.
-#
-# This is used to create the nv*-kernel.o -> nv*-kernel.o_binary symlinks for
-# kernel modules which use precompiled binary object files.
-#
-
-quiet_cmd_symlink = SYMLINK $@
-      cmd_symlink = ln -sf $(abspath $<) $@
-
-
 $(foreach _module, $(NV_KERNEL_MODULES), \
  $(eval include $(src)/$(_module)/$(_module).Kbuild))
 
@@ -84,26 +70,14 @@ $(foreach _module, $(NV_KERNEL_MODULES), \
 
 EXTRA_CFLAGS += -I$(src)/common/inc
 EXTRA_CFLAGS += -I$(src)
-EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
+EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
 EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
-EXTRA_CFLAGS += -DNV_VERSION_STRING=\"570.144\"
+EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.216.01\"
 
 ifneq ($(SYSSRCHOST1X),)
 EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
 endif
 
-# Some Android kernels prohibit driver use of filesystem functions like
-# filp_open() and kernel_read(). Disable the NV_FILESYSTEM_ACCESS_AVAILABLE
-# functionality that uses those functions when building for Android.
-
-PLATFORM_IS_ANDROID ?= 0
-
-ifeq ($(PLATFORM_IS_ANDROID),1)
-EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=0
-else
-EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=1
-endif
-
 EXTRA_CFLAGS += -Wno-unused-function
 
 ifneq ($(NV_BUILD_TYPE),debug)
@@ -118,6 +92,7 @@ endif
 
 ifeq ($(NV_BUILD_TYPE),debug)
 EXTRA_CFLAGS += -g
+EXTRA_CFLAGS += $(call cc-option,-gsplit-dwarf,)
 endif
 
 EXTRA_CFLAGS += -ffreestanding
@@ -132,7 +107,7 @@ ifeq ($(ARCH),x86_64)
 endif
 
 ifeq ($(ARCH),powerpc)
-EXTRA_CFLAGS += -mlittle-endian -mno-strict-align
+EXTRA_CFLAGS += -mlittle-endian -mno-strict-align -mno-altivec
 endif
 
 EXTRA_CFLAGS += -DNV_UVM_ENABLE
@@ -152,13 +127,6 @@ ifdef VGX_FORCE_VFIO_PCI_CORE
 EXTRA_CFLAGS += -DNV_VGPU_FORCE_VFIO_PCI_CORE
 endif
 
-WARNINGS_AS_ERRORS ?=
-ifeq ($(WARNINGS_AS_ERRORS),1)
-ccflags-y += -Werror
-else
-ccflags-y += -Wno-error
-endif
-
 #
 # The conftest.sh script tests various aspects of the target kernel.
 # The per-module Kbuild files included above should:
@@ -186,8 +154,6 @@ NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags)
 NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(EXTRA_CFLAGS) -fno-pie
 NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign)
 NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,)
-NV_CONFTEST_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types,)
-NV_CONFTEST_CFLAGS += -Wno-error
 
 NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h
 NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/functions.h
@@ -247,7 +213,102 @@ $(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT)
 	@mkdir -p $(obj)/conftest
 	@$(NV_CONFTEST_CMD) patch_check > $@
 
-include $(src)/header-presence-tests.mk
+# Each of these headers is checked for presence with a test #include; a
+# corresponding #define will be generated in conftest/headers.h.
+NV_HEADER_PRESENCE_TESTS = \
+  asm/system.h \
+  drm/drmP.h \
+  drm/drm_auth.h \
+  drm/drm_gem.h \
+  drm/drm_crtc.h \
+  drm/drm_color_mgmt.h \
+  drm/drm_atomic.h \
+  drm/drm_atomic_helper.h \
+  drm/drm_atomic_state_helper.h \
+  drm/drm_encoder.h \
+  drm/drm_atomic_uapi.h \
+  drm/drm_drv.h \
+  drm/drm_framebuffer.h \
+  drm/drm_connector.h \
+  drm/drm_probe_helper.h \
+  drm/drm_blend.h \
+  drm/drm_fourcc.h \
+  drm/drm_prime.h \
+  drm/drm_plane.h \
+  drm/drm_vblank.h \
+  drm/drm_file.h \
+  drm/drm_ioctl.h \
+  drm/drm_device.h \
+  drm/drm_mode_config.h \
+  drm/drm_modeset_lock.h \
+  dt-bindings/interconnect/tegra_icc_id.h \
+  generated/autoconf.h \
+  generated/compile.h \
+  generated/utsrelease.h \
+  linux/efi.h \
+  linux/kconfig.h \
+  linux/platform/tegra/mc_utils.h \
+  linux/printk.h \
+  linux/ratelimit.h \
+  linux/prio_tree.h \
+  linux/log2.h \
+  linux/of.h \
+  linux/bug.h \
+  linux/sched.h \
+  linux/sched/mm.h \
+  linux/sched/signal.h \
+  linux/sched/task.h \
+  linux/sched/task_stack.h \
+  xen/ioemu.h \
+  linux/fence.h \
+  linux/dma-resv.h \
+  soc/tegra/chip-id.h \
+  soc/tegra/fuse.h \
+  soc/tegra/tegra_bpmp.h \
+  video/nv_internal.h \
+  linux/platform/tegra/dce/dce-client-ipc.h \
+  linux/nvhost.h \
+  linux/nvhost_t194.h \
+  linux/host1x-next.h \
+  asm/book3s/64/hash-64k.h \
+  asm/set_memory.h \
+  asm/prom.h \
+  asm/powernv.h \
+  linux/atomic.h \
+  asm/barrier.h \
+  asm/opal-api.h \
+  sound/hdaudio.h \
+  asm/pgtable_types.h \
+  asm/page.h \
+  linux/stringhash.h \
+  linux/dma-map-ops.h \
+  rdma/peer_mem.h \
+  sound/hda_codec.h \
+  linux/dma-buf.h \
+  linux/time.h \
+  linux/platform_device.h \
+  linux/mutex.h \
+  linux/reset.h \
+  linux/of_platform.h \
+  linux/of_device.h \
+  linux/of_gpio.h \
+  linux/gpio.h \
+  linux/gpio/consumer.h \
+  linux/interconnect.h \
+  linux/pm_runtime.h \
+  linux/clk.h \
+  linux/clk-provider.h \
+  linux/ioasid.h \
+  linux/stdarg.h \
+  linux/iosys-map.h \
+  asm/coco.h \
+  linux/vfio_pci_core.h \
+  linux/mdev.h \
+  soc/tegra/bpmp-abi.h \
+  soc/tegra/bpmp.h \
+  linux/cc_platform.h \
+  asm/cpufeature.h
+
 
 # Filename to store the define for the header in $(1); this is only consumed by
 # the rule below that concatenates all of these together.
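Note: each header in NV_HEADER_PRESENCE_TESTS is probed with a test #include, and conftest.sh emits a matching NV_<PATH>_H_PRESENT define (path uppercased, separators becoming underscores). A minimal sketch of the consuming side, using the asm/set_memory.h entry whose guard also appears later in this diff; the assumption that "conftest.h" pulls in the generated conftest/headers.h is illustrative:

/* Sketch: consuming a generated header-presence define.
 * "conftest.h" is assumed here to aggregate the generated
 * conftest/headers.h produced by the rules above. */
#include "conftest.h"

#if defined(NV_ASM_SET_MEMORY_H_PRESENT)
#include <asm/set_memory.h>   /* e.g. set_memory_uc()/set_memory_wb() */
#endif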
@@ -52,22 +52,6 @@ else
 endif
 endif
 
-# If CC hasn't been set explicitly, check the value of CONFIG_CC_VERSION_TEXT.
-# Look for the compiler specified there, and use it by default, if found.
-ifeq ($(origin CC),default)
-  cc_version_text=$(firstword $(shell . $(KERNEL_OUTPUT)/.config; \
-    echo "$$CONFIG_CC_VERSION_TEXT"))
-
-  ifneq ($(cc_version_text),)
-    ifeq ($(shell command -v $(cc_version_text)),)
-      $(warning WARNING: Unable to locate the compiler $(cc_version_text) \
-        from CONFIG_CC_VERSION_TEXT in the kernel configuration.)
-    else
-      CC=$(cc_version_text)
-    endif
-  endif
-endif
-
 CC ?= cc
 LD ?= ld
 OBJDUMP ?= objdump
@@ -77,25 +61,12 @@ else
   -e 's/armv[0-7]\w\+/arm/' \
   -e 's/aarch64/arm64/' \
   -e 's/ppc64le/powerpc/' \
-  -e 's/riscv64/riscv/' \
   )
 endif
 
-KERNEL_ARCH = $(ARCH)
-
-ifneq ($(filter $(ARCH),i386 x86_64),)
-  KERNEL_ARCH = x86
-else
-  ifeq ($(filter $(ARCH),arm64 powerpc),)
-    $(error Unsupported architecture $(ARCH))
-  endif
-endif
-
 NV_KERNEL_MODULES ?= $(wildcard nvidia nvidia-uvm nvidia-vgpu-vfio nvidia-modeset nvidia-drm nvidia-peermem)
 NV_KERNEL_MODULES := $(filter-out $(NV_EXCLUDE_KERNEL_MODULES), \
                                   $(NV_KERNEL_MODULES))
-INSTALL_MOD_DIR ?= kernel/drivers/video
-
 NV_VERBOSE ?=
 SPECTRE_V2_RETPOLINE ?= 0
 
@@ -107,7 +78,7 @@ else
   KBUILD_PARAMS += NV_KERNEL_SOURCES=$(KERNEL_SOURCES)
   KBUILD_PARAMS += NV_KERNEL_OUTPUT=$(KERNEL_OUTPUT)
   KBUILD_PARAMS += NV_KERNEL_MODULES="$(NV_KERNEL_MODULES)"
-  KBUILD_PARAMS += INSTALL_MOD_DIR="$(INSTALL_MOD_DIR)"
+  KBUILD_PARAMS += INSTALL_MOD_DIR=kernel/drivers/video
   KBUILD_PARAMS += NV_SPECTRE_V2=$(SPECTRE_V2_RETPOLINE)
 
 .PHONY: modules module clean clean_conftest modules_install
@@ -132,9 +103,8 @@ else
 # module symbols on which the Linux kernel's module resolution is dependent
 # and hence must be used whenever present.
 
 LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
-             $(KERNEL_SOURCES)/arch/$(KERNEL_ARCH)/kernel/module.lds \
-             $(KERNEL_OUTPUT)/arch/$(KERNEL_ARCH)/module.lds \
+             $(KERNEL_SOURCES)/arch/$(ARCH)/kernel/module.lds \
              $(KERNEL_OUTPUT)/scripts/module.lds
 NV_MODULE_COMMON_SCRIPTS := $(foreach s, $(wildcard $(LD_SCRIPT)), -T $(s))
 
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,10 +32,7 @@
 typedef enum
 {
     NV_FIRMWARE_TYPE_GSP,
-    NV_FIRMWARE_TYPE_GSP_LOG,
-#if defined(NV_VMWARE)
-    NV_FIRMWARE_TYPE_BINDATA
-#endif
+    NV_FIRMWARE_TYPE_GSP_LOG
 } nv_firmware_type_t;
 
 typedef enum
@@ -47,8 +44,6 @@ typedef enum
     NV_FIRMWARE_CHIP_FAMILY_GA10X = 4,
     NV_FIRMWARE_CHIP_FAMILY_AD10X = 5,
     NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
-    NV_FIRMWARE_CHIP_FAMILY_GB10X = 8,
-    NV_FIRMWARE_CHIP_FAMILY_GB20X = 9,
     NV_FIRMWARE_CHIP_FAMILY_END,
 } nv_firmware_chip_family_t;
 
@@ -57,8 +52,6 @@ static inline const char *nv_firmware_chip_family_to_string(
 )
 {
     switch (fw_chip_family) {
-        case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x";
-        case NV_FIRMWARE_CHIP_FAMILY_GB20X: return "gb20x";
         case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
         case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
         case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x";
@@ -73,13 +66,13 @@ static inline const char *nv_firmware_chip_family_to_string(
     return NULL;
 }
 
-// The includer may optionally define
-// NV_FIRMWARE_FOR_NAME(name)
-// to return a platform-defined string for a given a gsp_* or gsp_log_* name.
+// The includer (presumably nv.c) may optionally define
+// NV_FIRMWARE_PATH_FOR_FILENAME(filename)
+// to return a string "path" given a gsp_*.bin or gsp_log_*.bin filename.
 //
-// The function nv_firmware_for_chip_family will then be available.
-#if defined(NV_FIRMWARE_FOR_NAME)
-static inline const char *nv_firmware_for_chip_family(
+// The function nv_firmware_path will then be available.
+#if defined(NV_FIRMWARE_PATH_FOR_FILENAME)
+static inline const char *nv_firmware_path(
     nv_firmware_type_t fw_type,
     nv_firmware_chip_family_t fw_chip_family
 )
@@ -88,17 +81,15 @@ static inline const char *nv_firmware_for_chip_family(
 {
     switch (fw_chip_family)
     {
-        case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
-        case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
        case NV_FIRMWARE_CHIP_FAMILY_GA10X:
-            return NV_FIRMWARE_FOR_NAME("gsp_ga10x");
+            return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ga10x.bin");
 
         case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_TU10X:
-            return NV_FIRMWARE_FOR_NAME("gsp_tu10x");
+            return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_tu10x.bin");
 
         case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
         case NV_FIRMWARE_CHIP_FAMILY_NULL:
@@ -109,40 +100,33 @@ static inline const char *nv_firmware_for_chip_family(
     {
         switch (fw_chip_family)
         {
-            case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
-            case NV_FIRMWARE_CHIP_FAMILY_GB20X: // fall through
             case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
             case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
             case NV_FIRMWARE_CHIP_FAMILY_GA10X:
-                return NV_FIRMWARE_FOR_NAME("gsp_log_ga10x");
+                return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ga10x.bin");
 
             case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
             case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
             case NV_FIRMWARE_CHIP_FAMILY_TU10X:
-                return NV_FIRMWARE_FOR_NAME("gsp_log_tu10x");
+                return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_tu10x.bin");
 
             case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
             case NV_FIRMWARE_CHIP_FAMILY_NULL:
                 return "";
         }
     }
-#if defined(NV_VMWARE)
-    else if (fw_type == NV_FIRMWARE_TYPE_BINDATA)
-    {
-        return NV_FIRMWARE_FOR_NAME("bindata_image");
-    }
-#endif
     return "";
 }
-#endif // defined(NV_FIRMWARE_FOR_NAME)
+#endif // defined(NV_FIRMWARE_PATH_FOR_FILENAME)
 
-// The includer may optionally define
-// NV_FIRMWARE_DECLARE_GSP(name)
+// The includer (presumably nv.c) may optionally define
+// NV_FIRMWARE_DECLARE_GSP_FILENAME(filename)
 // which will then be invoked (at the top-level) for each
-// gsp_* (but not gsp_log_*)
-#if defined(NV_FIRMWARE_DECLARE_GSP)
-NV_FIRMWARE_DECLARE_GSP("gsp_ga10x")
-NV_FIRMWARE_DECLARE_GSP("gsp_tu10x")
-#endif // defined(NV_FIRMWARE_DECLARE_GSP)
+// gsp_*.bin (but not gsp_log_*.bin)
+#if defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
+NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ga10x.bin")
+NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_tu10x.bin")
+#endif // defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
 
-#endif // NV_FIRMWARE_DECLARE_GSP
+#endif // NV_FIRMWARE_DECLARE_GSP_FILENAME
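Note: for context, a hedged sketch of how an includer such as nv.c might satisfy the 535-style contract described in the hunk above. The macro bodies shown here (a version-prefixed firmware directory, and a MODULE_FIRMWARE declaration per GSP image) are assumptions for illustration, not verbatim nv.c code:

/* Sketch of an includer of this header (e.g. nv.c); macro bodies are
 * illustrative. */
#define NV_FIRMWARE_PATH_FOR_FILENAME(filename)  ("nvidia/535.216.01/" filename)
#define NV_FIRMWARE_DECLARE_GSP_FILENAME(filename) \
    MODULE_FIRMWARE(NV_FIRMWARE_PATH_FOR_FILENAME(filename));
#include "nv-firmware.h"

/* nv_firmware_path() then resolves a chip family to a loadable path: */
/*   const char *p = nv_firmware_path(NV_FIRMWARE_TYPE_GSP,            */
/*                                    NV_FIRMWARE_CHIP_FAMILY_GA10X);  */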
@@ -25,12 +25,14 @@
 #ifndef NV_IOCTL_NUMA_H
 #define NV_IOCTL_NUMA_H
 
+#if defined(NV_LINUX)
+
 #include <nv-ioctl-numbers.h>
 
-#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
+#if defined(NV_KERNEL_INTERFACE_LAYER)
 
 #include <linux/types.h>
 
-#elif defined (NV_KERNEL_INTERFACE_LAYER) && defined(NV_BSD)
-
-#include <sys/stdint.h>
-
 #else
 
 #include <stdint.h>
@@ -79,3 +81,5 @@ typedef struct nv_ioctl_set_numa_status
 #define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6
 
 #endif
+
+#endif
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,6 +39,5 @@
 #define NV_ESC_QUERY_DEVICE_INTR     (NV_IOCTL_BASE + 13)
 #define NV_ESC_SYS_PARAMS            (NV_IOCTL_BASE + 14)
 #define NV_ESC_EXPORT_TO_DMABUF_FD   (NV_IOCTL_BASE + 17)
-#define NV_ESC_WAIT_OPEN_COMPLETE    (NV_IOCTL_BASE + 18)
 
 #endif
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -128,9 +128,6 @@ typedef struct nv_ioctl_register_fd
 
 #define NV_DMABUF_EXPORT_MAX_HANDLES 128
 
-#define NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT    0
-#define NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE 1
-
 typedef struct nv_ioctl_export_to_dma_buf_fd
 {
     int fd;
@@ -139,17 +136,10 @@ typedef struct nv_ioctl_export_to_dma_buf_fd
     NvU32 numObjects;
     NvU32 index;
     NvU64 totalSize NV_ALIGN_BYTES(8);
-    NvU8  mappingType;
     NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
     NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
    NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
     NvU32 status;
 } nv_ioctl_export_to_dma_buf_fd_t;
 
-typedef struct nv_ioctl_wait_open_complete
-{
-    int rc;
-    NvU32 adapterStatus;
-} nv_ioctl_wait_open_complete_t;
-
 #endif
@@ -1,64 +0,0 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NV_KTHREAD_QUEUE_OS_H__
-#define __NV_KTHREAD_QUEUE_OS_H__
-
-#include <linux/types.h>            // atomic_t
-#include <linux/list.h>             // list
-#include <linux/sched.h>            // task_struct
-#include <linux/numa.h>             // NUMA_NO_NODE
-#include <linux/semaphore.h>
-
-#include "conftest.h"
-
-struct nv_kthread_q
-{
-    struct list_head q_list_head;
-    spinlock_t q_lock;
-
-    // This is a counting semaphore. It gets incremented and decremented
-    // exactly once for each item that is added to the queue.
-    struct semaphore q_sem;
-    atomic_t main_loop_should_exit;
-
-    struct task_struct *q_kthread;
-
-    bool is_unload_flush_ongoing;
-};
-
-struct nv_kthread_q_item
-{
-    struct list_head q_list_node;
-    nv_q_func_t function_to_run;
-    void *function_args;
-};
-
-
-#ifndef NUMA_NO_NODE
-#define NUMA_NO_NODE (-1)
-#endif
-
-#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
-
-#endif
@@ -24,14 +24,13 @@
 #ifndef __NV_KTHREAD_QUEUE_H__
 #define __NV_KTHREAD_QUEUE_H__
 
-struct nv_kthread_q;
-struct nv_kthread_q_item;
-typedef struct nv_kthread_q nv_kthread_q_t;
-typedef struct nv_kthread_q_item nv_kthread_q_item_t;
+#include <linux/types.h>            // atomic_t
+#include <linux/list.h>             // list
+#include <linux/sched.h>            // task_struct
+#include <linux/numa.h>             // NUMA_NO_NODE
+#include <linux/semaphore.h>
 
-typedef void (*nv_q_func_t)(void *args);
+#include "conftest.h"
 
-#include "nv-kthread-q-os.h"
-
 ////////////////////////////////////////////////////////////////////////////////
 // nv_kthread_q:
@@ -86,6 +85,38 @@ typedef void (*nv_q_func_t)(void *args);
 //
 ////////////////////////////////////////////////////////////////////////////////
 
+typedef struct nv_kthread_q nv_kthread_q_t;
+typedef struct nv_kthread_q_item nv_kthread_q_item_t;
+
+typedef void (*nv_q_func_t)(void *args);
+
+struct nv_kthread_q
+{
+    struct list_head q_list_head;
+    spinlock_t q_lock;
+
+    // This is a counting semaphore. It gets incremented and decremented
+    // exactly once for each item that is added to the queue.
+    struct semaphore q_sem;
+    atomic_t main_loop_should_exit;
+
+    struct task_struct *q_kthread;
+};
+
+struct nv_kthread_q_item
+{
+    struct list_head q_list_node;
+    nv_q_func_t function_to_run;
+    void *function_args;
+};
+
+
+#ifndef NUMA_NO_NODE
+#define NUMA_NO_NODE (-1)
+#endif
+
+#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
+
 //
 // The queue must not be used before calling this routine.
 //
@@ -124,7 +155,10 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q,
 // This routine is the same as nv_kthread_q_init_on_node() with the exception
 // that the queue stack will be allocated on the NUMA node of the caller.
 //
-int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname);
+static inline int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
+{
+    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
+}
 
 //
 // The caller is responsible for stopping all queues, by calling this routine
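Note: a hedged usage sketch of this queue API, assuming the item-init, scheduling, and stop entry points declared elsewhere in this header (nv_kthread_q_item_init(), nv_kthread_q_schedule_q_item(), nv_kthread_q_stop()); the callback and variable names are illustrative:

/* Illustrative use of nv_kthread_q: one queue, one work item. */
static void my_work_cb(void *args)   /* runs once on the queue's kthread */
{
    (void)args;
}

static nv_kthread_q_t q;
static nv_kthread_q_item_t item;

static int example(void)
{
    /* 535-style inline init: stack allocated on the caller's NUMA node */
    int ret = nv_kthread_q_init(&q, "my_q");
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&item, my_work_cb, NULL);
    nv_kthread_q_schedule_q_item(&q, &item);   /* q_sem incremented once */

    nv_kthread_q_stop(&q);   /* flushes pending items, then stops the kthread */
    return 0;
}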
@@ -35,7 +35,6 @@
 #include "os-interface.h"
 #include "nv-timer.h"
 #include "nv-time.h"
-#include "nv-chardev-numbers.h"
 
 #define NV_KERNEL_NAME "Linux"
 
@@ -58,10 +57,14 @@
 #include <linux/version.h>
 #include <linux/utsname.h>
 
-#if LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 0)
-// Version 4.4 is allowed, temporarily, although not officially supported.
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
-#error "This driver does not support kernels older than Linux 4.15!"
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
+#error "This driver does not support kernels older than 2.6.32!"
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0)
+#  define KERNEL_2_6
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+#  define KERNEL_3
+#else
+#error "This driver does not support development kernels!"
 #endif
 
 #if defined (CONFIG_SMP) && !defined (__SMP__)
@@ -231,6 +234,12 @@ NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
                                      const char *, va_list);
 #endif
 
+#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH)
+#include <asm/eeh.h>
+#define NV_PCI_ERROR_RECOVERY_ENABLED() eeh_enabled()
+#define NV_PCI_ERROR_RECOVERY
+#endif
+
 #if defined(NV_ASM_SET_MEMORY_H_PRESENT)
 #include <asm/set_memory.h>
 #endif
@@ -239,7 +248,7 @@ NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
 #undef NV_SET_PAGES_UC_PRESENT
 #endif
 
-#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE) && !defined(NVCPU_RISCV64)
+#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE)
 #if !defined(NV_SET_MEMORY_UC_PRESENT) && !defined(NV_SET_PAGES_UC_PRESENT)
 #error "This driver requires the ability to change memory types!"
 #endif
@@ -345,6 +354,8 @@ extern int nv_pat_mode;
 
 #define NV_PAGE_COUNT(page) \
   ((unsigned int)page_count(page))
+#define NV_GET_PAGE_COUNT(page_ptr) \
+  (NV_PAGE_COUNT(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)))
 #define NV_GET_PAGE_FLAGS(page_ptr) \
   (NV_GET_PAGE_STRUCT(page_ptr->phys_addr)->flags)
 
@@ -395,6 +406,32 @@ extern int nv_pat_mode;
 #define NV_GFP_DMA32 (NV_GFP_KERNEL)
 #endif
 
+extern NvBool nvos_is_chipset_io_coherent(void);
+
+#if defined(NVCPU_X86_64)
+#define CACHE_FLUSH()            asm volatile("wbinvd":::"memory")
+#define WRITE_COMBINE_FLUSH()    asm volatile("sfence":::"memory")
+#elif defined(NVCPU_AARCH64)
+    static inline void nv_flush_cache_cpu(void *info)
+    {
+        if (!nvos_is_chipset_io_coherent())
+        {
+#if defined(NV_FLUSH_CACHE_ALL_PRESENT)
+            flush_cache_all();
+#else
+            WARN_ONCE(0, "NVRM: kernel does not support flush_cache_all()\n");
+#endif
+        }
+    }
+#define CACHE_FLUSH()            nv_flush_cache_cpu(NULL)
+#define CACHE_FLUSH_ALL()        on_each_cpu(nv_flush_cache_cpu, NULL, 1)
+#define WRITE_COMBINE_FLUSH()    mb()
+#elif defined(NVCPU_PPC64LE)
+#define CACHE_FLUSH()            asm volatile("sync;  \n" \
+                                              "isync; \n" ::: "memory")
+#define WRITE_COMBINE_FLUSH()    CACHE_FLUSH()
+#endif
+
 typedef enum
 {
     NV_MEMORY_TYPE_SYSTEM, /* Memory mapped for ROM, SBIOS and physical RAM. */
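Note: a hedged sketch of how the per-architecture flush macros added above are typically used. After CPU stores through a write-combined mapping, WRITE_COMBINE_FLUSH() drains the posted writes before the device is told to consume them; the buffer and the doorbell step here are illustrative, not driver code:

/* Illustrative WC-buffer push using the macros defined above. */
static void example_wc_push(volatile NvU32 *wc_buf, NvU32 val)
{
    wc_buf[0] = val;          /* store through a write-combined mapping */
    WRITE_COMBINE_FLUSH();    /* sfence / mb() / sync-isync, per CPU above */
    /* ...now safe to ring a doorbell that makes the device read wc_buf */
}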
@@ -403,7 +440,7 @@ typedef enum
     NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */
 } nv_memory_type_t;
 
-#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64)
+#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE)
 #define NV_ALLOW_WRITE_COMBINING(mt) 1
 #elif defined(NVCPU_X86_64)
 #if defined(NV_ENABLE_PAT_SUPPORT)
@@ -601,7 +638,7 @@ static NvBool nv_numa_node_has_memory(int node_id)
 
 #define NV_ALLOC_PAGES_NODE(ptr, nid, order, gfp_mask) \
     { \
-        (ptr) = (unsigned long) alloc_pages_node(nid, gfp_mask, order); \
+        (ptr) = (unsigned long)page_address(alloc_pages_node(nid, gfp_mask, order)); \
     }
 
 #define NV_GET_FREE_PAGES(ptr, order, gfp_mask) \
@@ -716,7 +753,6 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
 #endif
 }
 
-#define NV_GET_OFFSET_IN_PAGE(phys_page) offset_in_page(phys_page)
 #define NV_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))
 #define NV_VMA_PGOFF(vma) ((vma)->vm_pgoff)
 #define NV_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start)
@@ -725,6 +761,7 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
 #define NV_VMA_FILE(vma) ((vma)->vm_file)
 
 #define NV_DEVICE_MINOR_NUMBER(x) minor((x)->i_rdev)
+#define NV_CONTROL_DEVICE_MINOR 255
 
 #define NV_PCI_DISABLE_DEVICE(pci_dev) \
     { \
@@ -833,16 +870,16 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
 #define NV_PRINT_AT(nv_debug_level,at) \
     { \
         nv_printf(nv_debug_level, \
-            "NVRM: VM: %s:%d: 0x%p, %d page(s), count = %d, " \
+            "NVRM: VM: %s:%d: 0x%p, %d page(s), count = %d, flags = 0x%08x, " \
             "page_table = 0x%p\n", __FUNCTION__, __LINE__, at, \
             at->num_pages, NV_ATOMIC_READ(at->usage_count), \
-            at->page_table); \
+            at->flags, at->page_table); \
     }
 
 #define NV_PRINT_VMA(nv_debug_level,vma) \
     { \
         nv_printf(nv_debug_level, \
-            "NVRM: VM: %s:%d: 0x%lx - 0x%lx, 0x%08lx bytes @ 0x%016llx, 0x%p, 0x%p\n", \
+            "NVRM: VM: %s:%d: 0x%lx - 0x%lx, 0x%08x bytes @ 0x%016llx, 0x%p, 0x%p\n", \
            __FUNCTION__, __LINE__, vma->vm_start, vma->vm_end, NV_VMA_SIZE(vma), \
             NV_VMA_OFFSET(vma), NV_VMA_PRIVATE(vma), NV_VMA_FILE(vma)); \
     }
@@ -873,6 +910,16 @@ typedef void irqreturn_t;
 #define PCI_CAP_ID_EXP 0x10
 #endif
 
+/*
+ * On Linux on PPC64LE enable basic support for Linux PCI error recovery (see
+ * Documentation/PCI/pci-error-recovery.txt). Currently RM only supports error
+ * notification and data collection, not actual recovery of the device.
+ */
+#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH)
+#include <asm/eeh.h>
+#define NV_PCI_ERROR_RECOVERY
+#endif
+
 /*
  * If the host OS has page sizes larger than 4KB, we may have a security
  * problem. Registers are typically grouped in 4KB pages, but if there are
@@ -934,14 +981,14 @@ static inline int nv_remap_page_range(struct vm_area_struct *vma,
 }
 
 static inline int nv_io_remap_page_range(struct vm_area_struct *vma,
-    NvU64 phys_addr, NvU64 size, NvU32 extra_prot, NvU64 start)
+    NvU64 phys_addr, NvU64 size, NvU32 extra_prot)
 {
     int ret = -1;
 #if !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL)
-    ret = nv_remap_page_range(vma, start, phys_addr, size,
+    ret = nv_remap_page_range(vma, vma->vm_start, phys_addr, size,
                               nv_adjust_pgprot(vma->vm_page_prot, extra_prot));
 #else
-    ret = io_remap_pfn_range(vma, start, (phys_addr >> PAGE_SHIFT),
+    ret = io_remap_pfn_range(vma, vma->vm_start, (phys_addr >> PAGE_SHIFT),
                              size, nv_adjust_pgprot(vma->vm_page_prot, extra_prot));
 #endif
     return ret;
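Note: for context, a sketch of a character-device mmap handler calling the 535-style helper above, which always maps from vma->vm_start rather than taking an explicit start argument. The fops wiring and the physical address are illustrative assumptions:

/* Illustrative mmap handler using nv_io_remap_page_range(). */
static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
    NvU64 phys = 0xf0000000;          /* illustrative MMIO physical base */
    NvU64 size = NV_VMA_SIZE(vma);    /* macro defined earlier in this header */

    /* 535-style signature: no start parameter, vm_start is implied */
    if (nv_io_remap_page_range(vma, phys, size, 0) != 0)
        return -EAGAIN;
    return 0;
}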
@@ -1065,8 +1112,6 @@ static inline void nv_kmem_ctor_dummy(void *arg)
         kmem_cache_destroy(kmem_cache); \
     }
 
-#define NV_KMEM_CACHE_ALLOC_ATOMIC(kmem_cache) \
-    kmem_cache_alloc(kmem_cache, GFP_ATOMIC)
 #define NV_KMEM_CACHE_ALLOC(kmem_cache) \
     kmem_cache_alloc(kmem_cache, GFP_KERNEL)
 #define NV_KMEM_CACHE_FREE(ptr, kmem_cache) \
@@ -1093,23 +1138,6 @@ static inline void *nv_kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
 #endif
 }
 
-static inline int nv_kmem_cache_alloc_stack_atomic(nvidia_stack_t **stack)
-{
-    nvidia_stack_t *sp = NULL;
-#if defined(NVCPU_X86_64)
-    if (rm_is_altstack_in_use())
-    {
-        sp = NV_KMEM_CACHE_ALLOC_ATOMIC(nvidia_stack_t_cache);
-        if (sp == NULL)
-            return -ENOMEM;
-        sp->size = sizeof(sp->stack);
-        sp->top = sp->stack + sp->size;
-    }
-#endif
-    *stack = sp;
-    return 0;
-}
-
 static inline int nv_kmem_cache_alloc_stack(nvidia_stack_t **stack)
 {
     nvidia_stack_t *sp = NULL;
@@ -1159,18 +1187,12 @@ typedef struct nvidia_pte_s {
     NvU64 phys_addr;
     unsigned long virt_addr;
     NvU64 dma_addr;
+#ifdef CONFIG_XEN
+    unsigned int guest_pfn;
+#endif
+    unsigned int page_count;
 } nvidia_pte_t;
 
-#if defined(CONFIG_DMA_SHARED_BUFFER)
-/* Standard dma_buf-related information. */
-struct nv_dma_buf
-{
-    struct dma_buf *dma_buf;
-    struct dma_buf_attachment *dma_attach;
-    struct sg_table *sgt;
-};
-#endif // CONFIG_DMA_SHARED_BUFFER
-
 typedef struct nv_alloc_s {
     struct nv_alloc_s *next;
     struct device *dev;
@@ -1186,7 +1208,6 @@ typedef struct nv_alloc_s {
         NvBool physical : 1;
         NvBool unencrypted : 1;
         NvBool coherent : 1;
-        NvBool carveout : 1;
     } flags;
     unsigned int cache_type;
     unsigned int num_pages;
@@ -1363,19 +1384,7 @@ typedef struct nv_dma_map_s {
          i < dm->mapping.discontig.submap_count; \
         i++, sm = &dm->mapping.discontig.submaps[i])
 
-/*
- * On 4K ARM kernels, use max submap size a multiple of 64K to keep nv-p2p happy.
- * Despite 4K OS pages, we still use 64K P2P pages due to dependent modules still using 64K.
- * Instead of using (4G-4K), use max submap size as (4G-64K) since the mapped IOVA range
- * must be aligned at 64K boundary.
- */
-#if defined(CONFIG_ARM64_4K_PAGES)
-#define NV_DMA_U32_MAX_4K_PAGES ((NvU32)((NV_U32_MAX >> PAGE_SHIFT) + 1))
-#define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_DMA_U32_MAX_4K_PAGES - 16))
-#else
 #define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_U32_MAX >> PAGE_SHIFT))
-#endif
 
 #define NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(s) (s * NV_DMA_SUBMAP_MAX_PAGES)
 
 /*
@@ -1397,6 +1406,8 @@ typedef struct nv_dma_map_s {
         0 ? NV_OK : NV_ERR_OPERATING_SYSTEM)
 #endif
 
+typedef struct nv_ibmnpu_info nv_ibmnpu_info_t;
+
 typedef struct nv_work_s {
     struct work_struct task;
     void *data;
@@ -1444,6 +1455,7 @@ struct nv_dma_device {
     } addressable_range;
 
     struct device *dev;
+    NvBool nvlink;
 };
 
 /* Properties of the coherent link */
@@ -1452,11 +1464,6 @@ typedef struct coherent_link_info_s {
      * baremetal OS environment it is System Physical Address(SPA) and in the case
      * of virutalized OS environment it is Intermediate Physical Address(IPA) */
     NvU64 gpu_mem_pa;
-
-    /* Physical address of the reserved portion of the GPU memory, applicable
-     * only in Grace Hopper self hosted passthrough virtualizatioan platform. */
-    NvU64 rsvd_mem_pa;
-
     /* Bitmap of NUMA node ids, corresponding to the reserved PXMs,
      * available for adding GPU memory to the kernel as system RAM */
     DECLARE_BITMAP(free_node_bitmap, MAX_NUMNODES);
@@ -1492,6 +1499,9 @@ typedef struct nv_linux_state_s {
     struct device *dev;
     struct pci_dev *pci_dev;
 
+    /* IBM-NPU info associated with this GPU */
+    nv_ibmnpu_info_t *npu;
+
     /* coherent link information */
     coherent_link_info_t coherent_link_info;
 
@@ -1601,26 +1611,6 @@ typedef struct nv_linux_state_s {
 
     struct nv_dma_device dma_dev;
     struct nv_dma_device niso_dma_dev;
 
-    /*
-     * Background kthread for handling deferred open operations
-     * (e.g. from O_NONBLOCK).
-     *
-     * Adding to open_q and reading/writing is_accepting_opens
-     * are protected by nvl->open_q_lock (not nvl->ldata_lock).
-     * This allows new deferred open operations to be enqueued without
-     * blocking behind previous ones (which hold nvl->ldata_lock).
-     *
-     * Adding to open_q is only safe if is_accepting_opens is true.
-     * This prevents open operations from racing with device removal.
-     *
-     * Stopping open_q is only safe after setting is_accepting_opens to false.
-     * This ensures that the open_q (and the larger nvl structure) will
-     * outlive any of the open operations enqueued.
-     */
-    nv_kthread_q_t open_q;
-    NvBool is_accepting_opens;
-    struct semaphore open_q_lock;
 #if defined(NV_VGPU_KVM_BUILD)
     wait_queue_head_t wait;
     NvS32 return_status;
@@ -1668,13 +1658,22 @@ typedef struct nvidia_event
     nv_event_t event;
 } nvidia_event_t;
 
+typedef enum
+{
+    NV_FOPS_STACK_INDEX_MMAP,
+    NV_FOPS_STACK_INDEX_IOCTL,
+    NV_FOPS_STACK_INDEX_COUNT
+} nvidia_entry_point_index_t;
+
 typedef struct
 {
     nv_file_private_t nvfp;
 
     nvidia_stack_t *sp;
+    nvidia_stack_t *fops_sp[NV_FOPS_STACK_INDEX_COUNT];
+    struct semaphore fops_sp_lock[NV_FOPS_STACK_INDEX_COUNT];
     nv_alloc_t *free_list;
-    nv_linux_state_t *nvptr;
+    void *nvptr;
     nvidia_event_t *event_data_head, *event_data_tail;
     NvBool dataless_event_pending;
     nv_spinlock_t fp_lock;
@@ -1685,12 +1684,6 @@ typedef struct
     nv_alloc_mapping_context_t mmap_context;
     struct address_space mapping;
 
-    nv_kthread_q_item_t open_q_item;
-    struct completion open_complete;
-    nv_linux_state_t *deferred_open_nvl;
-    int open_rc;
-    NV_STATUS adapter_status;
-
     struct list_head entry;
 } nv_linux_file_private_t;
 
@@ -1699,21 +1692,6 @@ static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t
     return container_of(nvfp, nv_linux_file_private_t, nvfp);
 }
 
-static inline int nv_wait_open_complete_interruptible(nv_linux_file_private_t *nvlfp)
-{
-    return wait_for_completion_interruptible(&nvlfp->open_complete);
-}
-
-static inline void nv_wait_open_complete(nv_linux_file_private_t *nvlfp)
-{
-    wait_for_completion(&nvlfp->open_complete);
-}
-
-static inline NvBool nv_is_open_complete(nv_linux_file_private_t *nvlfp)
-{
-    return completion_done(&nvlfp->open_complete);
-}
-
 #define NV_SET_FILE_PRIVATE(filep,data) ((filep)->private_data = (data))
 #define NV_GET_LINUX_FILE_PRIVATE(filep) ((nv_linux_file_private_t *)(filep)->private_data)
 
@@ -1723,6 +1701,28 @@ static inline NvBool nv_is_open_complete(nv_linux_file_private_t *nvlfp)
 
 #define NV_STATE_PTR(nvl)   &(((nv_linux_state_t *)(nvl))->nv_state)
 
+static inline nvidia_stack_t *nv_nvlfp_get_sp(nv_linux_file_private_t *nvlfp, nvidia_entry_point_index_t which)
+{
+#if defined(NVCPU_X86_64)
+    if (rm_is_altstack_in_use())
+    {
+        down(&nvlfp->fops_sp_lock[which]);
+        return nvlfp->fops_sp[which];
+    }
+#endif
+    return NULL;
+}
+
+static inline void nv_nvlfp_put_sp(nv_linux_file_private_t *nvlfp, nvidia_entry_point_index_t which)
+{
+#if defined(NVCPU_X86_64)
+    if (rm_is_altstack_in_use())
+    {
+        up(&nvlfp->fops_sp_lock[which]);
+    }
+#endif
+}
+
 #define NV_ATOMIC_READ(data)            atomic_read(&(data))
 #define NV_ATOMIC_SET(data,val)         atomic_set(&(data), (val))
 #define NV_ATOMIC_INC(data)             atomic_inc(&(data))
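Note: the nv_nvlfp_get_sp()/nv_nvlfp_put_sp() pair added above implements a get/put protocol around the pre-allocated per-file alternate stacks. A hedged sketch of the calling pattern inside an entry point; the function name and the NULL-fallback comment are illustrative:

/* Illustrative ioctl-path usage of the fops_sp get/put pair. */
static int example_ioctl_body(nv_linux_file_private_t *nvlfp)
{
    nvidia_stack_t *sp = nv_nvlfp_get_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);

    if (sp == NULL)
    {
        /* altstack not in use: a caller would allocate a stack instead,
         * e.g. via nv_kmem_cache_alloc_stack() defined earlier */
    }

    /* ... perform the RM call on sp ... */

    nv_nvlfp_put_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);
    return 0;
}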
@@ -1795,25 +1795,33 @@ static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv)
|
|||||||
extern NvU32 NVreg_EnableUserNUMAManagement;
|
extern NvU32 NVreg_EnableUserNUMAManagement;
|
||||||
extern NvU32 NVreg_RegisterPCIDriver;
|
extern NvU32 NVreg_RegisterPCIDriver;
|
||||||
extern NvU32 NVreg_EnableResizableBar;
|
extern NvU32 NVreg_EnableResizableBar;
|
||||||
extern NvU32 NVreg_EnableNonblockingOpen;
|
|
||||||
|
|
||||||
extern NvU32 num_probed_nv_devices;
|
extern NvU32 num_probed_nv_devices;
|
||||||
extern NvU32 num_nv_devices;
|
extern NvU32 num_nv_devices;
|
||||||
|
|
||||||
#define NV_FILE_INODE(file) (file)->f_inode
|
#define NV_FILE_INODE(file) (file)->f_inode
|
||||||
|
|
||||||
static inline int nv_is_control_device(struct inode *inode)
|
#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD)
|
||||||
{
|
|
||||||
return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
|
|
||||||
}
|
|
||||||
|
|
||||||
#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD) || defined(NV_DEVICE_VM_BUILD)
|
|
||||||
#define NV_VGX_HYPER
|
#define NV_VGX_HYPER
|
||||||
#if defined(NV_XEN_IOEMU_INJECT_MSI)
|
#if defined(NV_XEN_IOEMU_INJECT_MSI)
|
||||||
#include <xen/ioemu.h>
|
#include <xen/ioemu.h>
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static inline NvU64 nv_pci_bus_address(struct pci_dev *dev, NvU8 bar_index)
|
||||||
|
{
|
||||||
|
NvU64 bus_addr = 0;
|
||||||
|
#if defined(NV_PCI_BUS_ADDRESS_PRESENT)
|
||||||
|
bus_addr = pci_bus_address(dev, bar_index);
|
||||||
|
#elif defined(CONFIG_PCI)
|
||||||
|
struct pci_bus_region region;
|
||||||
|
|
||||||
|
pcibios_resource_to_bus(dev, ®ion, &dev->resource[bar_index]);
|
||||||
|
bus_addr = region.start;
|
||||||
|
#endif
|
||||||
|
return bus_addr;
|
||||||
|
}
|
||||||
|
|
||||||
/*
 * Decrements the usage count of the allocation, and moves the allocation to
 * the given nvlfp's free list if the usage count drops to zero.
@@ -1844,6 +1852,59 @@ static inline NvBool nv_alloc_release(nv_linux_file_private_t *nvlfp, nv_alloc_t
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
#endif

/*
 * Starting on Power9 systems, DMA addresses for NVLink are no longer
 * the same as used over PCIe.
 *
 * Power9 supports a 56-bit Real Address. This address range is compressed
 * when accessed over NVLink to allow the GPU to access all of memory using
 * its 47-bit Physical address.
 *
 * If there is an NPU device present on the system, it implies that NVLink
 * sysmem links are present and we need to apply the required address
 * conversion for NVLink within the driver.
 *
 * See Bug 1920398 for further background and details.
 *
 * Note, a deviation from the documented compression scheme is that the
 * upper address bits (i.e. bit 56-63) instead of being set to zero are
 * preserved during NVLink address compression so the original PCIe DMA
 * address can be reconstructed on expansion. These bits can be safely
 * ignored on NVLink since they are truncated by the GPU.
 *
 * Bug 1968345: As a performance enhancement it is the responsibility of
 * the caller on PowerPC platforms to check for presence of an NPU device
 * before the address transformation is applied.
 */
static inline NvU64 nv_compress_nvlink_addr(NvU64 addr)
{
    NvU64 addr47 = addr;

#if defined(NVCPU_PPC64LE)
    addr47 = addr & ((1ULL << 43) - 1);
    addr47 |= (addr & (0x3ULL << 45)) >> 2;
    WARN_ON(addr47 & (1ULL << 44));
    addr47 |= (addr & (0x3ULL << 49)) >> 4;
    addr47 |= addr & ~((1ULL << 56) - 1);
#endif

    return addr47;
}

static inline NvU64 nv_expand_nvlink_addr(NvU64 addr47)
{
    NvU64 addr = addr47;

#if defined(NVCPU_PPC64LE)
    addr = addr47 & ((1ULL << 43) - 1);
    addr |= (addr47 & (3ULL << 43)) << 2;
    addr |= (addr47 & (3ULL << 45)) << 4;
    addr |= addr47 & ~((1ULL << 56) - 1);
#endif

    return addr;
}
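/*
 * Illustrative sketch (not part of the original diff): given the layout
 * described above, compression folds bits 45-46 down to 43-44 and bits
 * 49-50 down to 45-46, so expansion recovers any PCIe DMA address that
 * only uses the fields the compression keeps (bits 0-42, 45-46, 49-50 and
 * 56-63). Assumes an NVCPU_PPC64LE build.
 */
static inline void nv_example_nvlink_addr_roundtrip(void)
{
    NvU64 pcie   = (1ULL << 50) | (1ULL << 45) | 0x12345ULL;
    NvU64 nvlink = nv_compress_nvlink_addr(pcie);

    WARN_ON(nv_expand_nvlink_addr(nvlink) != pcie);
}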
// Default flags for ISRs
static inline NvU32 nv_default_irq_flags(nv_state_t *nv)
{
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -37,7 +37,6 @@

#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
typedef raw_spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock) DEFINE_RAW_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) raw_spin_unlock_irq(lock)
@@ -48,7 +47,6 @@ typedef raw_spinlock_t nv_spinlock_t;
#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock)
#else
typedef spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock) DEFINE_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock)
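/*
 * Illustrative sketch (not part of the original diff): the wrappers keep
 * callers agnostic to PREEMPT_RT, where plain spinlocks can sleep and the
 * raw_spin_* variants must be used instead. Hypothetical usage:
 */
static NV_DEFINE_SPINLOCK(nv_example_lock);
static NvU32 nv_example_counter;

static inline void nv_example_bump(void)
{
    NV_SPIN_LOCK_IRQ(&nv_example_lock);
    nv_example_counter++;
    NV_SPIN_UNLOCK_IRQ(&nv_example_lock);
}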
@@ -29,33 +29,27 @@
typedef int vm_fault_t;
#endif

/*
/* pin_user_pages
 * pin_user_pages()
 *
 * Presence of pin_user_pages() also implies the presence of unpin_user_page().
 * Both were added in the v5.6.
 * Both were added in the v5.6-rc1
 *
 * pin_user_pages() was added by commit eddb1c228f79
 * pin_user_pages() was added by commit eddb1c228f7951d399240
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6.
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
 *
 * Removed vmas parameter from pin_user_pages() by commit 40896a02751
 * ("mm/gup: remove vmas parameter from pin_user_pages()")
 * in linux-next, expected in v6.5-rc1 (2023-05-17)
 *
 * Removed vmas parameter from pin_user_pages() by commit 4c630f307455
 * ("mm/gup: remove vmas parameter from pin_user_pages()") in v6.5.
 */

#include <linux/mm.h>
#include <linux/sched.h>

#if defined(NV_PIN_USER_PAGES_PRESENT)
/*
 * FreeBSD's pin_user_pages's conftest breaks since pin_user_pages is an inline
 * function. Because it simply maps to get_user_pages, we can just replace
 * NV_PIN_USER_PAGES with NV_GET_USER_PAGES on FreeBSD
 */
#if defined(NV_PIN_USER_PAGES_PRESENT) && !defined(NV_BSD)
#if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages) \
    pin_user_pages(start, nr_pages, gup_flags, pages, NULL)
#else
#define NV_PIN_USER_PAGES pin_user_pages
#else
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages, vmas) \
    pin_user_pages(start, nr_pages, gup_flags, pages)
#endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS
#define NV_UNPIN_USER_PAGE unpin_user_page
#else
@@ -63,83 +57,80 @@ typedef int vm_fault_t;
#define NV_UNPIN_USER_PAGE put_page
#endif // NV_PIN_USER_PAGES_PRESENT

/*
/* get_user_pages
 * get_user_pages()
 *
 * The 8-argument version of get_user_pages() was deprecated by commit
 * The 8-argument version of get_user_pages was deprecated by commit
 * cde70140fed8 ("mm/gup: Overload get_user_pages() functions") in v4.6-rc1.
 * (2016 Feb 12: cde70140fed8429acf7a14e2e2cbd3e329036653) for the non-remote case
 * (calling get_user_pages with current and current->mm).
 *
 * Completely moved to the 6 argument version of get_user_pages() by
 * Completely moved to the 6 argument version of get_user_pages -
 * commit c12d2da56d0e ("mm/gup: Remove the macro overload API migration
 * 2016 Apr 4: c12d2da56d0e07d230968ee2305aaa86b93a6832
 * helpers from the get_user*() APIs") in v4.6-rc4.
 *
 * write and force parameters were replaced with gup_flags by
 * write and force parameters were replaced with gup_flags by -
 * commit 768ae309a961 ("mm: replace get_user_pages() write/force parameters
 * 2016 Oct 12: 768ae309a96103ed02eb1e111e838c87854d8b51
 * with gup_flags") in v4.9.
 *
 * A 7-argument version of get_user_pages was introduced into linux-4.4.y by
 * commit 8e50b8b07f462 ("mm: replace get_user_pages() write/force parameters
 * commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40 which cherry-picked the
 * with gup_flags") which cherry-picked the replacement of the write and
 * replacement of the write and force parameters with gup_flags
 * force parameters with gup_flags.
 *
 * Removed vmas parameter from get_user_pages() by commit 54d020692b34
 * Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
 * ("mm/gup: remove unused vmas parameter from get_user_pages()") in v6.5.
 * ("mm/gup: remove unused vmas parameter from get_user_pages()")
 * in linux-next, expected in v6.5-rc1 (2023-05-17)
 *
 */

#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#define NV_GET_USER_PAGES get_user_pages
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
    get_user_pages(start, nr_pages, flags, pages)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
#define NV_GET_USER_PAGES get_user_pages
    get_user_pages(start, nr_pages, flags, pages, NULL)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
    get_user_pages(current, current->mm, start, nr_pages, flags, pages, NULL)
    get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
#else
static inline long NV_GET_USER_PAGES(unsigned long start,
                                     unsigned long nr_pages,
                                     unsigned int flags,
                                     struct page **pages)
                                     struct page **pages,
                                     struct vm_area_struct **vmas)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
    return get_user_pages(start, nr_pages, write, force, pages, NULL);
    return get_user_pages(start, nr_pages, write, force, pages, vmas);
#else
    // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
    return get_user_pages(current, current->mm, start, nr_pages, write,
                          force, pages, NULL);
                          force, pages, vmas);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
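/*
 * Illustrative sketch (not part of the original diff): pinning one user
 * page through the wrapper, using the 570-series four-argument form and
 * assuming a kernel recent enough to provide mmap_read_lock().
 */
static inline struct page *nv_example_pin_one_page(unsigned long uaddr)
{
    struct page *page = NULL;
    long pinned;

    mmap_read_lock(current->mm);
    pinned = NV_GET_USER_PAGES(uaddr & PAGE_MASK, 1, FOLL_WRITE, &page);
    mmap_read_unlock(current->mm);

    return (pinned == 1) ? page : NULL;
}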
/*
/* pin_user_pages_remote
 * pin_user_pages_remote()
 *
 * pin_user_pages_remote() was added by commit eddb1c228f79
 * pin_user_pages_remote() was added by commit eddb1c228f7951d399240
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6.
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6 (2020-01-30)
 *
 * pin_user_pages_remote() removed 'tsk' parameter by commit
 * 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
 * in v5.9.
 * in v5.9-rc1 (2020-08-11).
 *
 * Removed unused vmas parameter from pin_user_pages_remote() by commit
 * 0b295316b3a9 ("mm/gup: remove unused vmas parameter from
 * 83bcc2e132 ("mm/gup: remove unused vmas parameter from pin_user_pages_remote()")
 * pin_user_pages_remote()") in v6.5.
 * in linux-next, expected in v6.5-rc1 (2023-05-14)
 *
 */

#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)
    pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
    pin_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)
#else
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#else
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    pin_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS
#else
#define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
@@ -147,7 +138,7 @@ typedef int vm_fault_t;

/*
 * get_user_pages_remote() was added by commit 1e9877902dc7
 * ("mm/gup: Introduce get_user_pages_remote()") in v4.6.
 * ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12).
 *
 * Note that get_user_pages_remote() requires the caller to hold a reference on
 * the task_struct (if non-NULL and if this API has tsk argument) and the mm_struct.
@@ -157,35 +148,37 @@ typedef int vm_fault_t;
 *
 * get_user_pages_remote() write/force parameters were replaced
 * with gup_flags by commit 9beae1ea8930 ("mm: replace get_user_pages_remote()
 * write/force parameters with gup_flags") in v4.9.
 * write/force parameters with gup_flags") in v4.9 (2016-10-13).
 *
 * get_user_pages_remote() added 'locked' parameter by commit 5b56d49fc31d
 * ("mm: add locked parameter to get_user_pages_remote()") in v4.10.
 * ("mm: add locked parameter to get_user_pages_remote()") in
 * v4.10 (2016-12-14).
 *
 * get_user_pages_remote() removed 'tsk' parameter by
 * commit 64019a2e467a ("mm/gup: remove task_struct pointer for
 * all gup code") in v5.9.
 * all gup code") in v5.9-rc1 (2020-08-11).
 *
 * Removed vmas parameter from get_user_pages_remote() by commit ca5e863233e8
 * Removed vmas parameter from get_user_pages_remote() by commit a4bde14d549
 * ("mm/gup: remove vmas parameter from get_user_pages_remote()") in v6.5.
 * ("mm/gup: remove vmas parameter from get_user_pages_remote()")
 * in linux-next, expected in v6.5-rc1 (2023-05-14)
 *
 */

#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages_remote(mm, start, nr_pages, flags, pages, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
    get_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL)
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)

#else
// NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
@@ -194,13 +187,14 @@ typedef int vm_fault_t;
                                            unsigned long nr_pages,
                                            unsigned int flags,
                                            struct page **pages,
                                            struct vm_area_struct **vmas,
                                            int *locked)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

    return get_user_pages_remote(NULL, mm, start, nr_pages, write, force,
                                 pages, NULL);
                                 pages, vmas);
}
#endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
@@ -210,17 +204,18 @@ typedef int vm_fault_t;
                                            unsigned long nr_pages,
                                            unsigned int flags,
                                            struct page **pages,
                                            struct vm_area_struct **vmas,
                                            int *locked)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

    return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, NULL);
    return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, vmas);
}

#else
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages(NULL, mm, start, nr_pages, flags, pages, NULL)
    get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT
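/*
 * Illustrative sketch (not part of the original diff): pinning from a
 * foreign mm with the 570-series argument list. Assumes the caller holds a
 * reference on mm and a kernel providing mmap_read_lock(); gup may drop the
 * lock, which is what the 'locked' out-parameter reports.
 */
static inline long nv_example_gup_remote(struct mm_struct *mm,
                                         unsigned long uaddr,
                                         struct page **page)
{
    int locked = 1;
    long ret;

    mmap_read_lock(mm);
    ret = NV_GET_USER_PAGES_REMOTE(mm, uaddr & PAGE_MASK, 1, FOLL_WRITE,
                                   page, &locked);
    if (locked)
        mmap_read_unlock(mm);

    return ret;
}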
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,6 +36,5 @@ int nv_pci_count_devices(void);
NvU8 nv_find_pci_capability(struct pci_dev *, NvU8);
int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct device *);

#endif
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -60,7 +60,6 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
#endif /* !defined(NV_VMWARE) */

#if defined(NVCPU_AARCH64)
extern NvBool nvos_is_chipset_io_coherent(void);
/*
 * Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit
 * ARM that's not for system memory, but device memory instead. For I/O cache
@@ -120,13 +119,6 @@ extern NvBool nvos_is_chipset_io_coherent(void);
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
    __pgprot(pgprot_val((old_prot)) & ~NV_PAGE_RW)
#elif defined(NVCPU_RISCV64)
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
    pgprot_writecombine(old_prot)
/* Don't attempt to mark sysmem pages as write combined on riscv */
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
    __pgprot(pgprot_val((old_prot)) & ~_PAGE_WRITE)
#else
/* Writecombine is not supported */
#undef NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot)
@@ -92,24 +92,6 @@ typedef struct file_operations nv_proc_ops_t;
#endif

#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
    static ssize_t nv_procfs_read_lock_##name( \
        struct file *file, \
        char __user *buf, \
        size_t size, \
        loff_t *ppos \
    ) \
    { \
        int ret; \
        ret = nv_down_read_interruptible(&lock); \
        if (ret < 0) \
        { \
            return ret; \
        } \
        size = seq_read(file, buf, size, ppos); \
        up_read(&lock); \
        return size; \
    } \
    \
    static int nv_procfs_open_##name( \
        struct inode *inode, \
        struct file *filep \
@@ -122,6 +104,11 @@ typedef struct file_operations nv_proc_ops_t;
        { \
            return ret; \
        } \
        ret = nv_down_read_interruptible(&lock); \
        if (ret < 0) \
        { \
            single_release(inode, filep); \
        } \
        return ret; \
    } \
    \
@@ -130,6 +117,7 @@ typedef struct file_operations nv_proc_ops_t;
        struct file *filep \
    ) \
    { \
        up_read(&lock); \
        return single_release(inode, filep); \
    }

@@ -139,7 +127,46 @@ typedef struct file_operations nv_proc_ops_t;
    static const nv_proc_ops_t nv_procfs_##name##_fops = { \
        NV_PROC_OPS_SET_OWNER() \
        .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
        .NV_PROC_OPS_READ = nv_procfs_read_lock_##name, \
        .NV_PROC_OPS_READ = seq_read, \
        .NV_PROC_OPS_LSEEK = seq_lseek, \
        .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
    };

#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(name, lock, \
                                                write_callback) \
    NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
    \
    static ssize_t nv_procfs_write_##name( \
        struct file *file, \
        const char __user *buf, \
        size_t size, \
        loff_t *ppos \
    ) \
    { \
        ssize_t ret; \
        struct seq_file *s; \
        \
        s = file->private_data; \
        if (s == NULL) \
        { \
            return -EIO; \
        } \
        \
        ret = write_callback(s, buf + *ppos, size - *ppos); \
        if (ret == 0) \
        { \
            /* avoid infinite loop */ \
            ret = -EIO; \
        } \
        return ret; \
    } \
    \
    static const nv_proc_ops_t nv_procfs_##name##_fops = { \
        NV_PROC_OPS_SET_OWNER() \
        .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
        .NV_PROC_OPS_READ = seq_read, \
        .NV_PROC_OPS_WRITE = nv_procfs_write_##name, \
        .NV_PROC_OPS_LSEEK = seq_lseek, \
        .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
    };
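/*
 * Illustrative sketch (not part of the original diff): how such a macro is
 * consumed. The 535 branch takes the lock in open and drops it in release so
 * seq_read() runs lock-free; the 570 branch wraps the read itself.
 * Hypothetical node name and callback; assumes example_lock is an
 * rw_semaphore visible at this scope.
 */
static ssize_t nv_example_procfs_write(struct seq_file *s,
                                       const char __user *buf,
                                       size_t count)
{
    /* parse up to 'count' bytes from 'buf' here */
    return count;
}

NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(example, example_lock,
                                        nv_example_procfs_write);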
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,8 +25,10 @@
#define _NV_PROTO_H_

#include "nv-pci.h"
#include "nv-register-module.h"

extern const char *nv_device_name;
extern nvidia_module_t nv_fops;

void nv_acpi_register_notifier (nv_linux_state_t *);
void nv_acpi_unregister_notifier (nv_linux_state_t *);
@@ -59,8 +61,6 @@ NV_STATUS nv_uvm_resume (void);
void nv_uvm_notify_start_device (const NvU8 *uuid);
void nv_uvm_notify_stop_device (const NvU8 *uuid);
NV_STATUS nv_uvm_event_interrupt (const NvU8 *uuid);
NV_STATUS nv_uvm_drain_P2P (const NvU8 *uuid);
NV_STATUS nv_uvm_resume_P2P (const NvU8 *uuid);

/* Move these to nv.h once implemented by other UNIX platforms */
NvBool nvidia_get_gpuid_list (NvU32 *gpu_ids, NvU32 *gpu_count);
@@ -86,11 +86,8 @@ void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state
void nv_dev_free_stacks(nv_linux_state_t *);
NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *);
void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *);
int nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_remove_device_locked(nv_linux_state_t *);
NvBool nv_acpi_power_resource_method_present(struct pci_dev *);

int nv_linux_init_open_q(nv_linux_state_t *);
void nv_linux_stop_open_q(nv_linux_state_t *);

#endif /* _NV_PROTO_H_ */

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -21,31 +21,35 @@
 * DEALINGS IN THE SOFTWARE.
 */

/*!
 * @file rpcga102.c
 * @brief Ampere HAL routines
 */

#include "nvtypes.h"
#include "nv_sriov_defines.h"
#include "gpu/gpu.h"
#include "gpu/gpu_access.h"
#include "published/ampere/ga102/dev_vm.h"

void rpcVgpuGspWriteScratchRegister_GA102(OBJGPU *pGpu, NvU64 scratchRegVal)
{
    // Write the scratch register
    GPU_VREG_WR32(pGpu,
        NV_VIRTUAL_FUNCTION_PRIV_MAILBOX_SCRATCH(NV_VF_SCRATCH_REGISTER_GUEST_RPC_HI),
        NvU64_HI32(scratchRegVal));
    GPU_VREG_WR32(pGpu,
        NV_VIRTUAL_FUNCTION_PRIV_MAILBOX_SCRATCH(NV_VF_SCRATCH_REGISTER_GUEST_RPC_LO),
        NvU64_LO32(scratchRegVal));
}

void rpcVgpuGspRingDoorbell_GA102(OBJGPU *pGpu, NvU32 doorbellToken)
{
    // Ring the setup doorbell to send the request
    GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_DOORBELL, doorbellToken);
}

#ifndef _NV_REGISTER_MODULE_H_
#define _NV_REGISTER_MODULE_H_

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/poll.h>

#include "nvtypes.h"

typedef struct nvidia_module_s {
    struct module *owner;

    /* nvidia0, nvidia1 ..*/
    const char *module_name;

    /* module instance */
    NvU32 instance;

    /* file operations */
    int (*open)(struct inode *, struct file *filp);
    int (*close)(struct inode *, struct file *filp);
    int (*mmap)(struct file *filp, struct vm_area_struct *vma);
    int (*ioctl)(struct inode *, struct file * file, unsigned int cmd, unsigned long arg);
    unsigned int (*poll)(struct file * file, poll_table *wait);

} nvidia_module_t;

int nvidia_register_module(nvidia_module_t *);
int nvidia_unregister_module(nvidia_module_t *);

#endif
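/*
 * Illustrative sketch (not part of the original diff): nvidia_module_t is
 * the 535-era indirection that multiplexes several character-device
 * personalities onto the driver's reserved major number. Hypothetical
 * registration:
 */
static int nv_example_open(struct inode *inode, struct file *filp)
{
    return 0;
}

static nvidia_module_t nv_example_module = {
    .owner       = THIS_MODULE,
    .module_name = "nvidia",
    .instance    = 0,
    .open        = nv_example_open,
};

/*
 * nvidia_register_module(&nv_example_module) at load time;
 * nvidia_unregister_module(&nv_example_module) at unload.
 */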
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -42,9 +42,7 @@
#include <nv-caps.h>
#include <nv-firmware.h>
#include <nv-ioctl.h>
#include <nv-ioctl-numa.h>
#include <nvmisc.h>
#include <os/nv_memory_area.h>

extern nv_cap_t *nvidia_caps_root;

@@ -52,6 +50,9 @@ extern const NvBool nv_is_rm_firmware_supported_os;

#include <nv-kernel-interface-api.h>

/* NVIDIA's reserved major character device number (Linux). */
#define NV_MAJOR_DEVICE_NUMBER 195

#define GPU_UUID_LEN (16)

/*
@@ -111,15 +112,15 @@ typedef enum _TEGRASOC_WHICH_CLK
    TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN,
    TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA,
    TEGRASOC_WHICH_CLK_SPPLL0_VCO,
    TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
    TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA,
    TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB,
    TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
    TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
    TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
    TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
    TEGRASOC_WHICH_CLK_SPPLL0_DIV10,
    TEGRASOC_WHICH_CLK_SPPLL0_DIV25,
    TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
    TEGRASOC_WHICH_CLK_SPPLL1_VCO,
    TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
    TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
    TEGRASOC_WHICH_CLK_VPLL0_REF,
    TEGRASOC_WHICH_CLK_VPLL0,
    TEGRASOC_WHICH_CLK_VPLL1,
@@ -133,7 +134,7 @@ typedef enum _TEGRASOC_WHICH_CLK
    TEGRASOC_WHICH_CLK_DSI_PIXEL,
    TEGRASOC_WHICH_CLK_PRE_SOR0,
    TEGRASOC_WHICH_CLK_PRE_SOR1,
    TEGRASOC_WHICH_CLK_DP_LINKA_REF,
    TEGRASOC_WHICH_CLK_DP_LINK_REF,
    TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT,
    TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO,
    TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M,
@@ -144,7 +145,7 @@ typedef enum _TEGRASOC_WHICH_CLK
    TEGRASOC_WHICH_CLK_PLLHUB,
    TEGRASOC_WHICH_CLK_SOR0,
    TEGRASOC_WHICH_CLK_SOR1,
    TEGRASOC_WHICH_CLK_SOR_PADA_INPUT,
    TEGRASOC_WHICH_CLK_SOR_PAD_INPUT,
    TEGRASOC_WHICH_CLK_PRE_SF0,
    TEGRASOC_WHICH_CLK_SF0,
    TEGRASOC_WHICH_CLK_SF1,
@@ -222,6 +223,7 @@ typedef struct
#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1)

#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT)
#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT)
#define NV_RM_PAGES_TO_OS_PAGES(count) \
    ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
    ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
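/*
 * Worked example (not part of the original diff): RM always uses 4 KiB
 * pages, so NV_RM_PAGES_TO_OS_PAGES is a ceiling division. Assuming 64 KiB
 * OS pages (os_page_shift = 16, NV_RM_PAGE_SHIFT = 12, so the shift is 4):
 *
 *   NV_RM_PAGES_TO_OS_PAGES(17) == (17 >> 4) + ((17 & 15) != 0) == 2
 *
 * i.e. 17 RM pages round up to two 64 KiB OS pages.
 */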
@@ -280,7 +282,8 @@ typedef struct nv_usermap_access_params_s
    NvU64 offset;
    NvU64 *page_array;
    NvU64 num_pages;
    MemoryArea memArea;
    NvU64 mmap_start;
    NvU64 mmap_size;
    NvU64 access_start;
    NvU64 access_size;
    NvU64 remap_prot_extra;
@@ -296,7 +299,8 @@ typedef struct nv_alloc_mapping_context_s {
    NvU64 page_index;
    NvU64 *page_array;
    NvU64 num_pages;
    MemoryArea memArea;
    NvU64 mmap_start;
    NvU64 mmap_size;
    NvU64 access_start;
    NvU64 access_size;
    NvU64 remap_prot_extra;
@@ -329,11 +333,9 @@ typedef struct nv_soc_irq_info_s {
    NvS32 ref_count;
} nv_soc_irq_info_t;

#define NV_MAX_SOC_IRQS 10
#define NV_MAX_SOC_IRQS 6
#define NV_MAX_DPAUX_NUM_DEVICES 4
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2

#define NV_IGPU_LEGACY_STALL_IRQ 70
#define NV_IGPU_MAX_STALL_IRQS 3
@@ -368,8 +370,6 @@ typedef struct nv_state_t
{
    NvBool valid;
    NvU8 uuid[GPU_UUID_LEN];
    NvBool pci_uuid_read_attempted;
    NV_STATUS pci_uuid_status;
} nv_uuid_cache;
void *handle;

@@ -469,9 +469,17 @@ typedef struct nv_state_t
    NvHandle hDisp;
} rmapi;

/* Bool to check if ISO iommu enabled */
NvBool iso_iommu_present;

/* Bool to check if NISO iommu enabled */
NvBool niso_iommu_present;

/* Bool to check if dma-buf is supported */
NvBool dma_buf_supported;

NvBool printed_openrm_enable_unsupported_gpus_error;

/* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
NvBool nvpcf_dsm_in_gpu_scope;

@@ -480,29 +488,14 @@ typedef struct nv_state_t

/* Bool to check if the GPU has a coherent sysmem link */
NvBool coherent;

/* OS detected GPU has ATS capability */
NvBool ats_support;

/*
 * NUMA node ID of the CPU to which the GPU is attached.
 * Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
 */
NvS32 cpu_numa_node_id;

struct {
    /* Bool to check if ISO iommu enabled */
    NvBool iso_iommu_present;
    /* Bool to check if NISO iommu enabled */
    NvBool niso_iommu_present;
    /* Display SMMU Stream IDs */
    NvU32 dispIsoStreamId;
    NvU32 dispNisoStreamId;
} iommus;

/* Console is managed by drm drivers or NVKMS */
NvBool client_managed_console;
} nv_state_t;

// These defines need to be in sync with defines in system.h
#define OS_TYPE_LINUX 0x1
#define OS_TYPE_FREEBSD 0x2
#define OS_TYPE_SUNOS 0x3
#define OS_TYPE_VMWARE 0x4

#define NVFP_TYPE_NONE 0x0
#define NVFP_TYPE_REFCOUNTED 0x1
#define NVFP_TYPE_REGISTERED 0x2
@@ -512,7 +505,6 @@ struct nv_file_private_t
    NvHandle *handles;
    NvU16 maxHandles;
    NvU32 deviceInstance;
    NvU32 gpuInstanceId;
    NvU8 metadata[64];

    nv_file_private_t *ctl_nvfp;
@@ -541,18 +533,16 @@ typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t;
typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t;
typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t;
typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t;
typedef struct UvmGpuNvlinkInfo_tag *nvgpuNvlinkInfo_t;
typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t;
typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t;
typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t;
typedef struct UvmGpuAccessCntrConfig_tag nvgpuAccessCntrConfig_t;
typedef struct UvmGpuAccessCntrConfig_tag *nvgpuAccessCntrConfig_t;
typedef struct UvmGpuInfo_tag nvgpuInfo_t;
typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t;
typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t;
typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t;
typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t;
typedef struct UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t;
typedef struct UvmGpuExternalPhysAddrInfo_tag *nvgpuExternalPhysAddrInfo_t;
typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t;
typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t;
typedef struct UvmGpuChannelResourceBindParams_tag *nvgpuChannelResourceBindParams_t;
@@ -577,8 +567,7 @@ typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemor
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
#define NV_FLAG_SOC_IGPU 0x0200
/* To be set when an FLR needs to be triggered after device shut down. */
// Unused 0x0400
#define NV_FLAG_TRIGGER_FLR 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
// Unused 0x2000
@@ -614,16 +603,6 @@ typedef enum
    NV_POWER_STATE_RUNNING
} nv_power_state_t;

typedef struct
{
    const char *vidmem_power_status;
    const char *dynamic_power_status;
    const char *gc6_support;
    const char *gcoff_support;
    const char *s0ix_status;
    const char *db_support;
} nv_power_info_t;

#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga)

#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL)
@@ -645,10 +624,10 @@ typedef struct
    ((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000)))

#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
    ((nv)->iommus.iso_iommu_present)
    ((nv)->iso_iommu_present)

#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv) \
    ((nv)->iommus.niso_iommu_present)
    ((nv)->niso_iommu_present)
/*
 * GPU add/remove events
 */
@@ -767,7 +746,6 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
#endif
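/*
 * Worked example (not part of the original diff): NV_ALIGN_DOWN assumes a
 * power-of-two granularity 'g'; clearing the low bits rounds down:
 *
 *   NV_ALIGN_DOWN(0x12345, 0x1000) == 0x12000
 *
 * since ~(0x1000 - 1) masks off the low 12 bits.
 */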
/*
 * driver internal interfaces
 */
@@ -794,8 +772,8 @@ nv_state_t* NV_API_CALL nv_get_ctl_state (void);

void       NV_API_CALL nv_set_dma_address_size  (nv_state_t *, NvU32 );

NV_STATUS  NV_API_CALL nv_alias_pages           (nv_state_t *, NvU32, NvU64, NvU32, NvU32, NvU64, NvU64 *, NvBool, void **);
NV_STATUS  NV_API_CALL nv_alias_pages           (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
NV_STATUS  NV_API_CALL nv_alloc_pages           (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS  NV_API_CALL nv_alloc_pages           (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS  NV_API_CALL nv_free_pages            (nv_state_t *, NvU32, NvBool, NvU32, void *);

NV_STATUS  NV_API_CALL nv_register_user_pages   (nv_state_t *, NvU64, NvU64 *, void *, void **);
@@ -812,6 +790,8 @@ NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64,
void       NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *);

NV_STATUS  NV_API_CALL nv_dma_map_sgt           (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
NV_STATUS  NV_API_CALL nv_dma_map_pages         (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **);
NV_STATUS  NV_API_CALL nv_dma_unmap_pages       (nv_dma_device_t *, NvU64, NvU64 *, void **);

NV_STATUS  NV_API_CALL nv_dma_map_alloc         (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
NV_STATUS  NV_API_CALL nv_dma_unmap_alloc       (nv_dma_device_t *, NvU64, NvU64 *, void **);
@@ -823,6 +803,7 @@ NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU6
void       NV_API_CALL nv_dma_unmap_mmio        (nv_dma_device_t *, NvU64, NvU64);

void       NV_API_CALL nv_dma_cache_invalidate  (nv_dma_device_t *, void *);
void       NV_API_CALL nv_dma_enable_nvlink     (nv_dma_device_t *);

NvS32      NV_API_CALL nv_start_rc_timer        (nv_state_t *);
NvS32      NV_API_CALL nv_stop_rc_timer         (nv_state_t *);
@@ -837,7 +818,6 @@ void NV_API_CALL nv_acpi_methods_init (NvU32 *);
void       NV_API_CALL nv_acpi_methods_uninit   (void);

NV_STATUS  NV_API_CALL nv_acpi_method           (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
NV_STATUS  NV_API_CALL nv_acpi_d3cold_dsm_for_upstream_port (nv_state_t *, NvU8 *, NvU32, NvU32, NvU32 *);
NV_STATUS  NV_API_CALL nv_acpi_dsm_method       (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
NV_STATUS  NV_API_CALL nv_acpi_ddc_method       (nv_state_t *, void *, NvU32 *, NvBool);
NV_STATUS  NV_API_CALL nv_acpi_dod_method       (nv_state_t *, NvU32 *, NvU32 *);
@@ -849,7 +829,9 @@ NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32,

NV_STATUS  NV_API_CALL nv_log_error             (nv_state_t *, NvU32, const char *, va_list);

NvU64      NV_API_CALL nv_get_dma_start_address (nv_state_t *);
NV_STATUS  NV_API_CALL nv_set_primary_vga_status(nv_state_t *);
NV_STATUS  NV_API_CALL nv_pci_trigger_recovery  (nv_state_t *);
NvBool     NV_API_CALL nv_requires_dma_remap    (nv_state_t *);

NvBool     NV_API_CALL nv_is_rm_firmware_active(nv_state_t *);
@@ -859,11 +841,22 @@ void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void               NV_API_CALL nv_put_file_private(void *);

NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);

NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**);
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode);

void      NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv);

void      NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64);

void      NV_API_CALL nv_p2p_free_platform_data(void *data);

#if defined(NVCPU_PPC64LE)
NV_STATUS NV_API_CALL nv_get_nvlink_line_rate (nv_state_t *, NvU32 *);
#endif

NV_STATUS NV_API_CALL nv_revoke_gpu_mappings     (nv_state_t *);
void      NV_API_CALL nv_acquire_mmap_lock       (nv_state_t *);
void      NV_API_CALL nv_release_mmap_lock       (nv_state_t *);
@@ -886,8 +879,10 @@ void NV_API_CALL nv_cap_drv_exit(void);
NvBool    NV_API_CALL nv_is_gpu_accessible(nv_state_t *);
NvBool    NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);

NvU32     NV_API_CALL nv_get_os_type(void);

void      NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
void      NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *);
void      NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64 *);

struct dma_buf;
typedef struct nv_dma_buf nv_dma_buf_t;
@@ -895,15 +890,12 @@ struct drm_gem_object;

NV_STATUS NV_API_CALL nv_dma_import_sgt  (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf  (nv_dma_device_t *, struct dma_buf *, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf  (nv_dma_device_t *, struct dma_buf *, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd  (nv_dma_device_t *, NvS32, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd  (nv_dma_device_t *, NvS32, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **);
void NV_API_CALL nv_dma_release_dma_buf  (nv_dma_buf_t *);
void NV_API_CALL nv_dma_release_dma_buf  (void *, nv_dma_buf_t *);

void NV_API_CALL nv_schedule_uvm_isr(nv_state_t *);

NV_STATUS NV_API_CALL nv_schedule_uvm_drain_p2p(NvU8 *);
void NV_API_CALL nv_schedule_uvm_resume_p2p(NvU8 *);

NvBool NV_API_CALL nv_platform_supports_s0ix(void);
NvBool NV_API_CALL nv_s2idle_pm_configured(void);

@@ -916,8 +908,6 @@ typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *)
NV_STATUS NV_API_CALL nv_get_num_phys_pages(void *, NvU32 *);
NV_STATUS NV_API_CALL nv_get_phys_pages(void *, void *, NvU32 *);

void NV_API_CALL nv_get_disp_smmu_stream_ids(nv_state_t *, NvU32 *, NvU32 *);

/*
 * ---------------------------------------------------------------------------
 *
@@ -964,7 +954,6 @@ void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *
char*      NV_API_CALL rm_remove_spaces       (const char *);
char*      NV_API_CALL rm_string_token        (char **, const char);
void       NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
NV_STATUS  NV_API_CALL rm_get_adapter_status_external(nvidia_stack_t *, nv_state_t *);

NV_STATUS  NV_API_CALL rm_run_rc_callback     (nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_execute_work_item   (nvidia_stack_t *, void *);
@@ -994,24 +983,18 @@ NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU6
NV_STATUS  NV_API_CALL rm_p2p_destroy_mapping      (nvidia_stack_t *, NvU64);
NV_STATUS  NV_API_CALL rm_p2p_get_pages            (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *);
NV_STATUS  NV_API_CALL rm_p2p_get_gpu_info         (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
NV_STATUS  NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **);
NV_STATUS  NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *);
NV_STATUS  NV_API_CALL rm_p2p_register_callback    (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS  NV_API_CALL rm_p2p_put_pages            (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS  NV_API_CALL rm_p2p_put_pages_persistent (nvidia_stack_t *, void *, void *, void *);
NV_STATUS  NV_API_CALL rm_p2p_put_pages_persistent (nvidia_stack_t *, void *, void *);
NV_STATUS  NV_API_CALL rm_p2p_dma_map_pages        (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS  NV_API_CALL rm_dma_buf_dup_mem_handle   (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **);
void       NV_API_CALL rm_dma_buf_undup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS  NV_API_CALL rm_dma_buf_map_mem_handle   (nvidia_stack_t *, nv_state_t *,
                                                    NvHandle, NvHandle, MemoryRange,
                                                    NvU8, void *, NvBool, MemoryArea *);
NV_STATUS  NV_API_CALL rm_dma_buf_map_mem_handle   (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, void *, nv_phys_addr_range_t **, NvU32 *);
void       NV_API_CALL rm_dma_buf_unmap_mem_handle (nvidia_stack_t *, nv_state_t *,
                                                    NvHandle, NvHandle, NvU8, void *,
                                                    NvBool, MemoryArea);
void       NV_API_CALL rm_dma_buf_unmap_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, nv_phys_addr_range_t **, NvU32);
NV_STATUS  NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *,
                                                        nv_state_t *, NvHandle, NvHandle,
                                                        NvU8, NvHandle *, NvHandle *,
                                                        NvHandle *, void **, NvBool *);
NV_STATUS  NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
void       NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS  NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);

void       NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool     NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
@@ -1021,13 +1004,14 @@ NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool     NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
NvBool     NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
void       NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
|
void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
|
||||||
NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *);
|
NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *);
|
||||||
NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
|
NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
|
||||||
NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
|
NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
|
||||||
NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
|
NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
|
||||||
void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
|
void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
|
||||||
NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
|
NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
|
||||||
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
|
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
|
||||||
|
NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *);
|
||||||
NvBool NV_API_CALL rm_disable_iomap_wc(void);
|
NvBool NV_API_CALL rm_disable_iomap_wc(void);
|
||||||
|
|
||||||
void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
|
void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
|
||||||
@@ -1035,8 +1019,10 @@ void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_
|
|||||||
void NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
|
void NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
|
||||||
NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
|
NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
|
||||||
void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
|
void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
|
||||||
NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *);
|
NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool);
|
||||||
void NV_API_CALL rm_get_power_info(nvidia_stack_t *, nv_state_t *, nv_power_info_t *);
|
const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *);
|
||||||
|
const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *);
|
||||||
|
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
|
||||||
|
|
||||||
void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
|
void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
|
||||||
void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
|
void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
|
||||||
@@ -1044,15 +1030,12 @@ void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
|
|||||||
NvBool NV_API_CALL rm_is_altstack_in_use(void);
|
NvBool NV_API_CALL rm_is_altstack_in_use(void);
|
||||||
|
|
||||||
/* vGPU VFIO specific functions */
|
/* vGPU VFIO specific functions */
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *,
|
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32);
|
||||||
NvU32 *, NvU32 *, NvU32);
|
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
|
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
|
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
|
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
|
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
|
||||||
NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
|
NvU64 *, NvU64 *, NvU32 *, NvU8 *);
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32);
|
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
|
|
||||||
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
|
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
|
||||||
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);
|
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);
|
||||||
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
|
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
|
||||||
@@ -1086,9 +1069,6 @@ NV_STATUS NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t
|
|||||||
void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
|
void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
|
||||||
void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
|
void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
|
||||||
|
|
||||||
// Host1x specific functions.
|
|
||||||
NV_STATUS NV_API_CALL nv_get_syncpoint_aperture(NvU32, NvU64 *, NvU64 *, NvU32 *);
|
|
||||||
|
|
||||||
#if defined(NVCPU_X86_64)
|
#if defined(NVCPU_X86_64)
|
||||||
|
|
||||||
static inline NvU64 nv_rdtsc(void)
|
static inline NvU64 nv_rdtsc(void)
|
||||||
|
|||||||
@@ -86,7 +86,7 @@
 /* Not currently implemented for MSVC/ARM64. See bug 3366890. */
 # define nv_speculation_barrier()
 # define speculation_barrier() nv_speculation_barrier()
-#elif defined(NVCPU_IS_RISCV64)
+#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS
 # define nv_speculation_barrier()
 #else
 #error "Unknown compiler/chip family"

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,10 +62,10 @@ typedef struct
 /*******************************************************************************
     nvUvmInterfaceRegisterGpu
 
-    Registers the GPU with the provided physical UUID for use. A GPU must be
-    registered before its UUID can be used with any other API. This call is
-    ref-counted so every nvUvmInterfaceRegisterGpu must be paired with a
-    corresponding nvUvmInterfaceUnregisterGpu.
+    Registers the GPU with the provided UUID for use. A GPU must be registered
+    before its UUID can be used with any other API. This call is ref-counted so
+    every nvUvmInterfaceRegisterGpu must be paired with a corresponding
+    nvUvmInterfaceUnregisterGpu.
 
     You don't need to call nvUvmInterfaceSessionCreate before calling this.
 
@@ -79,13 +79,12 @@ NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatfo
 /*******************************************************************************
     nvUvmInterfaceUnregisterGpu
 
-    Unregisters the GPU with the provided physical UUID. This drops the ref
-    count from nvUvmInterfaceRegisterGpu. Once the reference count goes to 0
-    the device may no longer be accessible until the next
-    nvUvmInterfaceRegisterGpu call. No automatic resource freeing is performed,
-    so only make the last unregister call after destroying all your allocations
-    associated with that UUID (such as those from
-    nvUvmInterfaceAddressSpaceCreate).
+    Unregisters the GPU with the provided UUID. This drops the ref count from
+    nvUvmInterfaceRegisterGpu. Once the reference count goes to 0 the device may
+    no longer be accessible until the next nvUvmInterfaceRegisterGpu call. No
+    automatic resource freeing is performed, so only make the last unregister
+    call after destroying all your allocations associated with that UUID (such
+    as those from nvUvmInterfaceAddressSpaceCreate).
 
     If the UUID is not found, no operation is performed.
 */
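The two hunks above only reword the register/unregister comments; the ref-counted contract is the same on both sides of the compare. A minimal sketch of that pairing, with the caveat that the UvmGpuPlatformInfo name below is our completion of the out-parameter truncated in the hunk header, and nvUvmInterfaceUnregisterGpu is assumed to take the same UUID pointer described by its comment:

    NvProcessorUuid gpuUuid;            /* assumed already populated by the caller */
    UvmGpuPlatformInfo platformInfo;    /* name assumed from the truncated header */
    NV_STATUS status;

    status = nvUvmInterfaceRegisterGpu(&gpuUuid, &platformInfo);
    if (status != NV_OK)
        return status;

    /* ... nvUvmInterfaceAddressSpaceCreate(), allocations, and their teardown ... */

    /* Ref-counted: one unregister per successful register, issued only after
       every allocation tied to this UUID has been destroyed. */
    nvUvmInterfaceUnregisterGpu(&gpuUuid);

Because the call nests, a second registration of the same UUID must likewise be balanced before the device is actually released.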
@@ -122,10 +121,10 @@ NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session);
     nvUvmInterfaceDeviceCreate
 
     Creates a device object under the given session for the GPU with the given
-    physical UUID. Also creates a partition object for the device iff
-    bCreateSmcPartition is true and pGpuInfo->smcEnabled is true.
-    pGpuInfo->smcUserClientInfo will be used to determine the SMC partition in
-    this case. A device handle is returned in the device output parameter.
+    UUID. Also creates a partition object for the device iff bCreateSmcPartition
+    is true and pGpuInfo->smcEnabled is true. pGpuInfo->smcUserClientInfo will
+    be used to determine the SMC partition in this case. A device handle is
+    returned in the device output parameter.
 
     Error codes:
         NV_ERR_GENERIC
@@ -162,7 +161,6 @@ void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device);
 NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device,
                                            unsigned long long vaBase,
                                            unsigned long long vaSize,
-                                           NvBool enableAts,
                                            uvmGpuAddressSpaceHandle *vaSpace,
                                            UvmGpuAddressSpaceInfo *vaSpaceInfo);
 
@@ -424,6 +422,33 @@ NV_STATUS nvUvmInterfacePmaPinPages(void *pPma,
                                     NvU64 pageSize,
                                     NvU32 flags);
 
+/*******************************************************************************
+    nvUvmInterfacePmaUnpinPages
+
+    This function will unpin the physical memory allocated using PMA. The pages
+    passed as input must be already pinned, else this function will return an
+    error and rollback any change if any page is not previously marked "pinned".
+    Behaviour is undefined if any blacklisted pages are unpinned.
+
+    Arguments:
+        pPma[IN]             - Pointer to PMA object.
+        pPages[IN]           - Array of pointers, containing the PA base
+                               address of each page to be unpinned.
+        pageCount [IN]       - Number of pages required to be unpinned.
+        pageSize [IN]        - Page size of each page to be unpinned.
+
+    Error codes:
+        NV_ERR_INVALID_ARGUMENT - Invalid input arguments.
+        NV_ERR_GENERIC          - Unexpected error. We try hard to avoid
+                                  returning this error code as is not very
+                                  informative.
+        NV_ERR_NOT_SUPPORTED    - Operation not supported on broken FB
+*/
+NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma,
+                                      NvU64 *pPages,
+                                      NvLength pageCount,
+                                      NvU64 pageSize);
+
 /*******************************************************************************
     nvUvmInterfaceMemoryFree
 
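The hunk above shows the 535 branch carrying an nvUvmInterfacePmaUnpinPages entry point that the 570 branch drops. A minimal sketch of the unpin contract it documents; pPma and the page array are assumed to come from an earlier nvUvmInterfacePmaPinPages call, whose full parameter list lies outside this hunk:

    /* Unpin four previously pinned 64K pages. Per the argument block above,
       pPages holds the PA base address of each page. */
    NvU64 pPages[4];   /* assumed filled in when the pages were pinned */
    NV_STATUS status;

    status = nvUvmInterfacePmaUnpinPages(pPma, pPages, 4, UVM_PAGE_SIZE_64K);
    if (status == NV_ERR_INVALID_ARGUMENT) {
        /* At least one page was not pinned; the comment above says the call
           rolls back any partial change in that case. */
    }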
@@ -592,14 +617,6 @@ void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel);
     Error codes:
         NV_ERR_GENERIC
         NV_ERR_NO_MEMORY
-        NV_ERR_INVALID_STATE
-        NV_ERR_NOT_SUPPORTED
-        NV_ERR_NOT_READY
-        NV_ERR_INVALID_LOCK_STATE
-        NV_ERR_INVALID_STATE
-        NV_ERR_NVLINK_FABRIC_NOT_READY
-        NV_ERR_NVLINK_FABRIC_FAILURE
-        NV_ERR_GPU_MEMORY_ONLINING_FAILURE
 */
 NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device,
                                   UvmGpuCaps *caps);
@@ -621,8 +638,6 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
     nvUvmInterfaceGetGpuInfo
 
     Return various gpu info, refer to the UvmGpuInfo struct for details.
-    The input UUID is for the physical GPU and the pGpuClientInfo identifies
-    the SMC partition if SMC is enabled and the partition exists.
     If no gpu matching the uuid is found, an error will be returned.
 
     On Ampere+ GPUs, pGpuClientInfo contains SMC information provided by the
@@ -630,9 +645,6 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
 
     Error codes:
         NV_ERR_GENERIC
-        NV_ERR_NO_MEMORY
-        NV_ERR_GPU_UUID_NOT_FOUND
-        NV_ERR_INSUFFICIENT_PERMISSIONS
         NV_ERR_INSUFFICIENT_RESOURCES
 */
 NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid,
@@ -845,7 +857,7 @@ NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device,
     UVM GPU UNLOCK
 
     Arguments:
-        device[IN]     - Device handle associated with the gpu
+        gpuUuid[IN]    - UUID of the GPU to operate on
        bOwnInterrupts  - Set to NV_TRUE for UVM to take ownership of the
                          replayable page fault interrupts. Set to NV_FALSE
                          to return ownership of the page fault interrupts
@@ -961,45 +973,14 @@ NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
     NOTES:
       - This function DOES NOT acquire the RM API or GPU locks. That is because
         it is called during fault servicing, which could produce deadlocks.
-      - This function should not be called when interrupts are disabled.
 
     Arguments:
-        pFaultInfo[IN]     - information provided by RM for fault handling.
-                             used for obtaining the device handle without locks.
-        bCopyAndFlush[IN]  - Instructs RM to perform the flush in the Copy+Flush mode.
-                             In this mode, RM will perform a copy of the packets from
-                             the HW buffer to UVM's SW buffer as part of performing
-                             the flush. This mode gives UVM the opportunity to observe
-                             the packets contained within the HW buffer at the time
-                             of issuing the call.
+        device[IN]     - Device handle associated with the gpu
 
     Error codes:
         NV_ERR_INVALID_ARGUMENT
 */
-NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(UvmGpuFaultInfo *pFaultInfo,
-                                                   NvBool bCopyAndFlush);
+NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(uvmGpuDeviceHandle device);
 
-/*******************************************************************************
-    nvUvmInterfaceTogglePrefetchFaults
-
-    This function sends an RPC to GSP in order to toggle the prefetch fault PRI.
-
-    NOTES:
-      - This function DOES NOT acquire the RM API or GPU locks. That is because
-        it is called during fault servicing, which could produce deadlocks.
-      - This function should not be called when interrupts are disabled.
-
-    Arguments:
-        pFaultInfo[IN]    - Information provided by RM for fault handling.
-                            Used for obtaining the device handle without locks.
-        bEnable[IN]       - Instructs RM whether to toggle generating faults on
-                            prefetch on/off.
-
-    Error codes:
-        NV_ERR_INVALID_ARGUMENT
-*/
-NV_STATUS nvUvmInterfaceTogglePrefetchFaults(UvmGpuFaultInfo *pFaultInfo,
-                                             NvBool bEnable);
-
 /*******************************************************************************
     nvUvmInterfaceInitAccessCntrInfo
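On the 535 side of this hunk the flush entry point takes a device handle, rather than the 570 branch's (UvmGpuFaultInfo *, NvBool bCopyAndFlush) pair, and the Copy+Flush mode is gone. A minimal sketch of the 535-side call, assuming `device` came from nvUvmInterfaceDeviceCreate:

    /* Flush the replayable fault buffer. Per the NOTES above, this takes no
       RM API or GPU locks because it runs during fault servicing. */
    NV_STATUS status = nvUvmInterfaceFlushReplayableFaultBuffer(device);
    if (status == NV_ERR_INVALID_ARGUMENT) {
        /* invalid device handle */
    }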
@@ -1056,7 +1037,7 @@ NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device,
 */
 NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device,
                                          UvmGpuAccessCntrInfo *pAccessCntrInfo,
-                                         const UvmGpuAccessCntrConfig *pAccessCntrConfig);
+                                         UvmGpuAccessCntrConfig *pAccessCntrConfig);
 
 /*******************************************************************************
     nvUvmInterfaceDisableAccessCntr
@@ -1093,22 +1074,6 @@ NV_STATUS nvUvmInterfaceRegisterUvmCallbacks(struct UvmOpsUvmEvents *importedUvm
 //
 void nvUvmInterfaceDeRegisterUvmOps(void);
 
-/*******************************************************************************
-    nvUvmInterfaceGetNvlinkInfo
-
-    Gets NVLINK information from RM.
-
-    Arguments:
-        device[IN]        - GPU device handle
-        nvlinkInfo [OUT]  - Pointer to NvlinkInfo structure
-
-    Error codes:
-        NV_ERROR
-        NV_ERR_INVALID_ARGUMENT
-*/
-NV_STATUS nvUvmInterfaceGetNvlinkInfo(uvmGpuDeviceHandle device,
-                                      UvmGpuNvlinkInfo *nvlinkInfo);
-
 /*******************************************************************************
     nvUvmInterfaceP2pObjectCreate
 
@@ -1122,8 +1087,7 @@ NV_STATUS nvUvmInterfaceGetNvlinkInfo(uvmGpuDeviceHandle device,
 
     Error codes:
         NV_ERR_INVALID_ARGUMENT
-        NV_ERR_OBJECT_NOT_FOUND : If device object associated with the device
-                                  handles isn't found.
+        NV_ERR_OBJECT_NOT_FOUND : If device object associated with the uuids aren't found.
 */
 NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1,
                                         uvmGpuDeviceHandle device2,
@@ -1176,8 +1140,6 @@ void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session,
         NV_ERR_NOT_READY - Returned when querying the PTEs requires a deferred setup
                            which has not yet completed. It is expected that the caller
                            will reattempt the call until a different code is returned.
-                           As an example, multi-node systems which require querying
-                           PTEs from the Fabric Manager may return this code.
 */
 NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
                                              NvHandle hMemory,
@@ -1185,48 +1147,6 @@ NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
                                              NvU64 size,
                                              UvmGpuExternalMappingInfo *gpuExternalMappingInfo);
 
-/*******************************************************************************
-    nvUvmInterfaceGetExternalAllocPhysAddrs
-
-    The interface builds the RM physical addrs using the provided input parameters.
-
-    Arguments:
-        vaSpace[IN]                     - vaSpace handle.
-        hMemory[IN]                     - Memory handle.
-        offset [IN]                     - Offset from the beginning of the allocation
-                                          where PTE mappings should begin.
-                                          Should be aligned with mappingPagesize
-                                          in gpuExternalMappingInfo associated
-                                          with the allocation.
-        size [IN]                       - Length of the allocation for which PhysAddrs
-                                          should be built.
-                                          Should be aligned with mappingPagesize
-                                          in gpuExternalMappingInfo associated
-                                          with the allocation.
-                                          size = 0 will be interpreted as the total size
-                                          of the allocation.
-        gpuExternalMappingInfo[IN/OUT]  - See nv_uvm_types.h for more information.
-
-    Error codes:
-        NV_ERR_INVALID_ARGUMENT         - Invalid parameter/s is passed.
-        NV_ERR_INVALID_OBJECT_HANDLE    - Invalid memory handle is passed.
-        NV_ERR_NOT_SUPPORTED            - Functionality is not supported (see comments in nv_gpu_ops.c)
-        NV_ERR_INVALID_BASE             - offset is beyond the allocation size
-        NV_ERR_INVALID_LIMIT            - (offset + size) is beyond the allocation size.
-        NV_ERR_BUFFER_TOO_SMALL         - gpuExternalMappingInfo.physAddrBufferSize is insufficient to
-                                          store single physAddr.
-        NV_ERR_NOT_READY                - Returned when querying the physAddrs requires a deferred setup
-                                          which has not yet completed. It is expected that the caller
-                                          will reattempt the call until a different code is returned.
-                                          As an example, multi-node systems which require querying
-                                          physAddrs from the Fabric Manager may return this code.
-*/
-NV_STATUS nvUvmInterfaceGetExternalAllocPhysAddrs(uvmGpuAddressSpaceHandle vaSpace,
-                                                  NvHandle hMemory,
-                                                  NvU64 offset,
-                                                  NvU64 size,
-                                                  UvmGpuExternalPhysAddrInfo *gpuExternalPhysAddrsInfo);
-
 /*******************************************************************************
     nvUvmInterfaceRetainChannel
 
@@ -1529,17 +1449,18 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
                                                 NvU32 methodStreamSize);
 
 /*******************************************************************************
-    nvUvmInterfaceReportFatalError
+    CSL Interface and Locking
 
-    Reports a global fatal error so RM can inform the clients that a node reboot
-    is necessary to recover from this error. This function can be called from
-    any lock environment, bottom half or non-interrupt context.
+    The following functions do not acquire the RM API or GPU locks and must not be called
+    concurrently with the same UvmCslContext parameter in different threads. The caller must
+    guarantee this exclusion.
 
-*/
-void nvUvmInterfaceReportFatalError(NV_STATUS error);
-
-/*******************************************************************************
-    Cryptography Services Library (CSL) Interface
-
+    * nvUvmInterfaceCslRotateIv
+    * nvUvmInterfaceCslEncrypt
+    * nvUvmInterfaceCslDecrypt
+    * nvUvmInterfaceCslSign
+    * nvUvmInterfaceCslQueryMessagePool
+    * nvUvmInterfaceCslIncrementIv
 */
 
 /*******************************************************************************
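The 535 branch gathers the per-function locking text into the single "CSL Interface and Locking" preamble above: the listed CSL functions take no RM API or GPU locks, so exclusion per UvmCslContext is entirely the caller's job. A minimal sketch of that external serialization, using a Linux driver-side lock that is an assumption of this note and not part of the interface:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(uvm_csl_lock);   /* hypothetical caller-owned lock */

    static NV_STATUS uvm_csl_rotate_iv_locked(UvmCslContext *ctx,
                                              UvmCslOperation op)
    {
        NV_STATUS status;

        /* Serialize every CSL call made with this context, as the
           preamble above requires. */
        mutex_lock(&uvm_csl_lock);
        status = nvUvmInterfaceCslRotateIv(ctx, op);
        mutex_unlock(&uvm_csl_lock);

        return status;
    }

One lock per UvmCslContext, rather than a single global lock, would also satisfy the stated requirement, since only concurrent use of the same context is forbidden.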
@@ -1550,11 +1471,8 @@ void nvUvmInterfaceReportFatalError(NV_STATUS error);
     The lifetime of the context is the same as the lifetime of the secure channel
     it is paired with.
 
-    Locking: This function acquires an API lock.
-    Memory : This function dynamically allocates memory.
-
     Arguments:
-        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        uvmCslContext[IN/OUT] - The CSL context.
         channel[IN]           - Handle to a secure channel.
 
     Error codes:
@@ -1572,62 +1490,30 @@ NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
 
     If context is already deinitialized then function returns immediately.
 
-    Locking: This function does not acquire an API or GPU lock.
-    Memory : This function may free memory.
-
     Arguments:
-        uvmCslContext[IN] - The CSL context associated with a channel.
+        uvmCslContext[IN] - The CSL context.
 */
 void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);
 
-/*******************************************************************************
-    nvUvmInterfaceCslRotateKey
-
-    Disables channels and rotates keys.
-
-    This function disables channels and rotates associated keys. The channels
-    associated with the given CSL contexts must be idled before this function is
-    called. To trigger key rotation all allocated channels for a given key must
-    be present in the list. If the function returns successfully then the CSL
-    contexts have been updated with the new key.
-
-    Locking: This function attempts to acquire the GPU lock. In case of failure
-             to acquire the return code is NV_ERR_STATE_IN_USE. The caller must
-             guarantee that no CSL function, including this one, is invoked
-             concurrently with the CSL contexts in contextList.
-    Memory : This function dynamically allocates memory.
-
-    Arguments:
-        contextList[IN/OUT]  - An array of pointers to CSL contexts.
-        contextListCount[IN] - Number of CSL contexts in contextList. Its value
-                               must be greater than 0.
-    Error codes:
-        NV_ERR_INVALID_ARGUMENT - contextList is NULL or contextListCount is 0.
-        NV_ERR_STATE_IN_USE     - Unable to acquire lock / resource. Caller
-                                  can retry at a later time.
-        NV_ERR_GENERIC          - A failure other than _STATE_IN_USE occurred
-                                  when attempting to acquire a lock.
-*/
-NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
-                                     NvU32 contextListCount);
-
 /*******************************************************************************
     nvUvmInterfaceCslRotateIv
 
     Rotates the IV for a given channel and operation.
 
     This function will rotate the IV on both the CPU and the GPU.
-    For a given operation the channel must be idle before calling this function.
-    This function can be called regardless of the value of the IV's message counter.
+    Outstanding messages that have been encrypted by the GPU should first be
+    decrypted before calling this function with operation equal to
+    UVM_CSL_OPERATION_DECRYPT. Similarly, outstanding messages that have been
+    encrypted by the CPU should first be decrypted before calling this function
+    with operation equal to UVM_CSL_OPERATION_ENCRYPT. For a given operation
+    the channel must be idle before calling this function. This function can be
+    called regardless of the value of the IV's message counter.
 
-    Locking: This function attempts to acquire the GPU lock. In case of failure to
-             acquire the return code is NV_ERR_STATE_IN_USE. The caller must guarantee
-             that no CSL function, including this one, is invoked concurrently with
-             the same CSL context.
-    Memory : This function does not dynamically allocate memory.
+    See "CSL Interface and Locking" for locking requirements.
+    This function does not perform dynamic memory allocation.
 
     Arguments:
-        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        uvmCslContext[IN/OUT] - The CSL context.
         operation[IN]         - Either
                                 - UVM_CSL_OPERATION_ENCRYPT
                                 - UVM_CSL_OPERATION_DECRYPT
@@ -1635,11 +1521,7 @@ Arguments:
     Error codes:
         NV_ERR_INSUFFICIENT_RESOURCES - The rotate operation would cause a counter
                                         to overflow.
-        NV_ERR_STATE_IN_USE           - Unable to acquire lock / resource. Caller
-                                        can retry at a later time.
         NV_ERR_INVALID_ARGUMENT       - Invalid value for operation.
-        NV_ERR_GENERIC                - A failure other than _STATE_IN_USE occurred
-                                        when attempting to acquire a lock.
 */
 NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
                                     UvmCslOperation operation);
@@ -1656,13 +1538,11 @@ NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
     The encryptIV can be obtained from nvUvmInterfaceCslIncrementIv.
     However, it is optional. If it is NULL, the next IV in line will be used.
 
-    Locking: This function does not acquire an API or GPU lock.
-             The caller must guarantee that no CSL function, including this one,
-             is invoked concurrently with the same CSL context.
-    Memory : This function does not dynamically allocate memory.
+    See "CSL Interface and Locking" for locking requirements.
+    This function does not perform dynamic memory allocation.
 
     Arguments:
-        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        uvmCslContext[IN/OUT] - The CSL context.
         bufferSize[IN]        - Size of the input and output buffers in
                                 units of bytes. Value can range from 1 byte
                                 to (2^32) - 1 bytes.
@@ -1673,9 +1553,8 @@ Arguments:
                                 Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.
 
     Error codes:
-        NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
-                                - The size of the data is 0 bytes.
-                                - The encryptIv has already been used.
+        NV_ERR_INVALID_ARGUMENT - The size of the data is 0 bytes.
+                                - The encryptIv has already been used.
 */
 NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
                                    NvU32 bufferSize,
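Per the comment above, the encrypt IV may be reserved ahead of time with nvUvmInterfaceCslIncrementIv (declared further down), or left NULL to take the next IV in line. A minimal sketch of reserving one; `ctx` is assumed to be a context prepared by nvUvmInterfaceCslInitContext, and the later nvUvmInterfaceCslEncrypt call is elided because its full parameter list falls outside these hunks:

    UvmCslIv encryptIv;
    NV_STATUS status;

    /* Advance the encrypt-side IV by one message and capture it. */
    status = nvUvmInterfaceCslIncrementIv(ctx, UVM_CSL_OPERATION_ENCRYPT,
                                          1, &encryptIv);
    if (status != NV_OK)
        return status;

    /* encryptIv may now be handed to nvUvmInterfaceCslEncrypt exactly once;
       per the error table above, reusing it yields NV_ERR_INVALID_ARGUMENT. */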
@@ -1694,15 +1573,8 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
     maximized when the input and output buffers are 16-byte aligned. This is
     natural alignment for AES block.
 
-    During a key rotation event the previous key is stored in the CSL context.
-    This allows data encrypted by the GPU to be decrypted with the previous key.
-    The keyRotationId parameter identifies which key is used. The first key rotation
-    ID has a value of 0 that increments by one for each key rotation event.
-
-    Locking: This function does not acquire an API or GPU lock.
-             The caller must guarantee that no CSL function, including this one,
-             is invoked concurrently with the same CSL context.
-    Memory : This function does not dynamically allocate memory.
+    See "CSL Interface and Locking" for locking requirements.
+    This function does not perform dynamic memory allocation.
 
     Arguments:
         uvmCslContext[IN/OUT] - The CSL context.
@@ -1711,8 +1583,6 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
         decryptIv[IN]         - IV used to decrypt the ciphertext. Its value can either be given by
                                 nvUvmInterfaceCslIncrementIv, or, if NULL, the CSL context's
                                 internal counter is used.
-        keyRotationId[IN]     - Specifies the key that is used for decryption.
-                                A value of NV_U32_MAX specifies the current key.
         inputBuffer[IN]       - Address of ciphertext input buffer.
         outputBuffer[OUT]     - Address of plaintext output buffer.
         addAuthData[IN]       - Address of the plaintext additional authenticated data used to
@@ -1733,7 +1603,6 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                    NvU32 bufferSize,
                                    NvU8 const *inputBuffer,
                                    UvmCslIv const *decryptIv,
-                                   NvU32 keyRotationId,
                                    NvU8 *outputBuffer,
                                    NvU8 const *addAuthData,
                                    NvU32 addAuthDataSize,
@@ -1747,13 +1616,11 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
     Auth and input buffers must not overlap. If they do then calling this function produces
     undefined behavior.
 
-    Locking: This function does not acquire an API or GPU lock.
-             The caller must guarantee that no CSL function, including this one,
-             is invoked concurrently with the same CSL context.
-    Memory : This function does not dynamically allocate memory.
+    See "CSL Interface and Locking" for locking requirements.
+    This function does not perform dynamic memory allocation.
 
     Arguments:
-        uvmCslContext[IN/OUT] - The CSL context associated with a channel.
+        uvmCslContext[IN/OUT] - The CSL context.
         bufferSize[IN]        - Size of the input buffer in units of bytes.
                                 Value can range from 1 byte to (2^32) - 1 bytes.
         inputBuffer[IN]       - Address of plaintext input buffer.
@@ -1762,8 +1629,7 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
 
     Error codes:
         NV_ERR_INSUFFICIENT_RESOURCES - The signing operation would cause a counter overflow to occur.
-        NV_ERR_INVALID_ARGUMENT       - The CSL context is not associated with a channel.
-                                      - The size of the data is 0 bytes.
+        NV_ERR_INVALID_ARGUMENT       - The size of the data is 0 bytes.
 */
 NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
                                 NvU32 bufferSize,
@@ -1775,10 +1641,8 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
 
     Returns the number of messages that can be encrypted before the message counter will overflow.
 
-    Locking: This function does not acquire an API or GPU lock.
-    Memory : This function does not dynamically allocate memory.
-             The caller must guarantee that no CSL function, including this one,
-             is invoked concurrently with the same CSL context.
+    See "CSL Interface and Locking" for locking requirements.
+    This function does not perform dynamic memory allocation.
 
     Arguments:
         uvmCslContext[IN/OUT] - The CSL context.
@@ -1802,10 +1666,8 @@ NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
     can be used in nvUvmInterfaceCslEncrypt. If operation is UVM_CSL_OPERATION_DECRYPT then
     the returned IV can be used in nvUvmInterfaceCslDecrypt.
 
-    Locking: This function does not acquire an API or GPU lock.
-             The caller must guarantee that no CSL function, including this one,
-             is invoked concurrently with the same CSL context.
-    Memory : This function does not dynamically allocate memory.
+    See "CSL Interface and Locking" for locking requirements.
+    This function does not perform dynamic memory allocation.
 
     Arguments:
         uvmCslContext[IN/OUT] - The CSL context.
@@ -1813,7 +1675,7 @@ Arguments:
                                 - UVM_CSL_OPERATION_ENCRYPT
                                 - UVM_CSL_OPERATION_DECRYPT
         increment[IN]         - The amount by which the IV is incremented. Can be 0.
-        iv[OUT]               - If non-NULL, a buffer to store the incremented IV.
+        iv[out]               - If non-NULL, a buffer to store the incremented IV.
 
     Error codes:
         NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is illegal.
@@ -1825,42 +1687,4 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
                                        NvU64 increment,
                                        UvmCslIv *iv);
 
-/*******************************************************************************
-    nvUvmInterfaceCslLogEncryption
-
-    Checks and logs information about encryptions associated with the given
-    CSL context.
-
-    For contexts associated with channels, this function does not modify elements of
-    the UvmCslContext, and must be called for every CPU/GPU encryption.
-
-    For the context associated with fault buffers, bufferSize can encompass multiple
-    encryption invocations, and the UvmCslContext will be updated following a key
-    rotation event.
-
-    In either case the IV remains unmodified after this function is called.
-
-    Locking: This function does not acquire an API or GPU lock.
-    Memory : This function does not dynamically allocate memory.
-             The caller must guarantee that no CSL function, including this one,
-             is invoked concurrently with the same CSL context.
-
-    Arguments:
-        uvmCslContext[IN/OUT] - The CSL context.
-        operation[IN]         - If the CSL context is associated with a fault
-                                buffer, this argument is ignored. If it is
-                                associated with a channel, it must be either
-                                - UVM_CSL_OPERATION_ENCRYPT
-                                - UVM_CSL_OPERATION_DECRYPT
-        bufferSize[IN]        - The size of the buffer(s) encrypted by the
-                                external entity in units of bytes.
-
-    Error codes:
-        NV_ERR_INSUFFICIENT_RESOURCES - The encryption would cause a counter
-                                        to overflow.
-*/
-NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
-                                         UvmCslOperation operation,
-                                         NvU32 bufferSize);
-
 #endif // _NV_UVM_INTERFACE_H_

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,13 +39,12 @@
 // are multiple BIG page sizes in RM. These defines are used as flags to "0"
 // should be OK when user is not sure which pagesize allocation it wants
 //
-#define UVM_PAGE_SIZE_DEFAULT 0x0ULL
-#define UVM_PAGE_SIZE_4K 0x1000ULL
-#define UVM_PAGE_SIZE_64K 0x10000ULL
-#define UVM_PAGE_SIZE_128K 0x20000ULL
-#define UVM_PAGE_SIZE_2M 0x200000ULL
-#define UVM_PAGE_SIZE_512M 0x20000000ULL
-#define UVM_PAGE_SIZE_256G 0x4000000000ULL
+#define UVM_PAGE_SIZE_DEFAULT 0x0
+#define UVM_PAGE_SIZE_4K 0x1000
+#define UVM_PAGE_SIZE_64K 0x10000
+#define UVM_PAGE_SIZE_128K 0x20000
+#define UVM_PAGE_SIZE_2M 0x200000
+#define UVM_PAGE_SIZE_512M 0x20000000
 
 //
 // When modifying flags, make sure they are compatible with the mirrored
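The 570 side of this hunk gives every page-size constant an explicit unsigned-long-long type and adds a 256G size whose literal, 0x4000000000, needs more than 32 bits in the first place. The ULL suffix also keeps derived arithmetic in 64 bits. A hypothetical illustration; the variable name is ours, not the header's:

    /* With the unsuffixed 535-side defines this multiply is evaluated at int
       width and overflows; with the 570-side ...ULL defines it yields the
       expected 0x100000000. */
    NvU64 totalBytes = UVM_PAGE_SIZE_512M * 8;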
@@ -105,10 +104,6 @@ typedef struct UvmGpuMemoryInfo_tag
     // Out: Set to TRUE, if the allocation is in sysmem.
     NvBool sysmem;
 
-    // Out: Set to TRUE, if this allocation is treated as EGM.
-    //      sysmem is also TRUE when egm is TRUE.
-    NvBool egm;
-
     // Out: Set to TRUE, if the allocation is a constructed
     //      under a Device or Subdevice.
     //      All permutations of sysmem and deviceDescendant are valid.
@@ -130,10 +125,6 @@ typedef struct UvmGpuMemoryInfo_tag
 
     // Out: Uuid of the GPU to which the allocation belongs.
     //      This is only valid if deviceDescendant is NV_TRUE.
-    //      When egm is NV_TRUE, this is also the UUID of the GPU
-    //      for which EGM is local.
-    //      If the GPU has SMC enabled, the UUID is the GI UUID.
-    //      Otherwise, it is the UUID for the physical GPU.
     //      Note: If the allocation is owned by a device in
     //      an SLI group and the allocation is broadcast
     //      across the SLI group, this UUID will be any one
@@ -268,7 +259,6 @@ typedef struct UvmGpuChannelInfo_tag
 
     // The errorNotifier is filled out when the channel hits an RC error.
     NvNotification *errorNotifier;
-    NvNotification *keyRotationNotifier;
 
     NvU32 hwRunlistId;
     NvU32 hwChannelId;
@@ -294,13 +284,13 @@ typedef struct UvmGpuChannelInfo_tag
 
     // GPU VAs of both GPFIFO and GPPUT are needed in Confidential Computing
     // so a channel can be controlled via another channel (SEC2 or WLC/LCIC)
     NvU64 gpFifoGpuVa;
     NvU64 gpPutGpuVa;
     NvU64 gpGetGpuVa;
     // GPU VA of work submission offset is needed in Confidential Computing
     // so CE channels can ring doorbell of other channels as required for
     // WLC/LCIC work submission
     NvU64 workSubmissionOffsetGpuVa;
 } UvmGpuChannelInfo;
 
 typedef enum
@@ -342,7 +332,7 @@ typedef struct UvmGpuPagingChannelAllocParams_tag
 
 // The max number of Copy Engines supported by a GPU.
 // The gpu ops build has a static assert that this is the correct number.
-#define UVM_COPY_ENGINE_COUNT_MAX 64
+#define UVM_COPY_ENGINE_COUNT_MAX 10
 
 typedef struct
 {
@@ -396,7 +386,6 @@ typedef enum
     UVM_LINK_TYPE_NVLINK_2,
     UVM_LINK_TYPE_NVLINK_3,
     UVM_LINK_TYPE_NVLINK_4,
-    UVM_LINK_TYPE_NVLINK_5,
     UVM_LINK_TYPE_C2C,
 } UVM_LINK_TYPE;
 
@@ -543,46 +532,12 @@ typedef struct UvmGpuExternalMappingInfo_tag
     NvU32 pteSize;
 } UvmGpuExternalMappingInfo;
 
-typedef struct UvmGpuExternalPhysAddrInfo_tag
-{
-    // In: Virtual permissions. Returns
-    //     NV_ERR_INVALID_ACCESS_TYPE if input is
-    //     inaccurate
-    UvmRmGpuMappingType mappingType;
-
-    // In: Size of the buffer to store PhysAddrs (in bytes).
-    NvU64 physAddrBufferSize;
-
-    // In: Page size for mapping
-    //     If this field is passed as 0, the page size
-    //     of the allocation is used for mapping.
-    //     nvUvmInterfaceGetExternalAllocPtes must pass
-    //     this field as zero.
-    NvU64 mappingPageSize;
-
-    // In: Pointer to a buffer to store PhysAddrs.
-    // Out: The interface will fill the buffer with PhysAddrs
-    NvU64 *physAddrBuffer;
-
-    // Out: Number of PhysAddrs filled in to the buffer.
-    NvU64 numWrittenPhysAddrs;
-
-    // Out: Number of PhysAddrs remaining to be filled
-    //      if the buffer is not sufficient to accommodate
-    //      requested PhysAddrs.
-    NvU64 numRemainingPhysAddrs;
-} UvmGpuExternalPhysAddrInfo;
-
 typedef struct UvmGpuP2PCapsParams_tag
 {
     // Out: peerId[i] contains gpu[i]'s peer id of gpu[1 - i]. Only defined if
     //      the GPUs are direct peers.
     NvU32 peerIds[2];
 
-    // Out: peerId[i] contains gpu[i]'s EGM peer id of gpu[1 - i]. Only defined
-    //      if the GPUs are direct peers and EGM enabled in the system.
-    NvU32 egmPeerIds[2];
-
     // Out: UVM_LINK_TYPE
     NvU32 p2pLink;
 
@@ -598,6 +553,11 @@ typedef struct UvmGpuP2PCapsParams_tag
     //      second, not taking into account the protocols overhead. The reported
     //      bandwidth for indirect peers is zero.
     NvU32 totalLinkLineRateMBps;
+
+    // Out: True if the peers have a indirect link to communicate. On P9
+    //      systems, this is true if peers are connected to different NPUs that
+    //      forward the requests between them.
+    NvU32 indirectAccess : 1;
 } UvmGpuP2PCapsParams;
 
 // Platform-wide information
@@ -620,13 +580,18 @@ typedef struct UvmGpuClientInfo_tag
     NvHandle hSmcPartRef;
 } UvmGpuClientInfo;
 
+typedef enum
+{
+    UVM_GPU_CONF_COMPUTE_MODE_NONE,
+    UVM_GPU_CONF_COMPUTE_MODE_APM,
+    UVM_GPU_CONF_COMPUTE_MODE_HCC,
+    UVM_GPU_CONF_COMPUTE_MODE_COUNT
+} UvmGpuConfComputeMode;
+
 typedef struct UvmGpuConfComputeCaps_tag
 {
-    // Out: true if Confidential Computing is enabled on the GPU
-    NvBool bConfComputingEnabled;
-
-    // Out: true if key rotation is enabled (for UVM keys) on the GPU
-    NvBool bKeyRotationEnabled;
+    // Out: GPU's confidential compute mode
+    UvmGpuConfComputeMode mode;
 } UvmGpuConfComputeCaps;
 
 #define UVM_GPU_NAME_LENGTH 0x40
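The hunk above swaps the 570 branch's pair of booleans for a single mode enum on the 535 side. A sketch of consuming the 535-side caps; `caps` is assumed to have been filled in by whatever query path the driver uses, which lies outside this hunk, and the spelled-out meanings of APM and HCC are our reading, not text from the header:

    UvmGpuConfComputeCaps caps;   /* assumed populated by a caps query */

    switch (caps.mode)
    {
        case UVM_GPU_CONF_COMPUTE_MODE_HCC:
            /* Hopper Confidential Computing: CSL contexts and encrypted
               channels are in play. */
            break;
        case UVM_GPU_CONF_COMPUTE_MODE_APM:
            /* Ampere Protected Memory. */
            break;
        case UVM_GPU_CONF_COMPUTE_MODE_NONE:
        default:
            /* Confidential compute disabled. */
            break;
    }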
@@ -636,8 +601,7 @@ typedef struct UvmGpuInfo_tag
     // Printable gpu name
     char name[UVM_GPU_NAME_LENGTH];
 
-    // Uuid of the physical GPU or GI UUID if nvUvmInterfaceGetGpuInfo()
-    // requested information for a valid SMC partition.
+    // Uuid of this gpu
     NvProcessorUuid uuid;
 
     // Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_*
@@ -683,9 +647,6 @@ typedef struct UvmGpuInfo_tag
     // Maximum number of TPCs per GPC
     NvU32 maxTpcPerGpcCount;
 
-    // Number of access counter buffers.
-    NvU32 accessCntrBufferCount;
-
     // NV_TRUE if SMC is enabled on this GPU.
     NvBool smcEnabled;
 
@@ -722,25 +683,6 @@ typedef struct UvmGpuInfo_tag
     //      to NVSwitch peers.
     NvBool connectedToSwitch;
     NvU64 nvswitchMemoryWindowStart;
 
-    // local EGM properties
-    // NV_TRUE if EGM is enabled
-    NvBool egmEnabled;
-
-    // Peer ID to reach local EGM when EGM is enabled
-    NvU8 egmPeerId;
-
-    // EGM base address to offset in the GMMU PTE entry for EGM mappings
-    NvU64 egmBaseAddr;
-
-    // If connectedToSwitch is NV_TRUE,
-    // nvswitchEgmMemoryWindowStart tells the base address for the GPU's EGM memory in the
-    // NVSwitch address space. It is used when creating PTEs of GPU memory mappings
-    // to NVSwitch peers.
-    NvU64 nvswitchEgmMemoryWindowStart;
-
-    // GPU supports ATS capability
-    NvBool atsSupport;
 } UvmGpuInfo;
 
 typedef struct UvmGpuFbInfo_tag
@@ -749,15 +691,9 @@ typedef struct UvmGpuFbInfo_tag
     //      RM regions that are not registered with PMA either.
     NvU64 maxAllocatableAddress;
 
     NvU32 heapSize;         // RAM in KB available for user allocations
     NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation
     NvBool bZeroFb;         // Zero FB mode enabled.
-    NvU64 maxVidmemPageSize; // Largest GPU page size to access vidmem.
-    NvBool bStaticBar1Enabled; // Static BAR1 mode is enabled
-    NvU64 staticBar1StartOffset; // The start offset of the the static mapping
-    NvU64 staticBar1Size;   // The size of the static mapping
-    NvU32 heapStart;        // The start offset of heap in KB, helpful for MIG
-                            // systems
 } UvmGpuFbInfo;
 
 typedef struct UvmGpuEccInfo_tag
@@ -769,15 +705,6 @@ typedef struct UvmGpuEccInfo_tag
     NvBool bEccEnabled;
 } UvmGpuEccInfo;
 
-typedef struct UvmGpuNvlinkInfo_tag
-{
-    unsigned nvlinkMask;
-    unsigned nvlinkOffset;
-    void *nvlinkReadLocation;
-    NvBool *nvlinkErrorNotifier;
-    NvBool bNvlinkRecoveryEnabled;
|
|
||||||
} UvmGpuNvlinkInfo;
|
|
||||||
|
|
||||||
typedef struct UvmPmaAllocationOptions_tag
|
typedef struct UvmPmaAllocationOptions_tag
|
||||||
{
|
{
|
||||||
NvU32 flags;
|
NvU32 flags;
|
||||||
@@ -844,14 +771,14 @@ typedef NV_STATUS (*uvmEventResume_t) (void);
|
|||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
uvmEventStartDevice
|
uvmEventStartDevice
|
||||||
This function will be called by the GPU driver once it has finished its
|
This function will be called by the GPU driver once it has finished its
|
||||||
initialization to tell the UVM driver that this physical GPU has come up.
|
initialization to tell the UVM driver that this GPU has come up.
|
||||||
*/
|
*/
|
||||||
typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
|
typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
|
||||||
|
|
||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
uvmEventStopDevice
|
uvmEventStopDevice
|
||||||
This function will be called by the GPU driver to let UVM know that a
|
This function will be called by the GPU driver to let UVM know that a GPU
|
||||||
physical GPU is going down.
|
is going down.
|
||||||
*/
|
*/
|
||||||
typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
|
typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
|
||||||
|
|
||||||
@@ -882,7 +809,7 @@ typedef NV_STATUS (*uvmEventServiceInterrupt_t) (void *pDeviceObject,
|
|||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
uvmEventIsrTopHalf_t
|
uvmEventIsrTopHalf_t
|
||||||
This function will be called by the GPU driver to let UVM know
|
This function will be called by the GPU driver to let UVM know
|
||||||
that an interrupt has occurred on the given physical GPU.
|
that an interrupt has occurred.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
NV_OK if the UVM driver handled the interrupt
|
NV_OK if the UVM driver handled the interrupt
|
||||||
@@ -894,41 +821,6 @@ typedef NV_STATUS (*uvmEventIsrTopHalf_t) (const NvProcessorUuid *pGpuUuidStruct
|
|||||||
typedef void (*uvmEventIsrTopHalf_t) (void);
|
typedef void (*uvmEventIsrTopHalf_t) (void);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*******************************************************************************
|
|
||||||
uvmEventDrainP2P
|
|
||||||
This function will be called by the GPU driver to signal to UVM that the
|
|
||||||
GPU has encountered an uncontained error, and all peer work must be drained
|
|
||||||
to recover. When it is called, the following assumptions/guarantees are
|
|
||||||
valid/made:
|
|
||||||
|
|
||||||
* Impacted user channels have been preempted and disabled
|
|
||||||
* UVM channels are still running normally and will continue to do
|
|
||||||
so unless an unrecoverable error is hit on said channels
|
|
||||||
* UVM must not return from this function until all enqueued work on
|
|
||||||
* peer channels has drained
|
|
||||||
* In the context of this function call, RM will still service faults
|
|
||||||
* UVM must prevent new peer work from being enqueued until the
|
|
||||||
uvmEventResumeP2P callback is issued
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
NV_OK if UVM has idled peer work and will prevent new peer workloads.
|
|
||||||
NV_ERR_TIMEOUT if peer work was unable to be drained within a timeout
|
|
||||||
XXX NV_ERR_* for any other failure (TBD)
|
|
||||||
|
|
||||||
*/
|
|
||||||
typedef NV_STATUS (*uvmEventDrainP2P_t) (const NvProcessorUuid *pGpuUuidStruct);
|
|
||||||
|
|
||||||
/*******************************************************************************
|
|
||||||
uvmEventResumeP2P
|
|
||||||
This function will be called by the GPU driver to signal to UVM that the
|
|
||||||
GPU has recovered from the previously reported uncontained NVLINK error.
|
|
||||||
When it is called, the following assumptions/guarantees are valid/made:
|
|
||||||
|
|
||||||
* UVM is again allowed to enqueue peer work
|
|
||||||
* UVM channels are still running normally
|
|
||||||
*/
|
|
||||||
typedef NV_STATUS (*uvmEventResumeP2P_t) (const NvProcessorUuid *pGpuUuidStruct);
|
|
||||||
|
|
||||||
struct UvmOpsUvmEvents
|
struct UvmOpsUvmEvents
|
||||||
{
|
{
|
||||||
uvmEventSuspend_t suspend;
|
uvmEventSuspend_t suspend;
|
||||||
@@ -941,8 +833,6 @@ struct UvmOpsUvmEvents
|
|||||||
uvmEventWddmRestartAfterTimeout_t wddmRestartAfterTimeout;
|
uvmEventWddmRestartAfterTimeout_t wddmRestartAfterTimeout;
|
||||||
uvmEventServiceInterrupt_t serviceInterrupt;
|
uvmEventServiceInterrupt_t serviceInterrupt;
|
||||||
#endif
|
#endif
|
||||||
uvmEventDrainP2P_t drainP2P;
|
|
||||||
uvmEventResumeP2P_t resumeP2P;
|
|
||||||
};
|
};
|
||||||
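For illustration, a UVM-side callback table wiring up the drainP2P/resumeP2P pair documented above might look like the following sketch (uvmDrainP2P and uvmResumeP2P are hypothetical handlers; the other members are elided):

static NV_STATUS uvmDrainP2P(const NvProcessorUuid *pGpuUuidStruct);  /* hypothetical */
static NV_STATUS uvmResumeP2P(const NvProcessorUuid *pGpuUuidStruct); /* hypothetical */

static struct UvmOpsUvmEvents g_uvmEvents = {
    /* suspend, resume, startDevice, stopDevice, ... elided */
    .drainP2P  = uvmDrainP2P,
    .resumeP2P = uvmResumeP2P,
};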

#define UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES 32
@@ -1026,6 +916,11 @@ typedef struct UvmGpuFaultInfo_tag
// CSL context used for performing decryption of replayable faults when
// Confidential Computing is enabled.
UvmCslContext cslCtx;

// Indicates whether UVM owns the replayable fault buffer.
// The value of this field is always NV_TRUE when Confidential Computing
// is disabled.
NvBool bUvmOwnsHwFaultBuffer;
} replayable;
struct
{
@@ -1103,9 +998,24 @@ typedef enum
UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
} UVM_ACCESS_COUNTER_GRANULARITY;

typedef enum
{
UVM_ACCESS_COUNTER_USE_LIMIT_NONE = 1,
UVM_ACCESS_COUNTER_USE_LIMIT_QTR = 2,
UVM_ACCESS_COUNTER_USE_LIMIT_HALF = 3,
UVM_ACCESS_COUNTER_USE_LIMIT_FULL = 4,
} UVM_ACCESS_COUNTER_USE_LIMIT;

typedef struct UvmGpuAccessCntrConfig_tag
{
NvU32 granularity;
NvU32 mimcGranularity;

NvU32 momcGranularity;

NvU32 mimcUseLimit;

NvU32 momcUseLimit;

NvU32 threshold;
} UvmGpuAccessCntrConfig;

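An illustrative 535-style configuration using the split mimc/momc fields above; the granularity and use-limit values come from the enums in this header, while the threshold value is arbitrary:

UvmGpuAccessCntrConfig config = {
    .mimcGranularity = UVM_ACCESS_COUNTER_GRANULARITY_16G,
    .momcGranularity = UVM_ACCESS_COUNTER_GRANULARITY_16G,
    .mimcUseLimit    = UVM_ACCESS_COUNTER_USE_LIMIT_FULL,
    .momcUseLimit    = UVM_ACCESS_COUNTER_USE_LIMIT_FULL,
    .threshold       = 256, /* arbitrary notification threshold */
};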
@@ -1135,13 +1045,11 @@ typedef UvmGpuAccessCntrConfig gpuAccessCntrConfig;
typedef UvmGpuFaultInfo gpuFaultInfo;
typedef UvmGpuMemoryInfo gpuMemoryInfo;
typedef UvmGpuExternalMappingInfo gpuExternalMappingInfo;
typedef UvmGpuExternalPhysAddrInfo gpuExternalPhysAddrInfo;
typedef UvmGpuChannelResourceInfo gpuChannelResourceInfo;
typedef UvmGpuChannelInstanceInfo gpuChannelInstanceInfo;
typedef UvmGpuChannelResourceBindParams gpuChannelResourceBindParams;
typedef UvmGpuFbInfo gpuFbInfo;
typedef UvmGpuEccInfo gpuEccInfo;
typedef UvmGpuNvlinkInfo gpuNvlinkInfo;
typedef UvmGpuPagingChannel *gpuPagingChannelHandle;
typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
@@ -1159,21 +1067,4 @@ typedef enum UvmCslOperation
UVM_CSL_OPERATION_DECRYPT
} UvmCslOperation;

typedef enum UVM_KEY_ROTATION_STATUS {
// Key rotation complete/not in progress
UVM_KEY_ROTATION_STATUS_IDLE = 0,
// RM is waiting for clients to report their channels are idle for key rotation
UVM_KEY_ROTATION_STATUS_PENDING = 1,
// Key rotation is in progress
UVM_KEY_ROTATION_STATUS_IN_PROGRESS = 2,
// Key rotation timeout failure, RM will RC non-idle channels.
// UVM should never see this status value.
UVM_KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3,
// Key rotation failed because upper threshold was crossed, RM will RC non-idle channels
UVM_KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4,
// Internal RM failure while rotating keys for a certain channel, RM will RC the channel.
UVM_KEY_ROTATION_STATUS_FAILED_ROTATION = 5,
UVM_KEY_ROTATION_STATUS_MAX_COUNT = 6,
} UVM_KEY_ROTATION_STATUS;

#endif // _NV_UVM_TYPES_H_
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -45,13 +45,6 @@

#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff

#define NVKMS_MAX_SUPERFRAME_VIEWS 4

#define NVKMS_LOG2_LUT_ARRAY_SIZE 10
#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE)

#define NVKMS_OLUT_FP_NORM_SCALE_DEFAULT 0xffffffff

typedef NvU32 NvKmsDeviceHandle;
typedef NvU32 NvKmsDispHandle;
typedef NvU32 NvKmsConnectorHandle;
@@ -60,7 +53,6 @@ typedef NvU32 NvKmsFrameLockHandle;
typedef NvU32 NvKmsDeferredRequestFifoHandle;
typedef NvU32 NvKmsSwapGroupHandle;
typedef NvU32 NvKmsVblankSyncObjectHandle;
typedef NvU32 NvKmsVblankSemControlHandle;

struct NvKmsSize {
NvU16 width;
@@ -187,14 +179,6 @@ enum NvKmsEventType {
NVKMS_EVENT_TYPE_FLIP_OCCURRED,
};

enum NvKmsFlipResult {
NV_KMS_FLIP_RESULT_SUCCESS = 0, /* Success */
NV_KMS_FLIP_RESULT_INVALID_PARAMS, /* Parameter validation failed */
NV_KMS_FLIP_RESULT_IN_PROGRESS, /* Flip would fail because an outstanding
flip containing changes that cannot be
queued is in progress */
};

typedef enum {
NV_EVO_SCALER_1TAP = 0,
NV_EVO_SCALER_2TAPS = 1,
@@ -237,90 +221,6 @@ struct NvKmsUsageBounds {
} layer[NVKMS_MAX_LAYERS_PER_HEAD];
};

/*!
* Per-component arrays of NvU16s describing the LUT; used for both the input
* LUT and output LUT.
*/
struct NvKmsLutRamps {
NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */
};

/* Datatypes for LUT capabilities */
enum NvKmsLUTFormat {
/*
* Normalized fixed-point format mapping [0, 1] to [0x0, 0xFFFF].
*/
NVKMS_LUT_FORMAT_UNORM16,

/*
* Half-precision floating point.
*/
NVKMS_LUT_FORMAT_FP16,

/*
* 14-bit fixed-point format required to work around hardware bug 813188.
*
* To convert from UNORM16 to UNORM14_WAR_813188:
* unorm14_war_813188 = ((unorm16 >> 2) & ~7) + 0x6000
*/
NVKMS_LUT_FORMAT_UNORM14_WAR_813188
};

enum NvKmsLUTVssSupport {
NVKMS_LUT_VSS_NOT_SUPPORTED,
NVKMS_LUT_VSS_SUPPORTED,
NVKMS_LUT_VSS_REQUIRED,
};

enum NvKmsLUTVssType {
NVKMS_LUT_VSS_TYPE_NONE,
NVKMS_LUT_VSS_TYPE_LINEAR,
NVKMS_LUT_VSS_TYPE_LOGARITHMIC,
};

struct NvKmsLUTCaps {
/*! Whether this layer or head on this device supports this LUT stage. */
NvBool supported;

/*! Whether this LUT supports VSS. */
enum NvKmsLUTVssSupport vssSupport;

/*!
* The type of VSS segmenting this LUT uses.
*/
enum NvKmsLUTVssType vssType;

/*!
* Expected number of VSS segments.
*/
NvU32 vssSegments;

/*!
* Expected number of LUT entries.
*/
NvU32 lutEntries;

/*!
* Format for each of the LUT entries.
*/
enum NvKmsLUTFormat entryFormat;
};

/* each LUT entry uses this many bytes */
#define NVKMS_LUT_CAPS_LUT_ENTRY_SIZE (4 * sizeof(NvU16))

/* if the LUT surface uses VSS, size of the VSS header */
#define NVKMS_LUT_VSS_HEADER_SIZE (4 * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE)

struct NvKmsLUTSurfaceParams {
NvKmsSurfaceHandle surfaceHandle;
NvU64 offset NV_ALIGN_BYTES(8);
NvU32 vssSegments;
NvU32 lutEntries;
};

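The UNORM14_WAR_813188 conversion documented above, transcribed into a helper for clarity (the helper itself is not part of the header; the formula is quoted verbatim from the comment):

static inline NvU16 unorm16ToUnorm14War813188(NvU16 unorm16)
{
    /* unorm14_war_813188 = ((unorm16 >> 2) & ~7) + 0x6000 */
    return (NvU16)(((unorm16 >> 2) & ~7) + 0x6000);
}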
/*
* A 3x4 row-major colorspace conversion matrix.
*
@@ -516,9 +416,9 @@ struct NvKmsLayerCapabilities {
NvBool supportsWindowMode :1;

/*!
* Whether layer supports ICtCp pipe.
* Whether layer supports HDR pipe.
*/
NvBool supportsICtCp :1;
NvBool supportsHDR :1;

/*!
@@ -539,10 +439,6 @@ struct NvKmsLayerCapabilities {
* still expected to honor the NvKmsUsageBounds for each head.
*/
NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);

/* Capabilities for each LUT stage in the EVO3 precomp pipeline. */
struct NvKmsLUTCaps ilut;
struct NvKmsLUTCaps tmo;
};

/*!
@@ -635,18 +531,6 @@ typedef struct {
NvBool noncoherent;
} NvKmsDispIOCoherencyModes;

enum NvKmsInputColorRange {
/*
* If DEFAULT is provided, driver will assume full range for RGB formats
* and limited range for YUV formats.
*/
NVKMS_INPUT_COLORRANGE_DEFAULT = 0,

NVKMS_INPUT_COLORRANGE_LIMITED = 1,

NVKMS_INPUT_COLORRANGE_FULL = 2,
};

enum NvKmsInputColorSpace {
/* Unknown colorspace; no de-gamma will be applied */
NVKMS_INPUT_COLORSPACE_NONE = 0,
@@ -658,12 +542,6 @@ enum NvKmsInputColorSpace {
NVKMS_INPUT_COLORSPACE_BT2100_PQ = 2,
};

enum NvKmsOutputColorimetry {
NVKMS_OUTPUT_COLORIMETRY_DEFAULT = 0,

NVKMS_OUTPUT_COLORIMETRY_BT2100 = 1,
};

enum NvKmsOutputTf {
/*
* NVKMS itself won't apply any OETF (clients are still
@@ -674,17 +552,6 @@ enum NvKmsOutputTf {
NVKMS_OUTPUT_TF_PQ = 2,
};

/*!
* EOTF Data Byte 1 as per CTA-861-G spec.
* This is expected to match exactly with the spec.
*/
enum NvKmsInfoFrameEOTF {
NVKMS_INFOFRAME_EOTF_SDR_GAMMA = 0,
NVKMS_INFOFRAME_EOTF_HDR_GAMMA = 1,
NVKMS_INFOFRAME_EOTF_ST2084 = 2,
NVKMS_INFOFRAME_EOTF_HLG = 3,
};

/*!
* HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec.
* This is expected to match exactly with the spec.
@@ -738,45 +605,4 @@ struct NvKmsHDRStaticMetadata {
NvU16 maxFALL;
};

/*!
* A superframe is made of two or more video streams that are combined in
* a specific way. A DP serializer (an external device connected to a Tegra
* ARM SOC over DP or HDMI) can receive a video stream comprising multiple
* videos combined into a single frame and then split it into multiple
* video streams. The following structure describes the number of views
* and dimensions of each view inside a superframe.
*/
struct NvKmsSuperframeInfo {
NvU8 numViews;
struct {
/* x offset inside superframe at which this view starts */
NvU16 x;

/* y offset inside superframe at which this view starts */
NvU16 y;

/* Horizontal active width in pixels for this view */
NvU16 width;

/* Vertical active height in lines for this view */
NvU16 height;
} view[NVKMS_MAX_SUPERFRAME_VIEWS];
};

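For illustration, describing a two-view side-by-side superframe (two 1920x1080 views packed into one 3840x1080 frame) with the struct above:

struct NvKmsSuperframeInfo info = {
    .numViews = 2,
    .view = {
        { .x = 0,    .y = 0, .width = 1920, .height = 1080 },
        { .x = 1920, .y = 0, .width = 1920, .height = 1080 },
    },
};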
/* Fields within NvKmsVblankSemControlDataOneHead::flags */
#define NVKMS_VBLANK_SEM_CONTROL_SWAP_INTERVAL 15:0

struct NvKmsVblankSemControlDataOneHead {
NvU32 requestCounterAccel;
NvU32 requestCounter;
NvU32 flags;

NvU32 semaphore;
NvU64 vblankCount NV_ALIGN_BYTES(8);
};

struct NvKmsVblankSemControlData {
struct NvKmsVblankSemControlDataOneHead head[NV_MAX_HEADS];
};

#endif /* NVKMS_API_TYPES_H */
@@ -49,8 +49,6 @@ struct NvKmsKapiDevice;
struct NvKmsKapiMemory;
struct NvKmsKapiSurface;
struct NvKmsKapiChannelEvent;
struct NvKmsKapiSemaphoreSurface;
struct NvKmsKapiSemaphoreSurfaceCallback;

typedef NvU32 NvKmsKapiConnector;
typedef NvU32 NvKmsKapiDisplay;
@@ -69,14 +67,6 @@ typedef NvU32 NvKmsKapiDisplay;
*/
typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32);

/*
* Note: Same as above, this function must not call back into NVKMS-KAPI, nor
* directly into RM. Doing so could cause deadlocks given the notification
* function will most likely be called from within RM's interrupt handler
* callchain.
*/
typedef void NvKmsSemaphoreSurfaceCallbackProc(void *pData);

/** @} */

/**
@@ -124,14 +114,6 @@ struct NvKmsKapiDisplayMode {
#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff
#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0

struct NvKmsKapiLutCaps {
struct {
struct NvKmsLUTCaps ilut;
struct NvKmsLUTCaps tmo;
} layer[NVKMS_KAPI_LAYER_MAX];
struct NvKmsLUTCaps olut;
};

struct NvKmsKapiDeviceResourcesInfo {

NvU32 numHeads;
@@ -144,11 +126,6 @@ struct NvKmsKapiDeviceResourcesInfo {
NvU32 validCursorCompositionModes;
NvU64 supportedCursorSurfaceMemoryFormats;

struct {
NvU64 maxSubmittedOffset;
NvU64 stride;
} semsurf;

struct {
NvU16 validRRTransforms;
NvU32 validCompositionModes;
@@ -166,19 +143,13 @@ struct NvKmsKapiDeviceResourcesInfo {

NvU32 hasVideoMemory;

NvU32 numDisplaySemaphores;

NvU8 genericPageKind;

NvBool supportsSyncpts;

NvBool requiresVrrSemaphores;
} caps;

NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX];
NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX];
NvBool supportsHDR[NVKMS_KAPI_LAYER_MAX];

struct NvKmsKapiLutCaps lutCaps;
};

#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType))
@@ -224,26 +195,18 @@ struct NvKmsKapiStaticDisplayInfo {
NvU32 headMask;
};

struct NvKmsKapiSyncParams {
union {
struct {
/*!
* Possible syncpt use case in kapi.
* For pre-syncpt, use only id and value
* and for post-syncpt, use only fd.
*/
NvU32 preSyncptId;
NvU32 preSyncptValue;
} syncpt;

struct {
NvU32 index;
} semaphore;
} u;

NvBool preSyncptSpecified;
NvBool postSyncptRequested;
NvBool semaphoreSpecified;
};
struct NvKmsKapiSyncpt {
/*!
* Possible syncpt use case in kapi.
* For pre-syncpt, use only id and value
* and for post-syncpt, use only fd.
*/
NvBool preSyncptSpecified;
NvU32 preSyncptId;
NvU32 preSyncptValue;

NvBool postSyncptRequested;
};
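A sketch of requesting a pre-syncpt wait with the flattened 535-style layout shown above (id and value are placeholders; with the 570-style struct the same information lives in u.syncpt):

struct NvKmsKapiSyncpt syncptParams = {
    .preSyncptSpecified  = NV_TRUE,
    .preSyncptId         = id,    /* placeholder */
    .preSyncptValue      = value, /* placeholder */
    .postSyncptRequested = NV_FALSE,
};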

struct NvKmsKapiLayerConfig {
@@ -253,12 +216,10 @@ struct NvKmsKapiLayerConfig {
NvU8 surfaceAlpha;
} compParams;
struct NvKmsRRParams rrParams;
struct NvKmsKapiSyncParams syncParams;
struct NvKmsKapiSyncpt syncptParams;

struct {
struct NvKmsHDRStaticMetadata val;
NvBool enabled;
} hdrMetadata;
struct NvKmsHDRStaticMetadata hdrMetadata;
NvBool hdrMetadataSpecified;

enum NvKmsOutputTf tf;

@@ -272,54 +233,16 @@ struct NvKmsKapiLayerConfig {
NvU16 dstWidth, dstHeight;

enum NvKmsInputColorSpace inputColorSpace;

struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} ilut;

struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} tmo;

struct NvKmsCscMatrix csc;
NvBool cscUseMain;

struct {
struct NvKmsCscMatrix lmsCtm;
struct NvKmsCscMatrix lmsToItpCtm;
struct NvKmsCscMatrix itpToLmsCtm;
struct NvKmsCscMatrix blendCtm;
struct {
NvBool lmsCtm : 1;
NvBool lmsToItpCtm : 1;
NvBool itpToLmsCtm : 1;
NvBool blendCtm : 1;
} enabled;
} matrixOverrides;
};

struct NvKmsKapiLayerRequestedConfig {
struct NvKmsKapiLayerConfig config;
struct {
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool cscChanged : 1;
NvBool tfChanged : 1;
NvBool hdrMetadataChanged : 1;
NvBool matrixOverridesChanged : 1;
NvBool ilutChanged : 1;
NvBool tmoChanged : 1;
} flags;
};

@@ -363,52 +286,14 @@ struct NvKmsKapiHeadModeSetConfig {
struct NvKmsKapiDisplayMode mode;

NvBool vrrEnabled;

struct {
NvBool enabled;
enum NvKmsInfoFrameEOTF eotf;
struct NvKmsHDRStaticMetadata staticMetadata;
} hdrInfoFrame;

enum NvKmsOutputColorimetry colorimetry;

struct {
struct {
NvU32 depth;
NvU32 start;
NvU32 end;
struct NvKmsLutRamps *pRamps;
} input;

struct {
NvBool enabled;
struct NvKmsLutRamps *pRamps;
} output;
} lut;

struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} olut;

NvU32 olutFpNormScale;
};

struct NvKmsKapiHeadRequestedConfig {
struct NvKmsKapiHeadModeSetConfig modeSetConfig;
struct {
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool hdrInfoFrameChanged : 1;
NvBool colorimetryChanged : 1;
NvBool legacyIlutChanged : 1;
NvBool legacyOlutChanged : 1;
NvBool olutChanged : 1;
NvBool olutFpNormScaleChanged : 1;
} flags;

struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig;
@@ -433,9 +318,6 @@ struct NvKmsKapiHeadReplyConfig {
};

struct NvKmsKapiModeSetReplyConfig {
enum NvKmsFlipResult flipResult;
NvBool vrrFlip;
NvS32 vrrSemaphoreIndex;
struct NvKmsKapiHeadReplyConfig
headReplyConfig[NVKMS_KAPI_MAX_HEADS];
};
@@ -544,9 +426,6 @@ struct NvKmsKapiCreateSurfaceParams {
* explicit_layout is NV_TRUE and layout is
* NvKmsSurfaceMemoryLayoutBlockLinear */
NvU8 log2GobsPerBlockY;

/* [IN] Whether a surface can be updated directly on the screen */
NvBool noDisplayCaching;
};

enum NvKmsKapiAllocationType {
@@ -555,14 +434,6 @@ enum NvKmsKapiAllocationType {
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
};

typedef enum NvKmsKapiRegisterWaiterResultRec {
NVKMS_KAPI_REG_WAITER_FAILED,
NVKMS_KAPI_REG_WAITER_SUCCESS,
NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED,
} NvKmsKapiRegisterWaiterResult;

typedef void NvKmsKapiSuspendResumeCallbackFunc(NvBool suspend);

struct NvKmsKapiFunctionsTable {

/*!
@@ -648,8 +519,8 @@ struct NvKmsKapiFunctionsTable {
);

/*!
* Revoke modeset permissions previously granted. Only one (dispIndex,
* head, display) is currently supported.
* Revoke permissions previously granted. Only one (dispIndex, head,
* display) is currently supported.
*
* \param [in] device A device returned by allocateDevice().
*
@@ -666,34 +537,6 @@ struct NvKmsKapiFunctionsTable {
NvKmsKapiDisplay display
);

/*!
* Grant modeset sub-owner permissions to fd. This is used by clients to
* convert drm 'master' permissions into nvkms sub-owner permission.
*
* \param [in] fd fd from opening /dev/nvidia-modeset.
*
* \param [in] device A device returned by allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*grantSubOwnership)
(
NvS32 fd,
struct NvKmsKapiDevice *device
);

/*!
* Revoke sub-owner permissions previously granted.
*
* \param [in] device A device returned by allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*revokeSubOwnership)
(
struct NvKmsKapiDevice *device
);

/*!
* Registers for notification, via
* NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified
@@ -1014,17 +857,6 @@ struct NvKmsKapiFunctionsTable {
const void *pLinearAddress
);

/*!
* Check if memory object allocated is video memory.
*
* \param [in] memory Memory allocated using allocateMemory()
*
* \return NV_TRUE if memory is vidmem, NV_FALSE otherwise.
*/
NvBool (*isVidmem)(
const struct NvKmsKapiMemory *memory
);

/*!
* Create a formatted surface from an NvKmsKapiMemory object.
*
@@ -1241,6 +1073,21 @@ struct NvKmsKapiFunctionsTable {
NvU64 *pPages
);

/*!
* Check if this memory object can be scanned out for display.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] memory The memory object to check for display support.
*
* \return NV_TRUE if this memory can be displayed, NV_FALSE if not.
*/
NvBool (*isMemoryValidForDisplay)
(
const struct NvKmsKapiDevice *device,
const struct NvKmsKapiMemory *memory
);

/*
* Import SGT as a memory handle.
*
@@ -1275,299 +1122,6 @@ struct NvKmsKapiFunctionsTable {
NvP64 dmaBuf,
NvU32 limit);

/*!
* Import a semaphore surface allocated elsewhere to NVKMS and return a
* handle to the new object.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] nvKmsParamsUser Userspace pointer to driver-specific
* parameters describing the semaphore
* surface being imported.
*
* \param [in] nvKmsParamsSize Size of the driver-specific parameter
* struct.
*
* \param [out] pSemaphoreMap Returns a CPU mapping of the semaphore
* surface's semaphore memory to the client.
*
* \param [out] pMaxSubmittedMap Returns a CPU mapping of the semaphore
* surface's semaphore memory to the client.
*
* \return struct NvKmsKapiSemaphoreSurface* on success, NULL on failure.
*/
struct NvKmsKapiSemaphoreSurface* (*importSemaphoreSurface)
(
struct NvKmsKapiDevice *device,
NvU64 nvKmsParamsUser,
NvU64 nvKmsParamsSize,
void **pSemaphoreMap,
void **pMaxSubmittedMap
);

/*!
* Free an imported semaphore surface.
*
* \param [in] device The device passed to
* importSemaphoreSurface() when creating
* semaphoreSurface.
*
* \param [in] semaphoreSurface A semaphore surface returned by
* importSemaphoreSurface().
*/
void (*freeSemaphoreSurface)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface
);

/*!
* Register a callback to be called when a semaphore reaches a value.
*
* The callback will be called when the semaphore at index in
* semaphoreSurface reaches the value wait_value. The callback will
* be called at most once and is automatically unregistered when called.
* It may also be unregistered (i.e., cancelled) explicitly using the
* unregisterSemaphoreSurfaceCallback() function. To avoid leaking the
* memory used to track the registered callback, callers must ensure one
* of these methods of unregistration is used for every successful
* callback registration that returns a non-NULL pCallbackHandle.
*
* \param [in] device The device passed to
* importSemaphoreSurface() when creating
* semaphoreSurface.
*
* \param [in] semaphoreSurface A semaphore surface returned by
* importSemaphoreSurface().
*
* \param [in] pCallback A pointer to the function to call when
* the specified value is reached. NULL
* means no callback.
*
* \param [in] pData Arbitrary data to be passed back to the
* callback as its sole parameter.
*
* \param [in] index The index of the semaphore within
* semaphoreSurface.
*
* \param [in] wait_value The value the semaphore must reach or
* exceed before the callback is called.
*
* \param [in] new_value The value the semaphore will be set to
* when it reaches or exceeds <wait_value>.
* 0 means do not update the value.
*
* \param [out] pCallbackHandle On success, the value pointed to will
* contain an opaque handle to the
* registered callback that may be used to
* cancel it if needed. Unused if pCallback
* is NULL.
*
* \return NVKMS_KAPI_REG_WAITER_SUCCESS if the waiter was registered or if
* no callback was requested and the semaphore at <index> has
* already reached or exceeded <wait_value>
*
* NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED if a callback was
* requested and the semaphore at <index> has already reached or
* exceeded <wait_value>
*
* NVKMS_KAPI_REG_WAITER_FAILED if waiter registration failed.
*/
NvKmsKapiRegisterWaiterResult
(*registerSemaphoreSurfaceCallback)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
NvKmsSemaphoreSurfaceCallbackProc *pCallback,
void *pData,
NvU64 index,
NvU64 wait_value,
NvU64 new_value,
struct NvKmsKapiSemaphoreSurfaceCallback **pCallbackHandle
);

/*!
* Unregister a callback registered via registerSemaphoreSurfaceCallback()
*
* If the callback has not yet been called, this function will cancel the
* callback and free its associated resources.
*
* Note this function treats the callback handle as a pointer. While this
* function does not dereference that pointer itself, the underlying call
* to RM does within a properly guarded critical section that first ensures
* it is not in the process of being used within a callback. This means
* the callstack must take into consideration that pointers are not in
* general unique handles if they may have been freed, since a subsequent
* malloc could return the same pointer value at that point. This callchain
* avoids that by leveraging the behavior of the underlying RM APIs:
*
* 1) A callback handle is referenced relative to its corresponding
* (semaphore surface, index, wait_value) tuple here and within RM. It
* is not a valid handle outside of that scope.
*
* 2) A callback can not be registered against an already-reached value
* for a given semaphore surface index.
*
* 3) A given callback handle can not be registered twice against the same
* (semaphore surface, index, wait_value) tuple, so unregistration will
* never race with registration at the RM level, and would only race at
* a higher level if used incorrectly. Since this is kernel code, we
* can safely assume there won't be malicious clients purposely misusing
* the API, but the burden is placed on the caller to ensure its usage
* does not lead to races at higher levels.
*
* These factors considered together ensure any valid registered handle is
* either still in the relevant waiter list and refers to the same event/
* callback as when it was registered, or has been removed from the list
* as part of a critical section that also destroys the list itself and
* makes future lookups in that list impossible, and hence eliminates the
* chance of comparing a stale handle with a new handle of the same value
* as part of a lookup.
*
* \param [in] device The device passed to
* importSemaphoreSurface() when creating
* semaphoreSurface.
*
* \param [in] semaphoreSurface The semaphore surface passed to
* registerSemaphoreSurfaceCallback() when
* registering the callback.
*
* \param [in] index The index passed to
* registerSemaphoreSurfaceCallback() when
* registering the callback.
*
* \param [in] wait_value The wait_value passed to
* registerSemaphoreSurfaceCallback() when
* registering the callback.
*
* \param [in] callbackHandle The callback handle returned by
* registerSemaphoreSurfaceCallback().
*/
NvBool
(*unregisterSemaphoreSurfaceCallback)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
NvU64 index,
NvU64 wait_value,
struct NvKmsKapiSemaphoreSurfaceCallback *callbackHandle
);

/*!
* Update the value of a semaphore surface from the CPU.
*
* Update the semaphore value at the specified index from the CPU, then
* wake up any pending CPU waiters associated with that index that are
* waiting on it reaching a value <= the new value.
*/
NvBool
(*setSemaphoreSurfaceValue)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
NvU64 index,
NvU64 new_value
);

/*!
* Set the callback function for suspending and resuming the display system.
*/
void
(*setSuspendResumeCallback)
(
NvKmsKapiSuspendResumeCallbackFunc *function
);

/*!
* Immediately initialize the specified display semaphore to the pending state.
*
* Must be called prior to applying a mode set that utilizes the specified
* display semaphore for synchronization.
*
* \param [in] device The device which will utilize the semaphore.
*
* \param [in] semaphoreIndex Index of the desired semaphore within the
* NVKMS semaphore pool. Must be less than
* NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
* for the specified device.
*/
NvBool
(*tryInitDisplaySemaphore)
(
struct NvKmsKapiDevice *device,
NvU32 semaphoreIndex
);

/*!
* Immediately set the specified display semaphore to the displayable state.
*
* Must be called after \ref tryInitDisplaySemaphore to indicate a mode
* configuration change that utilizes the specified display semaphore for
* synchronization may proceed.
*
* \param [in] device The device which will utilize the semaphore.
*
* \param [in] semaphoreIndex Index of the desired semaphore within the
* NVKMS semaphore pool. Must be less than
* NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
* for the specified device.
*/
void
(*signalDisplaySemaphore)
(
struct NvKmsKapiDevice *device,
NvU32 semaphoreIndex
);

/*!
* Immediately cancel use of a display semaphore by resetting its value to
* its initial state.
*
* This can be used by clients to restore a semaphore to a consistent state
* when they have prepared it for use by previously calling
* \ref tryInitDisplaySemaphore() on it, but are then prevented from
* submitting the associated hardware operations to consume it due to the
* subsequent failure of some software or hardware operation.
*
* \param [in] device The device which will utilize the semaphore.
*
* \param [in] semaphoreIndex Index of the desired semaphore within the
* NVKMS semaphore pool. Must be less than
* NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
* for the specified device.
*/
void
(*cancelDisplaySemaphore)
(
struct NvKmsKapiDevice *device,
NvU32 semaphoreIndex
);

/*!
* Signal the VRR semaphore at the specified index from the CPU.
* If device does not support VRR semaphores, this is a no-op.
* Returns true if signal is success or no-op, otherwise returns false.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] index The VRR semaphore index to be signalled.
*/
NvBool
(*signalVrrSemaphore)
(
struct NvKmsKapiDevice *device,
NvS32 index
);

/*
* Notify NVKMS that the system's framebuffer console has been disabled and
* the reserved allocation for the old framebuffer console can be unmapped.
*/
void
(*framebufferConsoleDisabled)
(
struct NvKmsKapiDevice *device
);
};

/** @} */
@@ -1582,20 +1136,6 @@ NvBool nvKmsKapiGetFunctionsTable
struct NvKmsKapiFunctionsTable *funcsTable
);

NvU32 nvKmsKapiF16ToF32(NvU16 a);

NvU16 nvKmsKapiF32ToF16(NvU32 a);

NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b);

NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b);

NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b);

NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact);

NvU32 nvKmsKapiUI32ToF32(NvU32 a);

/** @} */

#endif /* defined(__NVKMS_KAPI_H__) */
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,25 +34,19 @@
/*
* This is the maximum number of GPUs supported in a single system.
*/
#define NV_MAX_DEVICES 32

/*
* This is the maximum number of subdevices within a single device.
*/
#define NV_MAX_SUBDEVICES 8

/*
* This is the maximum length of the process name string.
*/
#define NV_PROC_NAME_MAX_LENGTH 100U

/*
* This is the maximum number of heads per GPU.
*/
#define NV_MAX_HEADS 4

/*
* Maximum length of a MIG device UUID. It is a 36-byte UUID string plus a
* 4-byte prefix and NUL terminator: 'M' 'I' 'G' '-' UUID '\0x0'
*/
#define NV_MIG_DEVICE_UUID_STR_LENGTH 41U
@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -33,26 +33,43 @@ extern "C" {

#include "nvtypes.h"

// Miscellaneous macros useful for bit field manipulations.
#ifndef NVBIT
#define NVBIT(b) (1U<<(b))
#endif
#ifndef NVBIT_TYPE
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#endif
#ifndef NVBIT32
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#endif
#ifndef NVBIT64
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#endif

//Concatenate 2 32bit values to a 64bit value
#define NV_CONCAT_32_TO_64(hi, lo) ((((NvU64)hi) << 32) | ((NvU64)lo))
#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Miscellaneous macros useful for bit field manipulations
//
// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a.
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif

#endif

//
// It is recommended to use the following bit macros to avoid macro name
// collisions with other src code bases.
//
#ifndef NVBIT
#define NVBIT(b) (1U<<(b))
#endif
#ifndef NVBIT_TYPE
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#endif
#ifndef NVBIT32
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#endif
#ifndef NVBIT64
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#endif

// Helper macro's for 32 bit bitmasks
#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
#define NV_BITMASK32_SET(pChannelMask, chId) \
(pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
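Illustrative use of the 32-bit bitmask helpers above: marking channel 37 as set lands on bit 5 of element 1 of the mask array:

NvU32 channelMask[2] = { 0, 0 };
NV_BITMASK32_SET(channelMask, 37);
/* NV_BITMASK32_IDX(37) == 1, NV_BITMASK32_OFFSET(37) == 5,
 * so channelMask[1] now equals NVBIT(5). */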
@@ -477,23 +494,6 @@ do \
//
#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1)

//
// Create a 64b bitmask with n bits set
// This is the same as ((1ULL<<n) - 1), but it doesn't overflow for n=64
//
// ...
// n=-1, 0x0000000000000000
// n=0, 0x0000000000000000
// n=1, 0x0000000000000001
// ...
// n=63, 0x7FFFFFFFFFFFFFFF
// n=64, 0xFFFFFFFFFFFFFFFF
// n=65, 0xFFFFFFFFFFFFFFFF
// n=66, 0xFFFFFFFFFFFFFFFF
// ...
//
#define NV_BITMASK64(n) ((n<1) ? 0ULL : (NV_U64_MAX>>((n>64) ? 0 : (64-n))))

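Spot checks of NV_BITMASK64 against the table in its comment (illustrative; ct_assert and NV_U64_MAX are assumed to be available from the surrounding tree):

ct_assert(NV_BITMASK64(1)  == 0x0000000000000001ULL);
ct_assert(NV_BITMASK64(63) == 0x7FFFFFFFFFFFFFFFULL);
ct_assert(NV_BITMASK64(64) == NV_U64_MAX); /* no shift overflow at n=64 */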
#define DRF_READ_1WORD_BS(d,r,f,v) \
|
#define DRF_READ_1WORD_BS(d,r,f,v) \
|
||||||
((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \
|
((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \
|
||||||
((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \
|
((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \
|
||||||
@@ -574,13 +574,6 @@ nvMaskPos32(const NvU32 mask, const NvU32 bitIdx)
    n32 = BIT_IDX_32(LOWESTBIT(n32));\
}

// Destructive operation on n64
#define LOWESTBITIDX_64(n64) \
{ \
    n64 = BIT_IDX_64(LOWESTBIT(n64));\
}

// Destructive operation on n32
#define HIGHESTBITIDX_32(n32) \
{ \
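These macros overwrite their argument in place, which is what the "destructive" comments warn about. A hypothetical caller, assuming the conventional LOWESTBIT(n) == (n & -n) isolate-lowest-bit definition used elsewhere in this header:

// Hypothetical usage of the destructive bit-index macros above.
static void example_lowest_bit_idx(void)
{
    NvU64 n64 = 0x100ULL; // only bit 8 set

    // LOWESTBIT isolates bit 8 and BIT_IDX_64 converts it to the index 8,
    // which then replaces the original value of n64.
    LOWESTBITIDX_64(n64);

    // n64 == 8 here; the original mask value is gone.
}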
@@ -701,42 +694,6 @@ nvPrevPow2_U64(const NvU64 x )
    } \
}

//
// Bug 4851259: Newly added functions must be hidden from certain HS-signed
// ucode compilers to avoid signature mismatch.
//
#ifndef NVDEC_1_0
/*!
 * Returns the position of nth set bit in the given mask.
 *
 * Returns -1 if mask has fewer than n bits set.
 *
 * n is 0 indexed and has valid values 0..31 inclusive, so "zeroth" set bit is
 * the first set LSB.
 *
 * Example, if mask = 0x000000F0u and n = 1, the return value will be 5.
 * Example, if mask = 0x000000F0u and n = 4, the return value will be -1.
 */
static NV_FORCEINLINE NvS32
nvGetNthSetBitIndex32(NvU32 mask, NvU32 n)
{
    NvU32 seenSetBitsCount = 0;
    NvS32 index;
    FOR_EACH_INDEX_IN_MASK(32, index, mask)
    {
        if (seenSetBitsCount == n)
        {
            return index;
        }
        ++seenSetBitsCount;
    }
    FOR_EACH_INDEX_IN_MASK_END;

    return -1;
}

#endif // NVDEC_1_0

//
// Size to use when declaring variable-sized arrays
//
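A hypothetical check that mirrors the two documented examples above (nvAssert again stands in for any assert mechanism):

// Hypothetical sanity check for nvGetNthSetBitIndex32.
static void example_nth_set_bit(void)
{
    // 0x000000F0u has bits 4..7 set, so the n=1 set bit is bit 5,
    // and there is no n=4 set bit.
    nvAssert(nvGetNthSetBitIndex32(0x000000F0u, 1) == 5);
    nvAssert(nvGetNthSetBitIndex32(0x000000F0u, 4) == -1);
}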
@@ -961,30 +918,6 @@ static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
// Use (lo) if (b) is less than 64, and (hi) if >= 64.
//
#define NV_BIT_SET_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) |= NVBIT64(b); else (hi) |= NVBIT64( b & 0x3F ); }
//
// Clear the bit at pos (b) for U64 which is < 128.
// Use (lo) if (b) is less than 64, and (hi) if >= 64.
//
#define NV_BIT_CLEAR_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) &= ~NVBIT64(b); else (hi) &= ~NVBIT64( b & 0x3F ); }

// Get the number of elements in the specified fixed-size array
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))

#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Deprecated macros whose definition can be removed once the code base no longer references them.
// Use the NVBIT* macros instead of these macros.
//
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif

#ifdef __cplusplus
}
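A hypothetical caller of the 128-bit helpers, tracking a wide ID space in a lo/hi pair of NvU64 words:

// Hypothetical usage of NV_BIT_SET_128 / NV_BIT_CLEAR_128 and NV_ARRAY_ELEMENTS.
static void example_bit128(void)
{
    NvU64 lo = 0, hi = 0;
    NvU32 ids[16];

    NV_BIT_SET_128(70, lo, hi);   // 70 >= 64, so bit (70 & 0x3F) == 6 of hi is set
    NV_BIT_CLEAR_128(70, lo, hi); // ... and cleared again; lo is never touched

    // NV_ARRAY_ELEMENTS only works on true arrays, not pointers; here it
    // evaluates to 16 at compile time.
    (void)NV_ARRAY_ELEMENTS(ids);
    (void)lo;
    (void)hi;
}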
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -150,16 +150,6 @@ NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Confi
NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt")
NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded")
NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value")
NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE, 0x0000007C, "PMU RPC error due to no queue slot available for this event")
NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation not allowed as key rotation is in progress")
NV_STATUS_CODE(NV_ERR_TEST_ONLY_CODE_NOT_ENABLED, 0x0000007E, "Test-only code path not enabled")
NV_STATUS_CODE(NV_ERR_SECURE_BOOT_FAILED, 0x0000007F, "GFW secure boot failed")
NV_STATUS_CODE(NV_ERR_INSUFFICIENT_ZBC_ENTRY, 0x00000080, "No more ZBC entry for the client")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_NOT_READY, 0x00000081, "Nvlink Fabric Status or Fabric Probe is not yet complete, caller needs to retry")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_FAILURE, 0x00000082, "Nvlink Fabric Probe failed")
NV_STATUS_CODE(NV_ERR_GPU_MEMORY_ONLINING_FAILURE, 0x00000083, "GPU Memory Onlining failed")
NV_STATUS_CODE(NV_ERR_REDUCTION_MANAGER_NOT_AVAILABLE, 0x00000084, "Reduction Manager is not available")
NV_STATUS_CODE(NV_ERR_RESOURCE_RETIREMENT_ERROR, 0x00000086, "An error occurred while trying to retire a resource")

// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")
@@ -145,18 +145,7 @@ typedef signed short NvS16; /* -32768 to 32767 */
#endif

// Macro to build an NvU32 from four bytes, listed from msb to lsb
#define NvU32_BUILD(a, b, c, d) \
    ((NvU32)( \
        (((NvU32)(a) & 0xff) << 24) | \
        (((NvU32)(b) & 0xff) << 16) | \
        (((NvU32)(c) & 0xff) << 8) | \
        (((NvU32)(d) & 0xff))))
#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

// Macro to build an NvU64 from two DWORDS, listed from msb to lsb
#define NvU64_BUILD(a, b) \
    ((NvU64)( \
        (((NvU64)(a) & ~0U) << 32) | \
        (((NvU64)(b) & ~0U))))

#if NVTYPES_USE_STDINT
typedef uint32_t NvV32; /* "void": enumerated or multiple fields */
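The multi-line form of NvU32_BUILD masks each argument with 0xff before shifting, so a sign-extended or oversized byte cannot corrupt neighboring fields, which the older one-line form could not guarantee. A hypothetical illustration:

// Hypothetical illustration of why the masking form of NvU32_BUILD is safer.
static void example_nvu32_build(void)
{
    // 'G','P','U','0' -> 0x47505530 with either definition.
    NvU32 tag = NvU32_BUILD('G', 'P', 'U', '0');

    // With the masking form, an out-of-range argument such as 0x1FF is
    // clipped to 0xFF instead of bleeding into the next byte.
    NvU32 clipped = NvU32_BUILD(0x1FF, 0, 0, 0); // 0xFF000000

    (void)tag;
    (void)clipped;
}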
@@ -40,11 +40,8 @@
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <os/nv_memory_area.h>
#include <nv-caps.h>

#include "rs_access.h"

typedef struct
@@ -70,6 +67,7 @@ typedef struct os_wait_queue os_wait_queue;
 * ---------------------------------------------------------------------------
 */

NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
@@ -105,10 +103,9 @@ NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
#if defined(NV_VMWARE)
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void* NV_API_CALL os_map_user_space (MemoryArea *, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
#endif
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
@@ -119,7 +116,7 @@ void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_check_access (RsAccessRight accessRight);
NvBool NV_API_CALL os_allow_priority_override (void);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
@@ -135,8 +132,7 @@ void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *, NvBool);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *);
NvBool NV_API_CALL os_is_queue_flush_ongoing (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
@@ -157,7 +153,6 @@ void NV_API_CALL os_release_rwlock_read (void *);
void NV_API_CALL os_release_rwlock_write (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
@@ -170,7 +165,7 @@ NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
@@ -178,7 +173,6 @@ NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_device_vm_present (void);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
@@ -205,8 +199,6 @@ nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *,
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
NvS32 NV_API_CALL os_imex_channel_get (NvU64);
NvS32 NV_API_CALL os_imex_channel_count (void);

enum os_pci_req_atomics_type {
    OS_INTF_PCIE_REQ_ATOMICS_32BIT,
@@ -214,7 +206,6 @@ enum os_pci_req_atomics_type {
    OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
void NV_API_CALL os_pci_trigger_flr(void *handle);
NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
@@ -222,17 +213,13 @@ NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
NvBool NV_API_CALL os_is_init_ns(void);

extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvBool os_cc_enabled;
extern NvBool os_cc_sev_snp_enabled;
extern NvBool os_cc_snp_vtom_enabled;
extern NvBool os_cc_tdx_enabled;
extern NvBool os_dma_buf_enabled;
extern NvBool os_imex_channel_is_supported;

/*
 * ---------------------------------------------------------------------------
@@ -1,104 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NV_MEMORY_AREA_H
#define NV_MEMORY_AREA_H

typedef struct MemoryRange
{
    NvU64 start;
    NvU64 size;
} MemoryRange;

typedef struct MemoryArea
{
    MemoryRange *pRanges;
    NvU64 numRanges;
} MemoryArea;

static inline NvU64 memareaSize(MemoryArea memArea)
{
    NvU64 size = 0;
    NvU64 idx = 0;
    for (idx = 0; idx < memArea.numRanges; idx++)
    {
        size += memArea.pRanges[idx].size;
    }
    return size;
}

static inline MemoryRange
mrangeMake
(
    NvU64 start,
    NvU64 size
)
{
    MemoryRange range;
    range.start = start;
    range.size = size;
    return range;
}

static inline NvU64
mrangeLimit
(
    MemoryRange a
)
{
    return a.start + a.size;
}

static inline NvBool
mrangeIntersects
(
    MemoryRange a,
    MemoryRange b
)
{
    return ((a.start >= b.start) && (a.start < mrangeLimit(b))) ||
           ((b.start >= a.start) && (b.start < mrangeLimit(a)));
}

static inline NvBool
mrangeContains
(
    MemoryRange outer,
    MemoryRange inner
)
{
    return (inner.start >= outer.start) && (mrangeLimit(inner) <= mrangeLimit(outer));
}

static inline MemoryRange
mrangeOffset
(
    MemoryRange range,
    NvU64 amt
)
{
    range.start += amt;
    return range;
}

#endif /* NV_MEMORY_AREA_H */
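A hypothetical walk-through of the removed helpers, showing how a two-range MemoryArea composes:

// Hypothetical usage of the MemoryRange/MemoryArea helpers above.
static void example_memory_area(void)
{
    MemoryRange ranges[2];
    MemoryArea  area;

    ranges[0] = mrangeMake(0x0000, 0x1000);     // [0x0000, 0x1000)
    ranges[1] = mrangeMake(0x4000, 0x2000);     // [0x4000, 0x6000)

    area.pRanges   = ranges;
    area.numRanges = 2;

    // memareaSize sums the range sizes: 0x3000 here.
    // mrangeLimit(ranges[1]) is 0x6000 (one past the last byte).
    // mrangeContains/mrangeIntersects use half-open [start, limit) logic,
    // so mrangeIntersects(ranges[0], ranges[1]) is NV_FALSE.
    (void)memareaSize(area);
}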
@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -37,7 +37,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessio
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, NvBool, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t);
@@ -45,6 +45,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddres
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32 , nvgpuPmaAllocationOptions_t, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *);
void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *);
@@ -75,21 +76,18 @@ NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *, nvgpuDevi
NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_toggle_prefetch_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *);
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, const nvgpuAccessCntrConfig_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, nvgpuAccessCntrConfig_t);
NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_nvlink_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuNvlinkInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_p2p_object_create(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, NvHandle *);
void NV_API_CALL rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *, nvgpuSessionHandle_t, NvHandle);
NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalMappingInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_phys_addrs(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalPhysAddrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_retain_channel(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvHandle, NvHandle, void **, nvgpuChannelInstanceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_bind_channel_resources(nvidia_stack_t *, void *, nvgpuChannelResourceBindParams_t);
void NV_API_CALL rm_gpu_ops_release_channel(nvidia_stack_t *, void *);
@@ -102,18 +100,15 @@ void NV_API_CALL rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *, nvgpu
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t, NvU64 *);
void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32);
void NV_API_CALL rm_gpu_ops_report_fatal_error(nvidia_stack_t *, NV_STATUS error);

NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *, UvmCslContext *[], NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU32, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU32);

#endif
@@ -1,276 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#pragma once

#include <nvtypes.h>
#if defined(_MSC_VER)
#pragma warning(disable:4324)
#endif

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: rs_access.finn
//

#include "nvtypes.h"
#include "nvmisc.h"

/****************************************************************************/
/*                        Access right definitions                          */
/****************************************************************************/

//
// The meaning of each access right is documented in
// resman/docs/rmapi/resource_server/rm_capabilities.adoc
//
// RS_ACCESS_COUNT is the number of access rights that have been defined
// and are in use. All integers in the range [0, RS_ACCESS_COUNT) should
// represent valid access rights.
//
// When adding a new access right, don't forget to update
// 1) The descriptions in the resman/docs/rmapi/resource_server/rm_capabilities.adoc
// 2) RS_ACCESS_COUNT, defined below
// 3) The declaration of g_rsAccessMetadata in rs_access_rights.c
// 4) The list of access rights in drivers/common/chip-config/Chipcontrols.pm
// 5) Any relevant access right callbacks
//
#define RS_ACCESS_DUP_OBJECT 0U
#define RS_ACCESS_NICE 1U
#define RS_ACCESS_DEBUG 2U
#define RS_ACCESS_PERFMON 3U
#define RS_ACCESS_COUNT 4U

/****************************************************************************/
/*                       Access right data structures                       */
/****************************************************************************/

/*!
 * @brief A type that can be used to represent any access right.
 */
typedef NvU16 RsAccessRight;

/*!
 * @brief An internal type used to represent one limb in an access right mask.
 */
typedef NvU32 RsAccessLimb;
#define SDK_RS_ACCESS_LIMB_BITS 32

/*!
 * @brief The number of limbs in the RS_ACCESS_MASK struct.
 */
#define SDK_RS_ACCESS_MAX_LIMBS 1

/*!
 * @brief The maximum number of possible access rights supported by the
 *        current data structure definition.
 *
 * You probably want RS_ACCESS_COUNT instead, which is the number of actual
 * access rights defined.
 */
#define SDK_RS_ACCESS_MAX_COUNT (0x20) /* finn: Evaluated from "(SDK_RS_ACCESS_LIMB_BITS * SDK_RS_ACCESS_MAX_LIMBS)" */

/**
 * @brief A struct representing a set of access rights.
 *
 * Note that the values of bit positions larger than RS_ACCESS_COUNT are
 * undefined, and should not be assumed to be 0 (see RS_ACCESS_MASK_FILL).
 */
typedef struct RS_ACCESS_MASK {
    RsAccessLimb limbs[SDK_RS_ACCESS_MAX_LIMBS];
} RS_ACCESS_MASK;

/**
 * @brief A struct representing auxiliary information about each access right.
 */
typedef struct RS_ACCESS_INFO {
    NvU32 flags;
} RS_ACCESS_INFO;

/****************************************************************************/
/*                           Access right macros                            */
/****************************************************************************/

#define SDK_RS_ACCESS_LIMB_INDEX(index) ((index) / SDK_RS_ACCESS_LIMB_BITS)
#define SDK_RS_ACCESS_LIMB_POS(index) ((index) % SDK_RS_ACCESS_LIMB_BITS)

#define SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) \
    ((pAccessMask)->limbs[SDK_RS_ACCESS_LIMB_INDEX(index)])
#define SDK_RS_ACCESS_OFFSET_MASK(index) \
    NVBIT_TYPE(SDK_RS_ACCESS_LIMB_POS(index), RsAccessLimb)

/*!
 * @brief Checks that accessRight represents a valid access right.
 *
 * The valid range of access rights is [0, RS_ACCESS_COUNT).
 *
 * @param[in] accessRight The access right value to check
 *
 * @return true if accessRight is valid
 * @return false otherwise
 */
#define RS_ACCESS_BOUNDS_CHECK(accessRight) \
    (accessRight < RS_ACCESS_COUNT)

/*!
 * @brief Test whether an access right is present in a set
 *
 * @param[in] pAccessMask The set of access rights to read
 * @param[in] index The access right to examine
 *
 * @return NV_TRUE if the access right specified by index was present in the set,
 *         and NV_FALSE otherwise
 */
#define RS_ACCESS_MASK_TEST(pAccessMask, index) \
    (RS_ACCESS_BOUNDS_CHECK(index) && \
     (SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) & SDK_RS_ACCESS_OFFSET_MASK(index)) != 0)

/*!
 * @brief Add an access right to a mask
 *
 * @param[in] pAccessMask The set of access rights to modify
 * @param[in] index The access right to set
 */
#define RS_ACCESS_MASK_ADD(pAccessMask, index) \
    do \
    { \
        if (RS_ACCESS_BOUNDS_CHECK(index)) { \
            SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) |= SDK_RS_ACCESS_OFFSET_MASK(index); \
        } \
    } while (NV_FALSE)

/*!
 * @brief Remove an access right from a mask
 *
 * @param[in] pAccessMask The set of access rights to modify
 * @param[in] index The access right to unset
 */
#define RS_ACCESS_MASK_REMOVE(pAccessMask, index) \
    do \
    { \
        if (RS_ACCESS_BOUNDS_CHECK(index)) { \
            SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) &= ~SDK_RS_ACCESS_OFFSET_MASK(index); \
        } \
    } while (NV_FALSE)

/*!
 * @brief Performs an in-place union between two access right masks
 *
 * @param[in,out] pMaskOut The access rights mask to be updated
 * @param[in] pMaskIn The set of access rights to be added to pMaskOut
 */
#define RS_ACCESS_MASK_UNION(pMaskOut, pMaskIn) \
    do \
    { \
        NvLength limb; \
        for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \
        { \
            SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) |= SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \
        } \
    } while (NV_FALSE)

/*!
 * @brief Performs an in-place subtract of one mask's rights from another
 *
 * @param[in,out] pMaskOut The access rights mask to be updated
 * @param[in] pMaskIn The set of access rights to be removed from pMaskOut
 */
#define RS_ACCESS_MASK_SUBTRACT(pMaskOut, pMaskIn) \
    do \
    { \
        NvLength limb; \
        for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \
        { \
            SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) &= ~SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \
        } \
    } while (NV_FALSE)

/*!
 * @brief Removes all rights from an access rights mask
 *
 * @param[in,out] pAccessMask The access rights mask to be updated
 */
#define RS_ACCESS_MASK_CLEAR(pAccessMask) \
    do \
    { \
        portMemSet(pAccessMask, 0, sizeof(*pAccessMask)); \
    } while (NV_FALSE)

/*!
 * @brief Adds all rights to an access rights mask
 *
 * @param[in,out] pAccessMask The access rights mask to be updated
 */
#define RS_ACCESS_MASK_FILL(pAccessMask) \
    do \
    { \
        portMemSet(pAccessMask, 0xff, sizeof(*pAccessMask)); \
    } while (NV_FALSE)

/****************************************************************************/
/*                            Share definitions                             */
/****************************************************************************/

//
// The usage of Share Policy and the meaning of each share type is documented in
// resman/docs/rmapi/resource_server/rm_capabilities.adoc
//
#define RS_SHARE_TYPE_NONE (0U)
#define RS_SHARE_TYPE_ALL (1U)
#define RS_SHARE_TYPE_OS_SECURITY_TOKEN (2U)
#define RS_SHARE_TYPE_CLIENT (3U)
#define RS_SHARE_TYPE_PID (4U)
#define RS_SHARE_TYPE_SMC_PARTITION (5U)
#define RS_SHARE_TYPE_GPU (6U)
#define RS_SHARE_TYPE_FM_CLIENT (7U)
// Must be last. Update when a new SHARE_TYPE is added
#define RS_SHARE_TYPE_MAX (8U)

//
// Use Revoke to remove an existing policy from the list.
// Allow is based on OR logic, Require is based on AND logic.
// To share a right, at least one Allow (non-Require) must match, and all Require must pass.
// If Compose is specified, policies will be added to the list. Otherwise, they will replace the list.
//
#define RS_SHARE_ACTION_FLAG_REVOKE NVBIT(0)
#define RS_SHARE_ACTION_FLAG_REQUIRE NVBIT(1)
#define RS_SHARE_ACTION_FLAG_COMPOSE NVBIT(2)

/****************************************************************************/
/*                         Share flag data structures                       */
/****************************************************************************/

typedef struct RS_SHARE_POLICY {
    NvU32 target;
    RS_ACCESS_MASK accessMask;
    NvU16 type; ///< RS_SHARE_TYPE_
    NvU8 action; ///< RS_SHARE_ACTION_
} RS_SHARE_POLICY;
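A hypothetical sketch of how these removed macros were meant to compose, building a share policy that allows debugging for one PID:

// Hypothetical usage of the RS_ACCESS_* and RS_SHARE_* definitions above.
// portMemSet is the RM memset wrapper used by MASK_CLEAR/MASK_FILL; the PID
// value is invented for illustration.
static void example_share_policy(void)
{
    RS_SHARE_POLICY policy;

    policy.type   = RS_SHARE_TYPE_PID;
    policy.target = 1234;                         // hypothetical PID
    policy.action = RS_SHARE_ACTION_FLAG_COMPOSE;

    RS_ACCESS_MASK_CLEAR(&policy.accessMask);
    RS_ACCESS_MASK_ADD(&policy.accessMask, RS_ACCESS_DEBUG);

    // RS_ACCESS_MASK_TEST(&policy.accessMask, RS_ACCESS_DEBUG) is now NV_TRUE;
    // out-of-range rights are rejected by RS_ACCESS_BOUNDS_CHECK, so
    // RS_ACCESS_MASK_ADD(&policy.accessMask, 99) would be a silent no-op.
}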
File diff suppressed because it is too large
@@ -1,111 +0,0 @@
# Each of these headers is checked for presence with a test #include; a
# corresponding #define will be generated in conftest/headers.h.
NV_HEADER_PRESENCE_TESTS = \
  asm/system.h \
  drm/drmP.h \
  drm/drm_aperture.h \
  drm/drm_auth.h \
  drm/drm_gem.h \
  drm/drm_crtc.h \
  drm/drm_color_mgmt.h \
  drm/drm_atomic.h \
  drm/drm_atomic_helper.h \
  drm/drm_atomic_state_helper.h \
  drm/drm_encoder.h \
  drm/drm_atomic_uapi.h \
  drm/drm_drv.h \
  drm/drm_fbdev_generic.h \
  drm/drm_fbdev_ttm.h \
  drm/drm_client_setup.h \
  drm/drm_framebuffer.h \
  drm/drm_connector.h \
  drm/drm_probe_helper.h \
  drm/drm_blend.h \
  drm/drm_fourcc.h \
  drm/drm_prime.h \
  drm/drm_plane.h \
  drm/drm_vblank.h \
  drm/drm_file.h \
  drm/drm_ioctl.h \
  drm/drm_device.h \
  drm/drm_mode_config.h \
  drm/drm_modeset_lock.h \
  drm/drm_property.h \
  drm/clients/drm_client_setup.h \
  dt-bindings/interconnect/tegra_icc_id.h \
  generated/autoconf.h \
  generated/compile.h \
  generated/utsrelease.h \
  linux/aperture.h \
  linux/dma-direct.h \
  linux/efi.h \
  linux/kconfig.h \
  linux/platform/tegra/mc_utils.h \
  linux/printk.h \
  linux/ratelimit.h \
  linux/prio_tree.h \
  linux/log2.h \
  linux/of.h \
  linux/bug.h \
  linux/sched.h \
  linux/sched/mm.h \
  linux/sched/signal.h \
  linux/sched/task.h \
  linux/sched/task_stack.h \
  xen/ioemu.h \
  linux/fence.h \
  linux/dma-fence.h \
  linux/dma-resv.h \
  soc/tegra/chip-id.h \
  soc/tegra/fuse.h \
  soc/tegra/fuse-helper.h \
  soc/tegra/tegra_bpmp.h \
  video/nv_internal.h \
  linux/platform/tegra/dce/dce-client-ipc.h \
  linux/nvhost.h \
  linux/nvhost_t194.h \
  linux/host1x-next.h \
  asm/book3s/64/hash-64k.h \
  asm/set_memory.h \
  asm/prom.h \
  asm/powernv.h \
  linux/atomic.h \
  asm/barrier.h \
  asm/opal-api.h \
  sound/hdaudio.h \
  asm/pgtable_types.h \
  asm/page.h \
  linux/stringhash.h \
  linux/dma-map-ops.h \
  rdma/peer_mem.h \
  sound/hda_codec.h \
  linux/dma-buf.h \
  linux/time.h \
  linux/platform_device.h \
  linux/mutex.h \
  linux/reset.h \
  linux/of_platform.h \
  linux/of_device.h \
  linux/of_gpio.h \
  linux/gpio.h \
  linux/gpio/consumer.h \
  linux/interconnect.h \
  linux/pm_runtime.h \
  linux/clk.h \
  linux/clk-provider.h \
  linux/ioasid.h \
  linux/stdarg.h \
  linux/iosys-map.h \
  asm/coco.h \
  linux/vfio_pci_core.h \
  linux/mdev.h \
  soc/tegra/bpmp-abi.h \
  soc/tegra/bpmp.h \
  linux/sync_file.h \
  linux/cc_platform.h \
  asm/cpufeature.h \
  linux/mpi.h \
  asm/mshyperv.h \
  crypto/sig.h \
  linux/pfn_t.h
@@ -1,334 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nv-kthread-q.h"
#include "nv-list-helpers.h"

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>

#if defined(NV_LINUX_BUG_H_PRESENT)
    #include <linux/bug.h>
#else
    #include <asm/bug.h>
#endif

// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
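A hypothetical caller following the pattern the comment above describes (the API names come from nv-kthread-q.h; the work callback and queue name are invented for illustration):

// Hypothetical end-to-end usage of the nv-kthread-q API described above.
static void example_work_fn(void *args)
{
    // Runs in the queue's kthread context; may sleep.
    (void)args;
}

static int example_queue_usage(void)
{
    static nv_kthread_q_t q;
    static nv_kthread_q_item_t item;
    int ret;

    ret = nv_kthread_q_init(&q, "nvq_example");
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&item, example_work_fn, NULL);
    nv_kthread_q_schedule_q_item(&q, &item);

    nv_kthread_q_stop(&q); // flushes the queue, then stops the kthread
    return 0;
}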
#ifndef WARN
|
|
||||||
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
|
|
||||||
// to implement this, because such kernels won't be supported much longer.
|
|
||||||
#define WARN(condition, format...) ({ \
|
|
||||||
int __ret_warn_on = !!(condition); \
|
|
||||||
if (unlikely(__ret_warn_on)) \
|
|
||||||
printk(KERN_ERR format); \
|
|
||||||
unlikely(__ret_warn_on); \
|
|
||||||
})
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define NVQ_WARN(fmt, ...) \
|
|
||||||
do { \
|
|
||||||
if (in_interrupt()) { \
|
|
||||||
WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
|
|
||||||
##__VA_ARGS__); \
|
|
||||||
} \
|
|
||||||
else { \
|
|
||||||
WARN(1, "nv_kthread_q: task: %s: " fmt, \
|
|
||||||
current->comm, \
|
|
||||||
##__VA_ARGS__); \
|
|
||||||
} \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
static int _main_loop(void *args)
|
|
||||||
{
|
|
||||||
nv_kthread_q_t *q = (nv_kthread_q_t *)args;
|
|
||||||
nv_kthread_q_item_t *q_item = NULL;
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
while (1) {
|
|
||||||
// Normally this thread is never interrupted. However,
|
|
||||||
// down_interruptible (instead of down) is called here,
|
|
||||||
// in order to avoid being classified as a potentially
|
|
||||||
// hung task, by the kernel watchdog.
|
|
||||||
while (down_interruptible(&q->q_sem))
|
|
||||||
NVQ_WARN("Interrupted during semaphore wait\n");
|
|
||||||
|
|
||||||
if (atomic_read(&q->main_loop_should_exit))
|
|
||||||
break;
|
|
||||||
|
|
||||||
spin_lock_irqsave(&q->q_lock, flags);
|
|
||||||
|
|
||||||
// The q_sem semaphore prevents us from getting here unless there is
|
|
||||||
// at least one item in the list, so an empty list indicates a bug.
|
|
||||||
if (unlikely(list_empty(&q->q_list_head))) {
|
|
||||||
spin_unlock_irqrestore(&q->q_lock, flags);
|
|
||||||
NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume one item from the queue
|
|
||||||
q_item = list_first_entry(&q->q_list_head,
|
|
||||||
nv_kthread_q_item_t,
|
|
||||||
q_list_node);
|
|
||||||
|
|
||||||
list_del_init(&q_item->q_list_node);
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&q->q_lock, flags);
|
|
||||||
|
|
||||||
// Run the item
|
|
||||||
q_item->function_to_run(q_item->function_args);
|
|
||||||
|
|
||||||
// Make debugging a little simpler by clearing this between runs:
|
|
||||||
q_item = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (!kthread_should_stop())
|
|
||||||
schedule();
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void nv_kthread_q_stop(nv_kthread_q_t *q)
|
|
||||||
{
|
|
||||||
// check if queue has been properly initialized
|
|
||||||
if (unlikely(!q->q_kthread))
|
|
||||||
return;
|
|
||||||
|
|
||||||
nv_kthread_q_flush(q);
|
|
||||||
|
|
||||||
// If this assertion fires, then a caller likely either broke the API rules,
|
|
||||||
// by adding items after calling nv_kthread_q_stop, or possibly messed up
|
|
||||||
// with inadequate flushing of self-rescheduling q_items.
|
|
||||||
if (unlikely(!list_empty(&q->q_list_head)))
|
|
||||||
NVQ_WARN("list not empty after flushing\n");
|
|
||||||
|
|
||||||
if (likely(!atomic_read(&q->main_loop_should_exit))) {
|
|
||||||
|
|
||||||
atomic_set(&q->main_loop_should_exit, 1);
|
|
||||||
|
|
||||||
// Wake up the kthread so that it can see that it needs to stop:
|
|
||||||
up(&q->q_sem);
|
|
||||||
|
|
||||||
kthread_stop(q->q_kthread);
|
|
||||||
q->q_kthread = NULL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads:
// -If the first thread's stack is resident on the preferred node, return this
//  thread.
// -Otherwise, create a second thread. If its stack is resident on the
//  preferred node, stop the first thread and return this one.
// -Otherwise, create a third thread. The stack allocator does not find a
//  cached stack, and so falls back to vmalloc, which takes the NUMA hint into
//  consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
                                                 nv_kthread_q_t *q,
                                                 int preferred_node,
                                                 const char *q_name)
{
    unsigned i, j;
    static const unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
        struct page *stack;

        thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);

        if (unlikely(IS_ERR(thread[i]))) {

            // Instead of failing, pick the previous thread, even if its
            // stack is not allocated on the preferred node.
            if (i > 0)
                i--;

            break;
        }

        // vmalloc is not used to allocate the stack, so simply return the
        // thread, even if its stack may not be allocated on the preferred node
        if (!is_vmalloc_addr(thread[i]->stack))
            break;

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if (i == (attempts - 1))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
        // it is the preferred node, select this thread.
        stack = vmalloc_to_page(thread[i]->stack);
        if (page_to_nid(stack) == preferred_node)
            break;
    }

    for (j = i; j > 0; j--)
        kthread_stop(thread[j - 1]);

    return thread[i];
}

int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
    memset(q, 0, sizeof(*q));

    INIT_LIST_HEAD(&q->q_list_head);
    spin_lock_init(&q->q_lock);
    sema_init(&q->q_sem, 0);

    if (preferred_node == NV_KTHREAD_NO_NODE) {
        q->q_kthread = kthread_create(_main_loop, q, q_name);
    }
    else {
        q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
    }

    if (IS_ERR(q->q_kthread)) {
        int err = PTR_ERR(q->q_kthread);

        // Clear q_kthread before returning so that nv_kthread_q_stop() can be
        // safely called on it, making error handling easier.
        q->q_kthread = NULL;

        return err;
    }

    wake_up_process(q->q_kthread);

    return 0;
}

int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
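
// Illustrative usage sketch (not part of the original file): the minimal
// lifecycle of a queue, assuming a hypothetical callback my_work_fn and
// caller-owned storage. All APIs shown are the ones defined in this file.
//
//     static void my_work_fn(void *args)
//     {
//         pr_info("q_item ran with args %p\n", args);
//     }
//
//     static nv_kthread_q_t my_q;
//     static nv_kthread_q_item_t my_item;
//
//     static int my_q_example(void)
//     {
//         int ret = nv_kthread_q_init(&my_q, "my_q");
//         if (ret)
//             return ret;
//
//         nv_kthread_q_item_init(&my_item, my_work_fn, NULL);
//         nv_kthread_q_schedule_q_item(&my_q, &my_item);
//
//         nv_kthread_q_flush(&my_q);  // wait until my_work_fn has run
//         nv_kthread_q_stop(&my_q);   // flushes again and reaps the kthread
//         return 0;
//     }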

// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    unsigned long flags;
    int ret = 1;

    spin_lock_irqsave(&q->q_lock, flags);

    if (likely(list_empty(&q_item->q_list_node)))
        list_add_tail(&q_item->q_list_node, &q->q_list_head);
    else
        ret = 0;

    spin_unlock_irqrestore(&q->q_lock, flags);

    if (likely(ret))
        up(&q->q_sem);

    return ret;
}

void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
                            nv_q_func_t function_to_run,
                            void *function_args)
{
    INIT_LIST_HEAD(&q_item->q_list_node);
    q_item->function_to_run = function_to_run;
    q_item->function_args = function_args;
}

// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
                                 nv_kthread_q_item_t *q_item)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
                 "called with a non-alive q: 0x%p\n", q);
        return 0;
    }

    return _raw_q_schedule(q, q_item);
}
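
// Illustrative note (not part of the original file): pending-ness is tracked
// by whether q_item->q_list_node is linked, so scheduling an item that is
// already queued collapses into a single run. A sketch, reusing the
// hypothetical my_q/my_item from the earlier example:
//
//     nv_kthread_q_schedule_q_item(&my_q, &my_item);  // returns 1: queued
//     nv_kthread_q_schedule_q_item(&my_q, &my_item);  // returns 0: already pending
//     // my_work_fn executes once for both calls.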

static void _q_flush_function(void *args)
{
    struct completion *completion = (struct completion *)args;
    complete(completion);
}

static void _raw_q_flush(nv_kthread_q_t *q)
{
    nv_kthread_q_item_t q_item;
    DECLARE_COMPLETION_ONSTACK(completion);

    nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);

    _raw_q_schedule(q, &q_item);

    // Wait for the flush item to run. Once it has run, then all of the
    // previously queued items in front of it will have run, so that means
    // the flush is complete.
    wait_for_completion(&completion);
}

void nv_kthread_q_flush(nv_kthread_q_t *q)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
                 "nv_kthread_q_stop. q: 0x%p\n", q);
        return;
    }

    // This 2x flush is not a typing mistake. The queue really does have to be
    // flushed twice, in order to take care of the case of a q_item that
    // reschedules itself.
    _raw_q_flush(q);
    _raw_q_flush(q);
}

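// Illustrative note (not part of the original file): the double flush above is
// sized for a q_item that reschedules itself once per run, e.g.:
//
//     static void self_resched_fn(void *args)
//     {
//         nv_kthread_q_item_t *item = (nv_kthread_q_item_t *)args;
//         // Hypothetical: requeue ourselves onto the global my_q once more.
//         nv_kthread_q_schedule_q_item(&my_q, item);
//     }
//
// The first _raw_q_flush() waits for the original run, which may requeue the
// item behind the flush marker; the second waits for that rescheduled run. An
// item that unconditionally reschedules itself can never be fully flushed,
// which is what the warning in nv_kthread_q_stop() guards against.
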
@@ -25,15 +25,6 @@
 #include <linux/module.h>
 
 #include "nv-pci-table.h"
-#include "cpuopsys.h"
-
-#if defined(NV_BSD)
-/* Define PCI classes that FreeBSD's linuxkpi is missing */
-#define PCI_VENDOR_ID_NVIDIA    0x10de
-#define PCI_CLASS_DISPLAY_VGA   0x0300
-#define PCI_CLASS_DISPLAY_3D    0x0302
-#define PCI_CLASS_BRIDGE_OTHER  0x0680
-#endif
 
 /* Devices supported by RM */
 struct pci_device_id nv_pci_table[] = {
@@ -57,7 +48,7 @@ struct pci_device_id nv_pci_table[] = {
 };
 
 /* Devices supported by all drivers in nvidia.ko */
-struct pci_device_id nv_module_device_table[4] = {
+struct pci_device_id nv_module_device_table[] = {
     {
         .vendor      = PCI_VENDOR_ID_NVIDIA,
         .device      = PCI_ANY_ID,
@@ -85,6 +76,4 @@ struct pci_device_id nv_module_device_table[4] = {
     { }
 };
 
-#if defined(NV_LINUX)
 MODULE_DEVICE_TABLE(pci, nv_module_device_table);
-#endif

@@ -27,6 +27,5 @@
 #include <linux/pci.h>
 
 extern struct pci_device_id nv_pci_table[];
-extern struct pci_device_id nv_module_device_table[4];
 
 #endif /* _NV_PCI_TABLE_H_ */

@@ -1,120 +0,0 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NV_COMMON_UTILS_H__
-#define __NV_COMMON_UTILS_H__
-
-#include "nvtypes.h"
-#include "nvmisc.h"
-
-#if !defined(TRUE)
-#define TRUE NV_TRUE
-#endif
-
-#if !defined(FALSE)
-#define FALSE NV_FALSE
-#endif
-
-#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0)
-
-/* Get the length of a statically-sized array. */
-#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0]))
-
-#define NV_INVALID_HEAD 0xFFFFFFFF
-
-#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0)
-
-#if !defined(NV_MIN)
-# define NV_MIN(a,b) (((a)<(b))?(a):(b))
-#endif
-
-#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c)
-#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d)
-
-#if !defined(NV_MAX)
-# define NV_MAX(a,b) (((a)>(b))?(a):(b))
-#endif
-
-#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c)
-#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d)
-
-static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max)
-{
-    if (val < min) {
-        return min;
-    }
-    if (val > max) {
-        return max;
-    }
-    return val;
-}
-
-#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0))
-
-/*
- * Macros used for computing palette entries:
- *
- * NV_UNDER_REPLICATE(val, source_size, result_size) expands a value
- * of source_size bits into a value of result_size bits by shifting
- * the source value into the high bits and replicating the high bits
- * of the value into the low bits of the result.
- *
- * PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component
- * that has w bits to an appropriate entry in a LUT of 256 entries.
- */
-static inline unsigned int NV_UNDER_REPLICATE(unsigned short val,
-                                              int source_size,
-                                              int result_size)
-{
-    return (val << (result_size - source_size)) |
-           (val >> ((source_size << 1) - result_size));
-}
-
-static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth)
-{
-    return NV_UNDER_REPLICATE(val, depth, 8);
-}
-
-/*
- * Use __builtin_ffs where it is supported, or provide an equivalent
- * implementation for platforms like riscv where it is not.
- */
-#if defined(__GNUC__) && !NVCPU_IS_RISCV64
-static inline int nv_ffs(int x)
-{
-    return __builtin_ffs(x);
-}
-#else
-static inline int nv_ffs(int x)
-{
-    if (x == 0)
-        return 0;
-
-    LOWESTBITIDX_32(x);
-
-    return 1 + x;
-}
-#endif
-
-#endif /* __NV_COMMON_UTILS_H__ */

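To make the bit-replication above concrete, a quick worked check (illustrative
only, using the definition from the deleted header): expanding the 5-bit
maximum 0x1F to 8 bits yields (0x1F << 3) | (0x1F >> 2) = 0xF8 | 0x07 = 0xFF,
so full scale maps to full scale, and 0x10 yields (0x10 << 3) | (0x10 >> 2) =
0x84, preserving the value's position in the range.

    /* Standalone sanity check of the replication math (hypothetical test). */
    #include <assert.h>

    static unsigned int under_replicate(unsigned short val,
                                        int source_size, int result_size)
    {
        return (val << (result_size - source_size)) |
               (val >> ((source_size << 1) - result_size));
    }

    int main(void)
    {
        assert(under_replicate(0x1F, 5, 8) == 0xFF);
        assert(under_replicate(0x10, 5, 8) == 0x84);
        return 0;
    }
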
@@ -43,13 +43,9 @@
 #if defined(NV_LINUX_FENCE_H_PRESENT)
 typedef struct fence nv_dma_fence_t;
 typedef struct fence_ops nv_dma_fence_ops_t;
-typedef struct fence_cb nv_dma_fence_cb_t;
-typedef fence_func_t nv_dma_fence_func_t;
 #else
 typedef struct dma_fence nv_dma_fence_t;
 typedef struct dma_fence_ops nv_dma_fence_ops_t;
-typedef struct dma_fence_cb nv_dma_fence_cb_t;
-typedef dma_fence_func_t nv_dma_fence_func_t;
 #endif
 
 #if defined(NV_LINUX_FENCE_H_PRESENT)
@@ -101,14 +97,6 @@ static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
 #endif
 }
 
-static inline int nv_dma_fence_signal_locked(nv_dma_fence_t *fence) {
-#if defined(NV_LINUX_FENCE_H_PRESENT)
-    return fence_signal_locked(fence);
-#else
-    return dma_fence_signal_locked(fence);
-#endif
-}
-
 static inline u64 nv_dma_fence_context_alloc(unsigned num) {
 #if defined(NV_LINUX_FENCE_H_PRESENT)
     return fence_context_alloc(num);
@@ -120,7 +108,7 @@ static inline u64 nv_dma_fence_context_alloc(unsigned num) {
 static inline void
 nv_dma_fence_init(nv_dma_fence_t *fence,
                   const nv_dma_fence_ops_t *ops,
-                  spinlock_t *lock, u64 context, uint64_t seqno) {
+                  spinlock_t *lock, u64 context, unsigned seqno) {
 #if defined(NV_LINUX_FENCE_H_PRESENT)
     fence_init(fence, ops, lock, context, seqno);
 #else
@@ -128,29 +116,6 @@ nv_dma_fence_init(nv_dma_fence_t *fence,
 #endif
 }
 
-static inline void
-nv_dma_fence_set_error(nv_dma_fence_t *fence,
-                       int error) {
-#if defined(NV_DMA_FENCE_SET_ERROR_PRESENT)
-    return dma_fence_set_error(fence, error);
-#elif defined(NV_FENCE_SET_ERROR_PRESENT)
-    return fence_set_error(fence, error);
-#else
-    fence->status = error;
-#endif
-}
-
-static inline int
-nv_dma_fence_add_callback(nv_dma_fence_t *fence,
-                          nv_dma_fence_cb_t *cb,
-                          nv_dma_fence_func_t func) {
-#if defined(NV_LINUX_FENCE_H_PRESENT)
-    return fence_add_callback(fence, cb, func);
-#else
-    return dma_fence_add_callback(fence, cb, func);
-#endif
-}
-
 #endif /* defined(NV_DRM_FENCE_AVAILABLE) */
 
 #endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */

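For orientation, a hedged sketch of how these version-agnostic wrappers are
typically driven (the ops table and lock are assumptions; real fence_ops /
dma_fence_ops callbacks are elided):

    /* Illustrative only: allocate a fence context, initialize, then signal. */
    static DEFINE_SPINLOCK(example_lock);        /* assumed driver lock */
    static const nv_dma_fence_ops_t example_ops; /* assumed ops, details elided */

    static void example_fence_use(nv_dma_fence_t *fence)
    {
        u64 ctx = nv_dma_fence_context_alloc(1);

        nv_dma_fence_init(fence, &example_ops, &example_lock, ctx, 1);
        nv_dma_fence_signal(fence);
    }
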
@@ -121,20 +121,6 @@ static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
 #endif
 }
 
-static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
-                                                nv_dma_fence_t *fence)
-{
-#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
-#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
-    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
-#else
-    dma_resv_add_shared_fence(obj, fence);
-#endif
-#else
-    reservation_object_add_shared_fence(obj, fence);
-#endif
-}
-
 #endif /* defined(NV_DRM_FENCE_AVAILABLE) */
 
 #endif /* __NVIDIA_DMA_RESV_HELPER_H__ */

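As a point of reference for the helper above, callers are expected to hold the
reservation lock around fence bookkeeping. A hedged sketch (the locking calls
are the stock dma_resv API; obj and fence are assumed to exist):

    dma_resv_lock(obj, NULL);   /* or already held by a DRM atomic helper */
    nv_dma_resv_add_shared_fence(obj, fence);
    dma_resv_unlock(obj);
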
@@ -24,7 +24,6 @@
 #define __NVIDIA_DRM_CONFTEST_H__
 
 #include "conftest.h"
-#include "nvtypes.h"
 
 /*
  * NOTE: This file is expected to get included at the top before including any
@@ -62,147 +61,4 @@
 #undef NV_DRM_FENCE_AVAILABLE
 #endif
 
-#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
-    (defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
-     defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))
-// XXX remove dependency on DRM_TTM_HELPER by implementing nvidia-drm's own
-// .fbdev_probe callback that uses NVKMS kapi
-#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
-#define NV_DRM_FBDEV_AVAILABLE
-#define NV_DRM_CLIENT_AVAILABLE
-#endif
-#endif
-
-/*
- * We can support color management if either drm_helper_crtc_enable_color_mgmt()
- * or drm_crtc_enable_color_mgmt() exist.
- */
-#if defined(NV_DRM_HELPER_CRTC_ENABLE_COLOR_MGMT_PRESENT) || \
-    defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
-#define NV_DRM_COLOR_MGMT_AVAILABLE
-#else
-#undef NV_DRM_COLOR_MGMT_AVAILABLE
-#endif
-
-/*
- * Adapt to quirks in FreeBSD's Linux kernel compatibility layer.
- */
-#if defined(NV_BSD)
-
-#include <linux/rwsem.h>
-#include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/sx.h>
-
-/* For nv_drm_gem_prime_force_fence_signal */
-#ifndef spin_is_locked
-#if ((__FreeBSD_version >= 1500000) && (__FreeBSD_version < 1500018)) || (__FreeBSD_version < 1401501)
-#define spin_is_locked(lock) mtx_owned(lock.m)
-#else
-#define spin_is_locked(lock) mtx_owned(lock)
-#endif
-#endif
-
-#ifndef rwsem_is_locked
-#define rwsem_is_locked(sem) (((sem)->sx.sx_lock & (SX_LOCK_SHARED)) \
-                              || ((sem)->sx.sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)))
-#endif
-
-/*
- * FreeBSD does not define vm_flags_t in its linuxkpi, since there is already
- * a FreeBSD vm_flags_t (of a different size) and they don't want the names to
- * collide. Temporarily redefine it when including nv-mm.h
- */
-#define vm_flags_t unsigned long
-#include "nv-mm.h"
-#undef vm_flags_t
-
-/*
- * sys/nv.h and nvidia/nv.h have the same header guard
- * we need to clear it for nvlist_t to get loaded
- */
-#undef _NV_H_
-#include <sys/nv.h>
-
-/*
- * For now just use set_page_dirty as the lock variant
- * is not ported for FreeBSD. (in progress). This calls
- * vm_page_dirty. Used in nv-mm.h
- */
-#define set_page_dirty_lock set_page_dirty
-
-/*
- * FreeBSD does not implement drm_atomic_state_free, simply
- * default to drm_atomic_state_put
- */
-#define drm_atomic_state_free drm_atomic_state_put
-
-#if __FreeBSD_version < 1300000
-/* redefine LIST_HEAD_INIT to the linux version */
-#include <linux/list.h>
-#define LIST_HEAD_INIT(name) LINUX_LIST_HEAD_INIT(name)
-#endif
-
-/*
- * FreeBSD currently has only vmf_insert_pfn_prot defined, and it has a
- * static assert warning not to use it since all of DRM's usages are in
- * loops with the vm obj lock(s) held. Instead we should use the lkpi
- * function itself directly. For us none of this applies so we can just
- * wrap it in our own definition of vmf_insert_pfn
- */
-#ifndef NV_VMF_INSERT_PFN_PRESENT
-#define NV_VMF_INSERT_PFN_PRESENT 1
-
-#if __FreeBSD_version < 1300000
-#define VM_SHARED (1 << 17)
-
-/* Not present in 12.2 */
-static inline vm_fault_t
-lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
-                                unsigned long pfn, pgprot_t prot)
-{
-    vm_object_t vm_obj = vma->vm_obj;
-    vm_page_t page;
-    vm_pindex_t pindex;
-
-    VM_OBJECT_ASSERT_WLOCKED(vm_obj);
-    pindex = OFF_TO_IDX(addr - vma->vm_start);
-    if (vma->vm_pfn_count == 0)
-        vma->vm_pfn_first = pindex;
-    MPASS(pindex <= OFF_TO_IDX(vma->vm_end));
-
-    page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NORMAL);
-    if (page == NULL) {
-        page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
-        vm_page_xbusy(page);
-        if (vm_page_insert(page, vm_obj, pindex)) {
-            vm_page_xunbusy(page);
-            return (VM_FAULT_OOM);
-        }
-        page->valid = VM_PAGE_BITS_ALL;
-    }
-    pmap_page_set_memattr(page, pgprot2cachemode(prot));
-    vma->vm_pfn_count++;
-
-    return (VM_FAULT_NOPAGE);
-}
-#endif
-
-static inline vm_fault_t
-vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-               unsigned long pfn)
-{
-    vm_fault_t ret;
-
-    VM_OBJECT_WLOCK(vma->vm_obj);
-    ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, vma->vm_page_prot);
-    VM_OBJECT_WUNLOCK(vma->vm_obj);
-
-    return (ret);
-}
-
-#endif
-
-#endif /* defined(NV_BSD) */
-
 #endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */

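For context, a hedged sketch (not from the diff) of how a shim like the
vmf_insert_pfn above gets exercised from a GEM object's page-fault handler:

    /* Illustrative only: insert one pfn at the faulting address. */
    static vm_fault_t example_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long pfn = 0; /* hypothetical: resolved from the backing object */

        return vmf_insert_pfn(vma, vmf->address, pfn);
    }
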
@@ -314,11 +314,7 @@ static int nv_drm_connector_get_modes(struct drm_connector *connector)
 }
 
 static int nv_drm_connector_mode_valid(struct drm_connector *connector,
-#if defined(NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG)
-                                        const struct drm_display_mode *mode)
-#else
                                         struct drm_display_mode *mode)
-#endif
 {
     struct drm_device *dev = connector->dev;
     struct nv_drm_device *nv_dev = to_nv_device(dev);
@@ -353,125 +349,10 @@ nv_drm_connector_best_encoder(struct drm_connector *connector)
     return NULL;
 }
 
-#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
-static const NvU32 __nv_drm_connector_supported_colorspaces =
-    BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
-    BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
-#endif
-
-#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
-static int
-__nv_drm_connector_atomic_check(struct drm_connector *connector,
-                                struct drm_atomic_state *state)
-{
-    struct drm_connector_state *new_connector_state =
-        drm_atomic_get_new_connector_state(state, connector);
-    struct drm_connector_state *old_connector_state =
-        drm_atomic_get_old_connector_state(state, connector);
-    struct nv_drm_device *nv_dev = to_nv_device(connector->dev);
-
-    struct drm_crtc *crtc = new_connector_state->crtc;
-    struct drm_crtc_state *crtc_state;
-    struct nv_drm_crtc_state *nv_crtc_state;
-    struct NvKmsKapiHeadRequestedConfig *req_config;
-
-    if (!crtc) {
-        return 0;
-    }
-
-    crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-    nv_crtc_state = to_nv_crtc_state(crtc_state);
-    req_config = &nv_crtc_state->req_config;
-
-    /*
-     * Override metadata for the entire head instead of allowing NVKMS to derive
-     * it from the layers' metadata.
-     *
-     * This is the metadata that will sent to the display, and if applicable,
-     * layers will be tone mapped to this metadata rather than that of the
-     * display.
-     */
-    req_config->flags.hdrInfoFrameChanged =
-        !drm_connector_atomic_hdr_metadata_equal(old_connector_state,
-                                                 new_connector_state);
-    if (new_connector_state->hdr_output_metadata &&
-        new_connector_state->hdr_output_metadata->data) {
-
-        /*
-         * Note that HDMI definitions are used here even though we might not
-         * be using HDMI. While that seems odd, it is consistent with
-         * upstream behavior.
-         */
-
-        struct hdr_output_metadata *hdr_metadata =
-            new_connector_state->hdr_output_metadata->data;
-        struct hdr_metadata_infoframe *info_frame =
-            &hdr_metadata->hdmi_metadata_type1;
-        unsigned int i;
-
-        if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) {
-            return -EINVAL;
-        }
-
-        for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i++) {
-            req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].x =
-                info_frame->display_primaries[i].x;
-            req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].y =
-                info_frame->display_primaries[i].y;
-        }
-
-        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.x =
-            info_frame->white_point.x;
-        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.y =
-            info_frame->white_point.y;
-        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxDisplayMasteringLuminance =
-            info_frame->max_display_mastering_luminance;
-        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.minDisplayMasteringLuminance =
-            info_frame->min_display_mastering_luminance;
-        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxCLL =
-            info_frame->max_cll;
-        req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxFALL =
-            info_frame->max_fall;
-
-        req_config->modeSetConfig.hdrInfoFrame.eotf = info_frame->eotf;
-
-        req_config->modeSetConfig.hdrInfoFrame.enabled = NV_TRUE;
-    } else {
-        req_config->modeSetConfig.hdrInfoFrame.enabled = NV_FALSE;
-    }
-
-    req_config->flags.colorimetryChanged =
-        (old_connector_state->colorspace != new_connector_state->colorspace);
-    // When adding a case here, also add to __nv_drm_connector_supported_colorspaces
-    switch (new_connector_state->colorspace) {
-        case DRM_MODE_COLORIMETRY_DEFAULT:
-            req_config->modeSetConfig.colorimetry =
-                NVKMS_OUTPUT_COLORIMETRY_DEFAULT;
-            break;
-        case DRM_MODE_COLORIMETRY_BT2020_RGB:
-        case DRM_MODE_COLORIMETRY_BT2020_YCC:
-            // Ignore RGB/YCC
-            // See https://patchwork.freedesktop.org/patch/525496/?series=111865&rev=4
-            req_config->modeSetConfig.colorimetry =
-                NVKMS_OUTPUT_COLORIMETRY_BT2100;
-            break;
-        default:
-            // XXX HDR TODO: Add support for more color spaces
-            NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported color space");
-            return -EINVAL;
-    }
-
-    return 0;
-}
-#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
-
 static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
     .get_modes = nv_drm_connector_get_modes,
     .mode_valid = nv_drm_connector_mode_valid,
     .best_encoder = nv_drm_connector_best_encoder,
-#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
-    .atomic_check = __nv_drm_connector_atomic_check,
-#endif
 };
 
 static struct drm_connector*
@@ -524,32 +405,6 @@ nv_drm_connector_new(struct drm_device *dev,
             DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
     }
 
-#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
-    if (nv_connector->type == NVKMS_CONNECTOR_TYPE_HDMI) {
-#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
-        if (drm_mode_create_hdmi_colorspace_property(
-                &nv_connector->base,
-                __nv_drm_connector_supported_colorspaces) == 0) {
-#else
-        if (drm_mode_create_hdmi_colorspace_property(&nv_connector->base) == 0) {
-#endif
-            drm_connector_attach_colorspace_property(&nv_connector->base);
-        }
-        drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
-    } else if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DP) {
-#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
-        if (drm_mode_create_dp_colorspace_property(
-                &nv_connector->base,
-                __nv_drm_connector_supported_colorspaces) == 0) {
-#else
-        if (drm_mode_create_dp_colorspace_property(&nv_connector->base) == 0) {
-#endif
-            drm_connector_attach_colorspace_property(&nv_connector->base);
-        }
-        drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
-    }
-#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
-
     /* Register connector with DRM subsystem */
 
     ret = drm_connector_register(&nv_connector->base);

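For orientation, the removed __nv_drm_connector_atomic_check above consumes the
standard HDR_OUTPUT_METADATA connector property. A hedged userspace-side sketch
of how such a blob gets attached (libdrm calls; the connector and property IDs,
and the already-filled metadata buffer, are assumptions):

    /* Illustrative only: wrap HDR metadata in a blob and set it. */
    #include <stddef.h>
    #include <stdint.h>
    #include <xf86drmMode.h>

    int set_hdr_blob(int fd, uint32_t connector_id, uint32_t prop_id,
                     const void *meta, size_t meta_size)
    {
        uint32_t blob_id;

        if (drmModeCreatePropertyBlob(fd, meta, meta_size, &blob_id))
            return -1;
        return drmModeObjectSetProperty(fd, connector_id,
                                        DRM_MODE_OBJECT_CONNECTOR,
                                        prop_id, blob_id);
    }
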
(File diff suppressed because it is too large.)

@@ -38,13 +38,6 @@
 #include "nvtypes.h"
 #include "nvkms-kapi.h"
 
-enum nv_drm_transfer_function {
-    NV_DRM_TRANSFER_FUNCTION_DEFAULT,
-    NV_DRM_TRANSFER_FUNCTION_LINEAR,
-    NV_DRM_TRANSFER_FUNCTION_PQ,
-    NV_DRM_TRANSFER_FUNCTION_MAX,
-};
-
 struct nv_drm_crtc {
     NvU32 head;
 
@@ -70,8 +63,6 @@ struct nv_drm_crtc {
      */
     struct drm_file *modeset_permission_filep;
 
-    struct NvKmsLUTCaps olut_caps;
-
     struct drm_crtc base;
 };
 
@@ -151,12 +142,6 @@ struct nv_drm_crtc_state {
      * nv_drm_atomic_crtc_destroy_state().
      */
     struct nv_drm_flip *nv_flip;
-
-    enum nv_drm_transfer_function regamma_tf;
-    struct drm_property_blob *regamma_lut;
-    uint64_t regamma_divisor;
-    struct nv_drm_lut_surface *regamma_drm_lut_surface;
-    NvBool regamma_changed;
 };
 
 static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state)
@@ -164,11 +149,6 @@ static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state)
     return container_of(state, struct nv_drm_crtc_state, base);
 }
 
-static inline const struct nv_drm_crtc_state *to_nv_crtc_state_const(const struct drm_crtc_state *state)
-{
-    return container_of(state, struct nv_drm_crtc_state, base);
-}
-
 struct nv_drm_plane {
     /**
      * @base:
@@ -190,9 +170,6 @@ struct nv_drm_plane {
      * Index of this plane in the per head array of layers.
      */
     uint32_t layer_idx;
-
-    struct NvKmsLUTCaps ilut_caps;
-    struct NvKmsLUTCaps tmo_caps;
 };
 
 static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
@@ -203,22 +180,6 @@ static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
     return container_of(plane, struct nv_drm_plane, base);
 }
 
-struct nv_drm_lut_surface {
-    struct NvKmsKapiDevice *pDevice;
-    struct NvKmsKapiMemory *nvkms_memory;
-    struct NvKmsKapiSurface *nvkms_surface;
-    struct {
-        NvU32 vssSegments;
-        enum NvKmsLUTVssType vssType;
-
-        NvU32 lutEntries;
-        enum NvKmsLUTFormat entryFormat;
-
-    } properties;
-    void *buffer;
-    struct kref refcount;
-};
-
 struct nv_drm_plane_state {
     struct drm_plane_state base;
     s32 __user *fd_user_ptr;
@@ -226,20 +187,6 @@ struct nv_drm_plane_state {
 #if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
     struct drm_property_blob *hdr_output_metadata;
 #endif
-    struct drm_property_blob *lms_ctm;
-    struct drm_property_blob *lms_to_itp_ctm;
-    struct drm_property_blob *itp_to_lms_ctm;
-    struct drm_property_blob *blend_ctm;
-
-    enum nv_drm_transfer_function degamma_tf;
-    struct drm_property_blob *degamma_lut;
-    uint64_t degamma_multiplier; /* S31.32 Sign-Magnitude Format */
-    struct nv_drm_lut_surface *degamma_drm_lut_surface;
-    NvBool degamma_changed;
-
-    struct drm_property_blob *tmo_lut;
-    struct nv_drm_lut_surface *tmo_drm_lut_surface;
-    NvBool tmo_changed;
 };
 
 static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state)

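A side note on the "S31.32 Sign-Magnitude" annotation on the removed
degamma_multiplier field (worked example, not from the diff): the low 32 bits
hold the fraction and bit 63 the sign, so 1.5 encodes as (1ull << 32) |
0x80000000 = 0x180000000, and -1.5 is the same value with bit 63 set.

    /* Illustrative encoder, assuming the S31.32 sign-magnitude convention. */
    #include <stdint.h>

    static uint64_t s31_32_from_double(double v)
    {
        uint64_t sign = (v < 0) ? (1ull << 63) : 0;
        double mag = (v < 0) ? -v : v;

        return sign | (uint64_t)(mag * 4294967296.0); /* mag * 2^32 */
    }
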
(File diff suppressed because it is too large.)

@@ -31,12 +31,6 @@ int nv_drm_probe_devices(void);
 
 void nv_drm_remove_devices(void);
 
-void nv_drm_suspend_resume(NvBool suspend);
-
-void nv_drm_register_drm_device(const nv_gpu_info_t *);
-
-void nv_drm_update_drm_driver_features(void);
-
 #endif /* defined(NV_DRM_AVAILABLE) */
 
 #endif /* __NVIDIA_DRM_DRV_H__ */

@@ -300,7 +300,7 @@ void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
 
     nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
 
-    schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
+    drm_kms_helper_hotplug_event(dev);
 }
 
 void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
@@ -347,6 +347,6 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
     drm_reinit_primary_mode_group(dev);
 #endif
 
-    schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
+    drm_kms_helper_hotplug_event(dev);
 }
 #endif

@@ -36,15 +36,12 @@
 
 static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
 {
-    struct drm_framebuffer *fb = &nv_fb->base;
     uint32_t i;
 
     /* Unreference gem object */
-    for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
-        struct drm_gem_object *gem = nv_fb_get_gem_obj(fb, i);
-        if (gem != NULL) {
-            struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
-            nv_drm_gem_object_unreference_unlocked(nv_gem);
+    for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
+        if (nv_fb->nv_gem[i] != NULL) {
+            nv_drm_gem_object_unreference_unlocked(nv_fb->nv_gem[i]);
         }
     }
 
@@ -72,8 +69,10 @@ static int
 nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
                                  struct drm_file *file, unsigned int *handle)
 {
+    struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
+
     return nv_drm_gem_handle_create(file,
-                                    to_nv_gem_object(nv_fb_get_gem_obj(fb, 0)),
+                                    nv_fb->nv_gem[0],
                                     handle);
 }
 
@@ -89,7 +88,6 @@ static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
 {
     struct nv_drm_device *nv_dev = to_nv_device(dev);
     struct nv_drm_framebuffer *nv_fb;
-    struct nv_drm_gem_object *nv_gem;
     const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
     uint32_t i;
 
@@ -103,22 +101,21 @@ static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
         return ERR_PTR(-ENOMEM);
     }
 
-    if (num_planes > NVKMS_MAX_PLANES_PER_SURFACE) {
+    if (num_planes > ARRAY_SIZE(nv_fb->nv_gem)) {
         NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes");
         goto failed;
     }
 
     for (i = 0; i < num_planes; i++) {
-        nv_gem = nv_drm_gem_object_lookup(dev, file, cmd->handles[i]);
-
-        if (nv_gem == NULL) {
+        if ((nv_fb->nv_gem[i] = nv_drm_gem_object_lookup(
+                        dev,
+                        file,
+                        cmd->handles[i])) == NULL) {
             NV_DRM_DEV_DEBUG_DRIVER(
                 nv_dev,
                 "Failed to find gem object of type nvkms memory");
             goto failed;
         }
-
-        nv_fb_set_gem_obj(&nv_fb->base, i, &nv_gem->base);
     }
 
     return nv_fb;
@@ -138,14 +135,12 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
 {
     struct nv_drm_device *nv_dev = to_nv_device(dev);
     struct NvKmsKapiCreateSurfaceParams params = { };
-    struct nv_drm_gem_object *nv_gem;
-    struct drm_framebuffer *fb = &nv_fb->base;
     uint32_t i;
     int ret;
 
     /* Initialize the base framebuffer object and add it to drm subsystem */
 
-    ret = drm_framebuffer_init(dev, fb, &nv_framebuffer_funcs);
+    ret = drm_framebuffer_init(dev, &nv_fb->base, &nv_framebuffer_funcs);
     if (ret != 0) {
         NV_DRM_DEV_DEBUG_DRIVER(
             nv_dev,
@@ -153,32 +148,23 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
         return ret;
     }
 
-    for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
-        struct drm_gem_object *gem = nv_fb_get_gem_obj(fb, i);
-        if (gem != NULL) {
-            nv_gem = to_nv_gem_object(gem);
+    for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
+        if (nv_fb->nv_gem[i] != NULL) {
+            if (!nvKms->isMemoryValidForDisplay(nv_dev->pDevice,
+                                                nv_fb->nv_gem[i]->pMemory)) {
+                NV_DRM_DEV_LOG_INFO(
+                    nv_dev,
+                    "Framebuffer memory not appropriate for scanout");
+                goto fail;
+            }
 
-            params.planes[i].memory = nv_gem->pMemory;
-            params.planes[i].offset = fb->offsets[i];
-            params.planes[i].pitch = fb->pitches[i];
-
-            /*
-             * XXX Use drm_framebuffer_funcs.dirty and
-             * drm_fb_helper_funcs.fb_dirty instead
-             *
-             * Currently using noDisplayCaching when registering surfaces with
-             * NVKMS that are using memory allocated through the DRM
-             * Dumb-Buffers API. This prevents Display Idle Frame Rate from
-             * kicking in and preventing CPU updates to the surface memory from
-             * not being reflected on the display. Ideally, DIFR would be
-             * dynamically disabled whenever a user of the memory blits to the
-             * frontbuffer. DRM provides the needed callbacks to achieve this.
-             */
-            params.noDisplayCaching |= !!nv_gem->is_drm_dumb;
+            params.planes[i].memory = nv_fb->nv_gem[i]->pMemory;
+            params.planes[i].offset = nv_fb->base.offsets[i];
+            params.planes[i].pitch = nv_fb->base.pitches[i];
         }
     }
-    params.height = fb->height;
-    params.width = fb->width;
+    params.height = nv_fb->base.height;
+    params.width = nv_fb->base.width;
     params.format = format;
 
     if (have_modifier) {

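The XXX comment in the hunk above points at drm_framebuffer_funcs.dirty as the
eventual replacement for blanket noDisplayCaching. A hedged sketch of the
callback's shape (the names and the NVKMS notification are hypothetical):

    /* Illustrative only: a dirty callback that could kick DIFR off on CPU writes. */
    static int example_fb_dirty(struct drm_framebuffer *fb,
                                struct drm_file *file,
                                unsigned int flags, unsigned int color,
                                struct drm_clip_rect *clips,
                                unsigned int num_clips)
    {
        /* Hypothetical: tell NVKMS the frontbuffer was written by the CPU. */
        return 0;
    }

    static const struct drm_framebuffer_funcs example_fb_funcs = {
        .destroy       = drm_gem_fb_destroy,        /* assumes a GEM-backed fb */
        .create_handle = drm_gem_fb_create_handle,
        .dirty         = example_fb_dirty,
    };
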
@@ -213,7 +199,7 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
     return 0;
 
 fail:
-    drm_framebuffer_cleanup(fb);
+    drm_framebuffer_cleanup(&nv_fb->base);
     return -EINVAL;
 }
 
@@ -254,7 +240,7 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
         if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
             NV_DRM_DEV_DEBUG_DRIVER(
                 nv_dev,
-                "Invalid format modifier for framebuffer object: 0x%016" NvU64_fmtx,
+                "Invalid format modifier for framebuffer object: 0x%016llx",
                 modifier);
             return ERR_PTR(-EINVAL);
         }

@@ -41,10 +41,8 @@
 struct nv_drm_framebuffer {
     struct NvKmsKapiSurface *pSurface;
 
-#if !defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
-    struct drm_gem_object*
-        obj[NVKMS_MAX_PLANES_PER_SURFACE];
-#endif
+    struct nv_drm_gem_object*
+        nv_gem[NVKMS_MAX_PLANES_PER_SURFACE];
 
     struct drm_framebuffer base;
 };
@@ -58,29 +56,6 @@ static inline struct nv_drm_framebuffer *to_nv_framebuffer(
     return container_of(fb, struct nv_drm_framebuffer, base);
 }
 
-static inline struct drm_gem_object *nv_fb_get_gem_obj(
-    struct drm_framebuffer *fb,
-    uint32_t plane)
-{
-#if defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
-    return fb->obj[plane];
-#else
-    return to_nv_framebuffer(fb)->obj[plane];
-#endif
-}
-
-static inline void nv_fb_set_gem_obj(
-    struct drm_framebuffer *fb,
-    uint32_t plane,
-    struct drm_gem_object *obj)
-{
-#if defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
-    fb->obj[plane] = obj;
-#else
-    to_nv_framebuffer(fb)->obj[plane] = obj;
-#endif
-}
-
 struct drm_framebuffer *nv_drm_internal_framebuffer_create(
     struct drm_device *dev,
     struct drm_file *file,

(File diff suppressed because it is too large.)

@@ -41,22 +41,6 @@ int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
 int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
                                         void *data, struct drm_file *filep);
 
-int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
-                                          void *data,
-                                          struct drm_file *filep);
-
-int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
-                                      void *data,
-                                      struct drm_file *filep);
-
-int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
-                                    void *data,
-                                    struct drm_file *filep);
-
-int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
-                                      void *data,
-                                      struct drm_file *filep);
-
 #endif /* NV_DRM_FENCE_AVAILABLE */
 
 #endif /* NV_DRM_AVAILABLE */

@@ -71,42 +71,12 @@ static int __nv_drm_gem_dma_buf_create_mmap_offset(
 static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
                                      struct vm_area_struct *vma)
 {
-#if defined(NV_LINUX)
     struct dma_buf_attachment *attach = nv_gem->base.import_attach;
     struct dma_buf *dma_buf = attach->dmabuf;
-#endif
     struct file *old_file;
     int ret;
 
     /* check if buffer supports mmap */
-#if defined(NV_BSD)
-    /*
-     * Most of the FreeBSD DRM code refers to struct file*, which is actually
-     * a struct linux_file*. The dmabuf code in FreeBSD is not actually plumbed
-     * through the same linuxkpi bits it seems (probably so it can be used
-     * elsewhere), so dma_buf->file really is a native FreeBSD struct file...
-     */
-    if (!nv_gem->base.filp->f_op->mmap)
-        return -EINVAL;
-
-    /* readjust the vma */
-    get_file(nv_gem->base.filp);
-    old_file = vma->vm_file;
-    vma->vm_file = nv_gem->base.filp;
-    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
-
-    ret = nv_gem->base.filp->f_op->mmap(nv_gem->base.filp, vma);
-
-    if (ret) {
-        /* restore old parameters on failure */
-        vma->vm_file = old_file;
-        vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
-        fput(nv_gem->base.filp);
-    } else {
-        if (old_file)
-            fput(old_file);
-    }
-#else
     if (!dma_buf->file->f_op->mmap)
         return -EINVAL;
 
@@ -114,20 +84,18 @@ static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
     get_file(dma_buf->file);
     old_file = vma->vm_file;
     vma->vm_file = dma_buf->file;
-    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
+    vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);;
 
     ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);
 
     if (ret) {
         /* restore old parameters on failure */
         vma->vm_file = old_file;
-        vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
         fput(dma_buf->file);
     } else {
         if (old_file)
             fput(old_file);
     }
-#endif
 
     return ret;
 }

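Both sides of the mmap hunks above use the same delegation trick: re-point the
vma at the dma-buf's backing file and forward to that file's mmap, so the
exporter services the mapping. Reduced to its essentials (illustrative sketch,
hypothetical helper name):

    /* Forward an mmap request to another struct file, restoring on failure. */
    static int forward_mmap(struct file *target, struct vm_area_struct *vma,
                            unsigned long pgoff_base)
    {
        struct file *old_file = vma->vm_file;
        int ret;

        get_file(target);            /* take a reference before swapping */
        vma->vm_file = target;
        vma->vm_pgoff -= pgoff_base; /* rebase the offset for the target */

        ret = target->f_op->mmap(target, vma);
        if (ret) {
            vma->vm_file = old_file; /* restore old parameters on failure */
            vma->vm_pgoff += pgoff_base;
            fput(target);
        } else if (old_file) {
            fput(old_file);
        }
        return ret;
    }
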
@@ -37,9 +37,6 @@
 #endif
 
 #include <linux/io.h>
-#if defined(NV_BSD)
-#include <vm/vm_pageout.h>
-#endif
 
 #include "nv-mm.h"
 
@@ -71,20 +68,9 @@ static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
     nv_drm_free(nv_nvkms_memory);
 }
 
-static int __nv_drm_gem_nvkms_map(
-    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory);
-
 static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem,
                                    struct vm_area_struct *vma)
 {
-    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
-        to_nv_nvkms_memory(nv_gem);
-
-    int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
-    if (ret) {
-        return ret;
-    }
-
     return drm_gem_mmap_obj(&nv_gem->base,
         drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
 }
@@ -107,17 +93,7 @@ static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
     if (nv_nvkms_memory->pages_count == 0) {
         pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
         pfn >>= PAGE_SHIFT;
-#if defined(NV_LINUX)
-        /*
-         * FreeBSD doesn't set pgoff. We instead have pfn be the base physical
-         * address, and we will calculate the index pidx from the virtual address.
-         *
-         * This only works because linux_cdev_pager_populate passes the pidx as
-         * vmf->virtual_address. Then we turn the virtual address
-         * into a physical page number.
-         */
         pfn += page_offset;
-#endif
     } else {
         BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
         pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
@@ -157,18 +133,11 @@ static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
 static int __nv_drm_gem_nvkms_map(
     struct nv_drm_gem_nvkms_memory *nv_nvkms_memory)
 {
-    int ret = 0;
     struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev;
     struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory;
 
-    mutex_lock(&nv_nvkms_memory->map_lock);
-
-    if (nv_nvkms_memory->physically_mapped) {
-        goto done;
-    }
-
-    if (!nvKms->isVidmem(pMemory)) {
-        goto done;
+    if (!nv_dev->hasVideoMemory) {
+        return 0;
     }
 
     if (!nvKms->mapMemory(nv_dev->pDevice,
@@ -179,8 +148,7 @@ static int __nv_drm_gem_nvkms_map(
             nv_dev,
             "Failed to map NvKmsKapiMemory 0x%p",
             pMemory);
-        ret = -ENOMEM;
-        goto done;
+        return -ENOMEM;
     }
 
     nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
@@ -196,9 +164,7 @@ static int __nv_drm_gem_nvkms_map(
 
     nv_nvkms_memory->physically_mapped = true;
 
-done:
-    mutex_unlock(&nv_nvkms_memory->map_lock);
-    return ret;
+    return 0;
 }
 
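Worth noting about the hunks above: one side of the diff serializes the lazy
mapping behind a dedicated mutex so the vmap/mmap/offset paths map at most
once. The shape of that pattern, reduced to its essentials (illustrative,
hypothetical types):

    /* Map-once guard: every caller funnels through the same lock. */
    static int map_once(struct example_obj *o)
    {
        int ret = 0;

        mutex_lock(&o->map_lock);
        if (!o->mapped) {
            ret = do_map(o);         /* hypothetical expensive mapping */
            if (ret == 0)
                o->mapped = true;
        }
        mutex_unlock(&o->map_lock);
        return ret;
    }
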
@@ -207,40 +173,14 @@ static void *__nv_drm_gem_nvkms_prime_vmap(
|
|||||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||||
to_nv_nvkms_memory(nv_gem);
|
to_nv_nvkms_memory(nv_gem);
|
||||||
|
|
||||||
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
|
if (!nv_nvkms_memory->physically_mapped) {
|
||||||
if (ret) {
|
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
|
||||||
return ERR_PTR(ret);
|
if (ret) {
|
||||||
|
return ERR_PTR(ret);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (nv_nvkms_memory->physically_mapped) {
|
return nv_nvkms_memory->pWriteCombinedIORemapAddress;
|
||||||
return nv_nvkms_memory->pWriteCombinedIORemapAddress;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If this buffer isn't physically mapped, it might be backed by struct
|
|
||||||
* pages. Use vmap in that case. Do a noncached mapping for system memory
|
|
||||||
* as display is non io-coherent device in case of Tegra.
|
|
||||||
*/
|
|
||||||
if (nv_nvkms_memory->pages_count > 0) {
|
|
||||||
return nv_drm_vmap(nv_nvkms_memory->pages,
|
|
||||||
nv_nvkms_memory->pages_count,
|
|
||||||
false);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ERR_PTR(-ENOMEM);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void __nv_drm_gem_nvkms_prime_vunmap(
|
|
||||||
struct nv_drm_gem_object *nv_gem,
|
|
||||||
void *address)
|
|
||||||
{
|
|
||||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
|
||||||
to_nv_nvkms_memory(nv_gem);
|
|
||||||
|
|
||||||
if (!nv_nvkms_memory->physically_mapped &&
|
|
||||||
nv_nvkms_memory->pages_count > 0) {
|
|
||||||
nv_drm_vunmap(address);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __nv_drm_gem_map_nvkms_memory_offset(
|
static int __nv_drm_gem_map_nvkms_memory_offset(
|
||||||
@@ -248,7 +188,17 @@ static int __nv_drm_gem_map_nvkms_memory_offset(
|
|||||||
struct nv_drm_gem_object *nv_gem,
|
struct nv_drm_gem_object *nv_gem,
|
||||||
uint64_t *offset)
|
uint64_t *offset)
|
||||||
{
|
{
|
||||||
return nv_drm_gem_create_mmap_offset(nv_gem, offset);
|
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||||
|
to_nv_nvkms_memory(nv_gem);
|
||||||
|
|
||||||
|
if (!nv_nvkms_memory->physically_mapped) {
|
||||||
|
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
|
||||||
|
if (ret) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nv_drm_gem_create_mmap_offset(&nv_nvkms_memory->base, offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
|
static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
|
||||||
@@ -260,7 +210,7 @@ static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
|
|||||||
struct sg_table *sg_table;
|
struct sg_table *sg_table;
|
||||||
|
|
||||||
if (nv_nvkms_memory->pages_count == 0) {
|
if (nv_nvkms_memory->pages_count == 0) {
|
||||||
NV_DRM_DEV_DEBUG_DRIVER(
|
NV_DRM_DEV_LOG_ERR(
|
||||||
nv_dev,
|
nv_dev,
|
||||||
"Cannot create sg_table for NvKmsKapiMemory 0x%p",
|
"Cannot create sg_table for NvKmsKapiMemory 0x%p",
|
||||||
nv_gem->pMemory);
|
nv_gem->pMemory);
|
||||||
@@ -278,7 +228,6 @@ const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
     .free = __nv_drm_gem_nvkms_memory_free,
    .prime_dup = __nv_drm_gem_nvkms_prime_dup,
    .prime_vmap = __nv_drm_gem_nvkms_prime_vmap,
-    .prime_vunmap = __nv_drm_gem_nvkms_prime_vunmap,
    .mmap = __nv_drm_gem_nvkms_mmap,
    .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
    .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,
@@ -303,7 +252,6 @@ static int __nv_drm_nvkms_gem_obj_init(
         return -EINVAL;
     }
 
-    mutex_init(&nv_nvkms_memory->map_lock);
     nv_nvkms_memory->pPhysicalAddress = NULL;
     nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
     nv_nvkms_memory->physically_mapped = false;
@@ -312,7 +260,7 @@ static int __nv_drm_nvkms_gem_obj_init(
             pMemory,
             &pages,
             &numPages) &&
-        !nvKms->isVidmem(pMemory)) {
+        !nv_dev->hasVideoMemory) {
         /* GetMemoryPages may fail for vidmem allocations,
          * but it should not fail for sysmem allocations. */
         NV_DRM_DEV_LOG_ERR(nv_dev,
@@ -375,7 +323,7 @@ int nv_drm_dumb_create(
         ret = -ENOMEM;
         NV_DRM_DEV_LOG_ERR(
             nv_dev,
-            "Failed to allocate NvKmsKapiMemory for dumb object of size %" NvU64_fmtu,
+            "Failed to allocate NvKmsKapiMemory for dumb object of size %llu",
             args->size);
         goto nvkms_alloc_memory_failed;
     }
@@ -385,8 +333,6 @@ int nv_drm_dumb_create(
         goto nvkms_gem_obj_init_failed;
     }
 
-    nv_nvkms_memory->base.is_drm_dumb = true;
-
     /* Always map dumb buffer memory up front. Clients are only expected
      * to use dumb buffers for software rendering, so they're not much use
      * without a CPU mapping.
@@ -421,7 +367,7 @@ int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
     int ret;
 
     if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-        ret = -EOPNOTSUPP;
+        ret = -EINVAL;
         goto failed;
     }
 
@@ -471,7 +417,7 @@ int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
     int ret = 0;
 
     if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-        ret = -EOPNOTSUPP;
+        ret = -EINVAL;
         goto done;
     }
 
@@ -524,11 +470,11 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
     int ret = 0;
 
     if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-        ret = -EOPNOTSUPP;
+        ret = -EINVAL;
         goto failed;
     }
 
-    if ((p->__pad0 != 0) || (p->__pad1 != 0)) {
+    if (p->__pad != 0) {
         ret = -EINVAL;
         NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
         goto failed;
@@ -592,12 +538,14 @@ static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
 {
     struct nv_drm_device *nv_dev = to_nv_device(dev);
     const struct nv_drm_device *nv_dev_src;
+    const struct nv_drm_gem_nvkms_memory *nv_nvkms_memory_src;
     struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
     struct NvKmsKapiMemory *pMemory;
 
     BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops);
 
     nv_dev_src = to_nv_device(nv_gem_src->base.dev);
+    nv_nvkms_memory_src = to_nv_nvkms_memory_const(nv_gem_src);
 
     if ((nv_nvkms_memory =
          nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
 
@@ -32,15 +32,8 @@
 struct nv_drm_gem_nvkms_memory {
     struct nv_drm_gem_object base;
 
-    /*
-     * Lock to protect concurrent writes to physically_mapped, pPhysicalAddress,
-     * and pWriteCombinedIORemapAddress.
-     *
-     * __nv_drm_gem_nvkms_map(), the sole writer, is structured such that
-     * readers are not required to hold the lock.
-     */
-    struct mutex map_lock;
     bool physically_mapped;
 
     void *pPhysicalAddress;
     void *pWriteCombinedIORemapAddress;
 
@@ -36,14 +36,6 @@
 #include "linux/mm.h"
 #include "nv-mm.h"
 
-#if defined(NV_LINUX_PFN_T_H_PRESENT)
-#include "linux/pfn_t.h"
-#endif
-
-#if defined(NV_BSD)
-#include <vm/vm_pageout.h>
-#endif
-
 static inline
 void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
 {
@@ -72,8 +64,7 @@ static void *__nv_drm_gem_user_memory_prime_vmap(
     struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
 
     return nv_drm_vmap(nv_user_memory->pages,
-                       nv_user_memory->pages_count,
-                       true);
+                       nv_user_memory->pages_count);
 }
 
 static void __nv_drm_gem_user_memory_prime_vunmap(
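The third argument dropped above selects cacheability. The 570.144 body of the three-argument nv_drm_vmap() appears verbatim in the deleted file at the end of this diff; it is reproduced here because it explains what the true/false at the call sites meant:

#include <linux/vmalloc.h>

void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached)
{
    pgprot_t prot = PAGE_KERNEL;

    if (!cached) {
        /* Non-cached mapping for non-IO-coherent display (e.g. Tegra). */
        prot = pgprot_noncached(PAGE_KERNEL);
    }

    return vmap(pages, pages_count, VM_USERMAP, prot);
}

User-memory objects pass true (normal cached mapping); the NVKMS system-memory path shown earlier passes false.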
@@ -108,37 +99,6 @@ static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
     return 0;
 }
 
-#if defined(NV_LINUX) && !defined(NV_VMF_INSERT_MIXED_PRESENT)
-static vm_fault_t __nv_vm_insert_mixed_helper(
-    struct vm_area_struct *vma,
-    unsigned long address,
-    unsigned long pfn)
-{
-    int ret;
-
-#if defined(NV_PFN_TO_PFN_T_PRESENT)
-    ret = vm_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
-#else
-    ret = vm_insert_mixed(vma, address, pfn);
-#endif
-
-    switch (ret) {
-        case 0:
-        case -EBUSY:
-            /*
-             * EBUSY indicates that another thread already handled
-             * the faulted range.
-             */
-            return VM_FAULT_NOPAGE;
-        case -ENOMEM:
-            return VM_FAULT_OOM;
-        default:
-            WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
-            return VM_FAULT_SIGBUS;
-    }
-}
-#endif
-
 static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
     struct nv_drm_gem_object *nv_gem,
     struct vm_area_struct *vma,
@@ -148,19 +108,31 @@ static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
     unsigned long address = nv_page_fault_va(vmf);
     struct drm_gem_object *gem = vma->vm_private_data;
     unsigned long page_offset;
-    unsigned long pfn;
+    vm_fault_t ret;
 
     page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
-    BUG_ON(page_offset >= nv_user_memory->pages_count);
-    pfn = page_to_pfn(nv_user_memory->pages[page_offset]);
 
-#if !defined(NV_LINUX)
-    return vmf_insert_pfn(vma, address, pfn);
-#elif defined(NV_VMF_INSERT_MIXED_PRESENT)
-    return vmf_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
-#else
-    return __nv_vm_insert_mixed_helper(vma, address, pfn);
-#endif
+    BUG_ON(page_offset >= nv_user_memory->pages_count);
+    ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
+    switch (ret) {
+        case 0:
+        case -EBUSY:
+            /*
+             * EBUSY indicates that another thread already handled
+             * the faulted range.
+             */
+            ret = VM_FAULT_NOPAGE;
+            break;
+        case -ENOMEM:
+            ret = VM_FAULT_OOM;
+            break;
+        default:
+            WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
+            ret = VM_FAULT_SIGBUS;
+            break;
+    }
+
+    return ret;
 }
 
 static int __nv_drm_gem_user_create_mmap_offset(
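Both sides translate the vm_insert_*() errno into a vm_fault_t the same way; 535.216.01 open-codes it in the fault handler above, while 570.144 kept it in the now-removed __nv_vm_insert_mixed_helper(). As a standalone helper the translation would look roughly like this (a sketch; errno_to_vm_fault is not a driver symbol):

#include <linux/mm.h>

static vm_fault_t errno_to_vm_fault(int err)
{
    switch (err) {
    case 0:
    case -EBUSY:
        /* -EBUSY: another thread already handled the faulted range. */
        return VM_FAULT_NOPAGE;
    case -ENOMEM:
        return VM_FAULT_OOM;
    default:
        WARN_ONCE(1, "unhandled page-insert error: %d\n", err);
        return VM_FAULT_SIGBUS;
    }
}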
@@ -198,7 +170,7 @@ int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
     if ((params->size % PAGE_SIZE) != 0) {
         NV_DRM_DEV_LOG_ERR(
             nv_dev,
-            "Userspace memory 0x%" NvU64_fmtx " size should be in a multiple of page "
+            "Userspace memory 0x%llx size should be in a multiple of page "
             "size to create a gem object",
             params->address);
         return -EINVAL;
@@ -211,7 +183,7 @@
     if (ret != 0) {
         NV_DRM_DEV_LOG_ERR(
             nv_dev,
-            "Failed to lock user pages for address 0x%" NvU64_fmtx ": %d",
+            "Failed to lock user pages for address 0x%llx: %d",
             params->address, ret);
         return ret;
     }
 
@@ -144,12 +144,6 @@ void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
 #endif
 
     drm_gem_private_object_init(dev, &nv_gem->base, size);
-
-    /* Create mmap offset early for drm_gem_prime_mmap(), if possible. */
-    if (nv_gem->ops->create_mmap_offset) {
-        uint64_t offset;
-        nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &offset);
-    }
 }
 
 struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
@@ -172,11 +166,8 @@ struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
              */
             gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src);
 
-            if (gem_dst == NULL) {
-                return ERR_PTR(-ENOTSUPP);
-            }
-
-            return gem_dst;
+            if (gem_dst)
+                return gem_dst;
         }
     }
 #endif /* NV_DMA_BUF_OWNER_PRESENT */
@@ -241,7 +232,6 @@ int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
         return -EINVAL;
     }
 
-    /* mmap offset creation is idempotent, fetch it by creating it again. */
     if (nv_gem->ops->create_mmap_offset) {
         ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &params->offset);
     } else {
@@ -329,7 +319,7 @@ int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
     struct nv_drm_gem_object *nv_gem = NULL;
 
     if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-        return -EOPNOTSUPP;
+        return -EINVAL;
     }
 
     nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle);
 
@@ -73,8 +73,6 @@ struct nv_drm_gem_object {
 
     struct NvKmsKapiMemory *pMemory;
 
-    bool is_drm_dumb;
-
 #if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
     nv_dma_resv_t resv;
 #endif
@@ -97,16 +95,6 @@ static inline struct nv_drm_gem_object *to_nv_gem_object(
  * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
  */
 
-static inline void
-nv_drm_gem_object_reference(struct nv_drm_gem_object *nv_gem)
-{
-#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
-    drm_gem_object_get(&nv_gem->base);
-#else
-    drm_gem_object_reference(&nv_gem->base);
-#endif
-}
-
 static inline void
 nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
 {
 
@@ -45,7 +45,8 @@
 
 /*
  * The inclusion of drm_framebuffer.h was removed from drm_crtc.h by commit
- * 720cf96d8fec ("drm: Drop drm_framebuffer.h from drm_crtc.h") in v6.0.
+ * 720cf96d8fecde29b72e1101f8a567a0ce99594f ("drm: Drop drm_framebuffer.h from
+ * drm_crtc.h") in linux-next, expected in v5.19-rc7.
  *
  * We only need drm_framebuffer.h for drm_framebuffer_put(), and it is always
  * present (v4.9+) when drm_framebuffer_{put,get}() is present (v4.12+), so it
 
@@ -40,13 +40,8 @@
 #include <drm/drm_blend.h>
 #endif
 
-#if defined(NV_DRM_ROTATION_AVAILABLE) || \
-    defined(NV_DRM_COLOR_CTM_3X4_PRESENT) || \
-    defined(NV_DRM_COLOR_LUT_PRESENT)
-/*
- * For DRM_MODE_ROTATE_*, DRM_MODE_REFLECT_*, struct drm_color_ctm_3x4, and
- * struct drm_color_lut.
- */
+#if defined(NV_DRM_ROTATION_AVAILABLE)
+/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */
 #include <uapi/drm/drm_mode.h>
 #endif
 
@@ -311,36 +306,6 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
         for_each_plane_in_state(__state, plane, plane_state, __i)
 #endif
 
-/*
- * for_each_new_plane_in_state() was added by kernel commit
- * 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by:
- *     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
- *     Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * This commit also added the old_state and new_state pointers to
- * __drm_planes_state. Because of this, the best that can be done on kernel
- * versions without this macro is for_each_plane_in_state.
- */
-
-/**
- * nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update
- * @__state: &struct drm_atomic_state pointer
- * @plane: &struct drm_plane iteration cursor
- * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
- * @__i: int iteration cursor, for macro-internal use
- *
- * This iterates over all planes in an atomic update, tracking only the new
- * state. This is useful in enable functions, where we need the new state the
- * hardware should be in when the atomic commit operation has completed.
- */
-#if !defined(for_each_new_plane_in_state)
-#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
-    nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
-#else
-#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
-    for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
-#endif
-
 static inline struct drm_connector *
 nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
                         uint32_t id)
@@ -363,24 +328,6 @@ static inline void nv_drm_connector_put(struct drm_connector *connector)
 #endif
 }
 
-static inline void nv_drm_property_blob_put(struct drm_property_blob *blob)
-{
-#if defined(NV_DRM_PROPERTY_BLOB_PUT_PRESENT)
-    drm_property_blob_put(blob);
-#else
-    drm_property_unreference_blob(blob);
-#endif
-}
-
-static inline void nv_drm_property_blob_get(struct drm_property_blob *blob)
-{
-#if defined(NV_DRM_PROPERTY_BLOB_PUT_PRESENT)
-    drm_property_blob_get(blob);
-#else
-    drm_property_reference_blob(blob);
-#endif
-}
-
 static inline struct drm_crtc *
 nv_drm_crtc_find(struct drm_device *dev, struct drm_file *filep, uint32_t id)
 {
@@ -636,8 +583,8 @@ static inline int nv_drm_format_num_planes(uint32_t format)
 #endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */
 
 /*
- * DRM_UNLOCKED was removed with commit 2798ffcc1d6a ("drm: Remove locking for
- * legacy ioctls and DRM_UNLOCKED") in v6.8, but it was previously made
+ * DRM_UNLOCKED was removed with linux-next commit 2798ffcc1d6a ("drm: Remove
+ * locking for legacy ioctls and DRM_UNLOCKED"), but it was previously made
  * implicit for all non-legacy DRM driver IOCTLs since Linux v4.10 commit
  * fa5386459f06 "drm: Used DRM_LEGACY for all legacy functions" (Linux v4.4
  * commit ea487835e887 "drm: Enforce unlocked ioctl operation for kms driver
@@ -648,31 +595,6 @@ static inline int nv_drm_format_num_planes(uint32_t format)
 #define DRM_UNLOCKED 0
 #endif
 
-/*
- * struct drm_color_ctm_3x4 was added by commit 6872a189be50 ("drm/amd/display:
- * Add 3x4 CTM support for plane CTM") in v6.8. For backwards compatibility,
- * define it when not present.
- */
-#if !defined(NV_DRM_COLOR_CTM_3X4_PRESENT)
-struct drm_color_ctm_3x4 {
-    __u64 matrix[12];
-};
-#endif
-
-/*
- * struct drm_color_lut was added by commit 5488dc16fde7 ("drm: introduce pipe
- * color correction properties") in v4.6. For backwards compatibility, define it
- * when not present.
- */
-#if !defined(NV_DRM_COLOR_LUT_PRESENT)
-struct drm_color_lut {
-    __u16 red;
-    __u16 green;
-    __u16 blue;
-    __u16 reserved;
-};
-#endif
-
 /*
  * drm_vma_offset_exact_lookup_locked() were added
  * by kernel commit 2225cfe46bcc which was Signed-off-by:
 
@@ -48,11 +48,6 @@
 #define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID 0x11
 #define DRM_NVIDIA_GRANT_PERMISSIONS 0x12
 #define DRM_NVIDIA_REVOKE_PERMISSIONS 0x13
-#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14
-#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15
-#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16
-#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH 0x17
-#define DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID 0x18
 
 #define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \
     DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \
@@ -72,7 +67,7 @@
  *
  * 'warning: suggest parentheses around arithmetic in operand of |'
  */
-#if defined(NV_LINUX) || defined(NV_BSD)
+#if defined(NV_LINUX)
 #define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED \
     DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
 #define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED \
@@ -138,31 +133,6 @@
     DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS), \
              struct drm_nvidia_revoke_permissions_params)
 
-#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE \
-    DRM_IOWR((DRM_COMMAND_BASE + \
-              DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE), \
-             struct drm_nvidia_semsurf_fence_ctx_create_params)
-
-#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE \
-    DRM_IOWR((DRM_COMMAND_BASE + \
-              DRM_NVIDIA_SEMSURF_FENCE_CREATE), \
-             struct drm_nvidia_semsurf_fence_create_params)
-
-#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT \
-    DRM_IOW((DRM_COMMAND_BASE + \
-             DRM_NVIDIA_SEMSURF_FENCE_WAIT), \
-            struct drm_nvidia_semsurf_fence_wait_params)
-
-#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH \
-    DRM_IOW((DRM_COMMAND_BASE + \
-             DRM_NVIDIA_SEMSURF_FENCE_ATTACH), \
-            struct drm_nvidia_semsurf_fence_attach_params)
-
-#define DRM_IOCTL_NVIDIA_GET_DRM_FILE_UNIQUE_ID \
-    DRM_IOWR((DRM_COMMAND_BASE + \
-              DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID), \
-             struct drm_nvidia_get_drm_file_unique_id_params)
-
 struct drm_nvidia_gem_import_nvkms_memory_params {
     uint64_t mem_size; /* IN */
 
@@ -184,15 +154,10 @@ struct drm_nvidia_get_dev_info_params {
     uint32_t gpu_id; /* OUT */
     uint32_t primary_index; /* OUT; the "card%d" value */
 
-    uint32_t supports_alloc; /* OUT */
-    /* The generic_page_kind, page_kind_generation, and sector_layout
-     * fields are only valid if supports_alloc is true.
-     * See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these. */
+    /* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these */
     uint32_t generic_page_kind; /* OUT */
     uint32_t page_kind_generation; /* OUT */
     uint32_t sector_layout; /* OUT */
-    uint32_t supports_sync_fd; /* OUT */
-    uint32_t supports_semsurf; /* OUT */
 };
 
 struct drm_nvidia_prime_fence_context_create_params {
@@ -214,7 +179,6 @@ struct drm_nvidia_gem_prime_fence_attach_params {
     uint32_t handle; /* IN GEM handle to attach fence to */
     uint32_t fence_context_handle; /* IN GEM handle to fence context on which fence is run on */
     uint32_t sem_thresh; /* IN Semaphore value to reach before signal */
-    uint32_t __pad;
 };
 
 struct drm_nvidia_get_client_capability_params {
@@ -226,8 +190,6 @@ struct drm_nvidia_get_client_capability_params {
 struct drm_nvidia_crtc_crc32 {
     uint32_t value; /* Read value, undefined if supported is false */
     uint8_t supported; /* Supported boolean, true if readable by hardware */
-    uint8_t __pad0;
-    uint16_t __pad1;
 };
 
 struct drm_nvidia_crtc_crc32_v2_out {
@@ -267,11 +229,10 @@ struct drm_nvidia_gem_alloc_nvkms_memory_params {
     uint32_t handle; /* OUT */
     uint8_t block_linear; /* IN */
     uint8_t compressible; /* IN/OUT */
-    uint16_t __pad0;
+    uint16_t __pad;
 
     uint64_t memory_size; /* IN */
     uint32_t flags; /* IN */
-    uint32_t __pad1;
 };
 
 struct drm_nvidia_gem_export_dmabuf_memory_params {
@@ -305,94 +266,13 @@ struct drm_nvidia_get_connector_id_for_dpy_id_params {
     uint32_t connectorId; /* OUT */
 };
 
-enum drm_nvidia_permissions_type {
-    NV_DRM_PERMISSIONS_TYPE_MODESET = 2,
-    NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
-};
-
 struct drm_nvidia_grant_permissions_params {
     int32_t fd; /* IN */
     uint32_t dpyId; /* IN */
-    uint32_t type; /* IN */
 };
 
 struct drm_nvidia_revoke_permissions_params {
     uint32_t dpyId; /* IN */
-    uint32_t type; /* IN */
-};
-
-struct drm_nvidia_semsurf_fence_ctx_create_params {
-    uint64_t index; /* IN Index of the desired semaphore in the
-                     * fence context's semaphore surface */
-
-    /* Params for importing userspace semaphore surface */
-    uint64_t nvkms_params_ptr; /* IN */
-    uint64_t nvkms_params_size; /* IN */
-
-    uint32_t handle; /* OUT GEM handle to fence context */
-    uint32_t __pad;
-};
-
-struct drm_nvidia_semsurf_fence_create_params {
-    uint32_t fence_context_handle; /* IN GEM handle to fence context on which
-                                    * fence is run on */
-
-    uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
-                                * after which the fence will be signaled
-                                * with its error status set to -ETIMEDOUT.
-                                * Default timeout value is 5000ms */
-
-    uint64_t wait_value; /* IN Semaphore value to reach before signal */
-
-    int32_t fd; /* OUT sync FD object representing the
-                 * semaphore at the specified index reaching
-                 * a value >= wait_value */
-    uint32_t __pad;
-};
-
-/*
- * Note there is no provision for timeouts in this ioctl. The kernel
- * documentation asserts timeouts should be handled by fence producers, and
- * that waiters should not second-guess their logic, as it is producers rather
- * than consumers that have better information when it comes to determining a
- * reasonable timeout for a given workload.
- */
-struct drm_nvidia_semsurf_fence_wait_params {
-    uint32_t fence_context_handle; /* IN GEM handle to fence context which will
-                                    * be used to wait on the sync FD. Need not
-                                    * be the fence context used to create the
-                                    * sync FD. */
-
-    int32_t fd; /* IN sync FD object to wait on */
-
-    uint64_t pre_wait_value; /* IN Wait for the semaphore represented by
-                              * fence_context to reach this value before
-                              * waiting for the sync file. */
-
-    uint64_t post_wait_value; /* IN Signal the semaphore represented by
-                               * fence_context to this value after waiting
-                               * for the sync file */
-};
-
-struct drm_nvidia_semsurf_fence_attach_params {
-    uint32_t handle; /* IN GEM handle of buffer */
-
-    uint32_t fence_context_handle; /* IN GEM handle of fence context */
-
-    uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
-                                * after which the fence will be signaled
-                                * with its error status set to -ETIMEDOUT.
-                                * Default timeout value is 5000ms */
-
-    uint32_t shared; /* IN If true, fence will reserve shared
-                      * access to the buffer, otherwise it will
-                      * reserve exclusive access */
-
-    uint64_t wait_value; /* IN Semaphore value to reach before signal */
-};
-
-struct drm_nvidia_get_drm_file_unique_id_params {
-    uint64_t id; /* OUT Unique ID of the DRM file */
 };
 
 #endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,8 @@
  */
 
 #include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
 
 #include "nvidia-drm-os-interface.h"
 #include "nvidia-drm.h"
@@ -29,18 +31,135 @@
 
 #if defined(NV_DRM_AVAILABLE)
 
+#if defined(NV_DRM_DRMP_H_PRESENT)
+#include <drm/drmP.h>
+#endif
+
+#include <linux/vmalloc.h>
+
+#include "nv-mm.h"
+
 MODULE_PARM_DESC(
     modeset,
     "Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
+bool nv_drm_modeset_module_param = false;
 module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
 
-#if defined(NV_DRM_FBDEV_AVAILABLE)
-MODULE_PARM_DESC(
-    fbdev,
-    "Create a framebuffer device (1 = enable (default), 0 = disable)");
-module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
+void *nv_drm_calloc(size_t nmemb, size_t size)
+{
+    size_t total_size = nmemb * size;
+    //
+    // Check for overflow.
+    //
+    if ((nmemb != 0) && ((total_size / nmemb) != size))
+    {
+        return NULL;
+    }
+    return kzalloc(nmemb * size, GFP_KERNEL);
+}
+
+void nv_drm_free(void *ptr)
+{
+    if (IS_ERR(ptr)) {
+        return;
+    }
+
+    kfree(ptr);
+}
+
+char *nv_drm_asprintf(const char *fmt, ...)
+{
+    va_list ap;
+    char *p;
+
+    va_start(ap, fmt);
+    p = kvasprintf(GFP_KERNEL, fmt, ap);
+    va_end(ap);
+
+    return p;
+}
+
+#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
+  #define WRITE_COMBINE_FLUSH()    asm volatile("sfence":::"memory")
+#elif defined(NVCPU_FAMILY_ARM)
+  #if defined(NVCPU_ARM)
+    #define WRITE_COMBINE_FLUSH()  { dsb(); outer_sync(); }
+  #elif defined(NVCPU_AARCH64)
+    #define WRITE_COMBINE_FLUSH()  mb()
+  #endif
+#elif defined(NVCPU_PPC64LE)
+  #define WRITE_COMBINE_FLUSH()    asm volatile("sync":::"memory")
 #endif
 
+void nv_drm_write_combine_flush(void)
+{
+    WRITE_COMBINE_FLUSH();
+}
+
+int nv_drm_lock_user_pages(unsigned long address,
+                           unsigned long pages_count, struct page ***pages)
+{
+    struct mm_struct *mm = current->mm;
+    struct page **user_pages;
+    int pages_pinned;
+
+    user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
+
+    if (user_pages == NULL) {
+        return -ENOMEM;
+    }
+
+    nv_mmap_read_lock(mm);
+
+    pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
+                                     user_pages, NULL);
+    nv_mmap_read_unlock(mm);
+
+    if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
+        goto failed;
+    }
+
+    *pages = user_pages;
+
+    return 0;
+
+failed:
+
+    if (pages_pinned > 0) {
+        int i;
+
+        for (i = 0; i < pages_pinned; i++) {
+            NV_UNPIN_USER_PAGE(user_pages[i]);
+        }
+    }
+
+    nv_drm_free(user_pages);
+
+    return (pages_pinned < 0) ? pages_pinned : -EINVAL;
+}
+
+void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
+{
+    unsigned long i;
+
+    for (i = 0; i < pages_count; i++) {
+        set_page_dirty_lock(pages[i]);
+        NV_UNPIN_USER_PAGE(pages[i]);
+    }
+
+    nv_drm_free(pages);
+}
+
+void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
+{
+    return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
+}
+
+void nv_drm_vunmap(void *address)
+{
+    vunmap(address);
+}
+
 #endif /* NV_DRM_AVAILABLE */
 
 /*************************************************************************
 
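The nv_drm_calloc() added above guards nmemb * size with a divide-back test: if the multiplication wrapped modulo SIZE_MAX + 1, dividing the wrapped product by nmemb can no longer return size. A small standalone C sketch of the same check (mul_overflows is illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int mul_overflows(size_t nmemb, size_t size)
{
    size_t total = nmemb * size;                /* may wrap */
    return nmemb != 0 && total / nmemb != size;
}

int main(void)
{
    printf("%d\n", mul_overflows(2, SIZE_MAX)); /* 1: product wrapped */
    printf("%d\n", mul_overflows(16, 4096));    /* 0: fits in size_t  */
    return 0;
}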
@@ -42,16 +42,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 
-#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
-#include <linux/nvhost.h>
-#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
-#include <linux/host1x-next.h>
-#endif
-
-#if defined(NV_DRM_FENCE_AVAILABLE)
-#include "nvidia-dma-fence-helper.h"
-#endif
-
 struct nv_drm_atomic_state {
     struct NvKmsKapiRequestedModeSetConfig config;
     struct drm_atomic_state base;
@@ -156,165 +146,6 @@ static int __nv_drm_put_back_post_fence_fd(
     return ret;
 }
 
-#if defined(NV_DRM_FENCE_AVAILABLE)
-struct nv_drm_plane_fence_cb_data {
-    nv_dma_fence_cb_t dma_fence_cb;
-    struct nv_drm_device *nv_dev;
-    NvU32 semaphore_index;
-};
-
-static void
-__nv_drm_plane_fence_cb(
-    nv_dma_fence_t *fence,
-    nv_dma_fence_cb_t *cb_data
-)
-{
-    struct nv_drm_plane_fence_cb_data *fence_data =
-        container_of(cb_data, typeof(*fence_data), dma_fence_cb);
-    struct nv_drm_device *nv_dev = fence_data->nv_dev;
-
-    nv_dma_fence_put(fence);
-    nvKms->signalDisplaySemaphore(nv_dev->pDevice, fence_data->semaphore_index);
-    nv_drm_free(fence_data);
-}
-
-static int __nv_drm_convert_in_fences(
-    struct nv_drm_device *nv_dev,
-    struct drm_atomic_state *state,
-    struct drm_crtc *crtc,
-    struct drm_crtc_state *crtc_state)
-{
-    struct drm_plane *plane = NULL;
-    struct drm_plane_state *plane_state = NULL;
-    struct nv_drm_plane *nv_plane = NULL;
-    struct NvKmsKapiLayerRequestedConfig *plane_req_config = NULL;
-    struct NvKmsKapiHeadRequestedConfig *head_req_config =
-        &to_nv_crtc_state(crtc_state)->req_config;
-    struct nv_drm_plane_fence_cb_data *fence_data;
-    uint32_t semaphore_index;
-    uint32_t idx_count;
-    int ret, i;
-
-    if (!crtc_state->active) {
-        return 0;
-    }
-
-    nv_drm_for_each_new_plane_in_state(state, plane, plane_state, i) {
-        if ((plane->type == DRM_PLANE_TYPE_CURSOR) ||
-            (plane_state->crtc != crtc) ||
-            (plane_state->fence == NULL)) {
-            continue;
-        }
-
-        nv_plane = to_nv_plane(plane);
-        plane_req_config =
-            &head_req_config->layerRequestedConfig[nv_plane->layer_idx];
-
-        if (nv_dev->supportsSyncpts) {
-#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
-#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT)
-            int ret =
-                nvhost_dma_fence_unpack(
-                    plane_state->fence,
-                    &plane_req_config->config.syncParams.u.syncpt.preSyncptId,
-                    &plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
-            if (ret == 0) {
-                plane_req_config->config.syncParams.preSyncptSpecified = true;
-                continue;
-            }
-#endif
-#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
-            int ret =
-                host1x_fence_extract(
-                    plane_state->fence,
-                    &plane_req_config->config.syncParams.u.syncpt.preSyncptId,
-                    &plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
-            if (ret == 0) {
-                plane_req_config->config.syncParams.preSyncptSpecified = true;
-                continue;
-            }
-#endif
-        }
-
-        /*
-         * Syncpt extraction failed, or syncpts are not supported.
-         * Use general DRM fence support with semaphores instead.
-         */
-        if (plane_req_config->config.syncParams.postSyncptRequested) {
-            // Can't mix Syncpts and semaphores in a given request.
-            return -EINVAL;
-        }
-
-        for (idx_count = 0; idx_count < nv_dev->display_semaphores.count; idx_count++) {
-            semaphore_index = nv_drm_next_display_semaphore(nv_dev);
-            if (nvKms->tryInitDisplaySemaphore(nv_dev->pDevice, semaphore_index)) {
-                break;
-            }
-        }
-
-        if (idx_count == nv_dev->display_semaphores.count) {
-            NV_DRM_DEV_LOG_ERR(
-                nv_dev,
-                "Failed to initialize semaphore for plane fence");
-            /*
-             * This should only happen if the semaphore pool was somehow
-             * exhausted. Waiting a bit and retrying may help in that case.
-             */
-            return -EAGAIN;
-        }
-
-        plane_req_config->config.syncParams.semaphoreSpecified = true;
-        plane_req_config->config.syncParams.u.semaphore.index = semaphore_index;
-
-        fence_data = nv_drm_calloc(1, sizeof(*fence_data));
-
-        if (!fence_data) {
-            NV_DRM_DEV_LOG_ERR(
-                nv_dev,
-                "Failed to allocate callback data for plane fence");
-            nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
-            return -ENOMEM;
-        }
-
-        fence_data->nv_dev = nv_dev;
-        fence_data->semaphore_index = semaphore_index;
-
-        ret = nv_dma_fence_add_callback(plane_state->fence,
-                                        &fence_data->dma_fence_cb,
-                                        __nv_drm_plane_fence_cb);
-
-        switch (ret) {
-            case -ENOENT:
-                /* The fence is already signaled */
-                __nv_drm_plane_fence_cb(plane_state->fence,
-                                        &fence_data->dma_fence_cb);
-#if defined(fallthrough)
-                fallthrough;
-#else
-                /* Fallthrough */
-#endif
-            case 0:
-                /*
-                 * The plane state's fence reference has either been consumed or
-                 * belongs to the outstanding callback now.
-                 */
-                plane_state->fence = NULL;
-                break;
-            default:
-                NV_DRM_DEV_LOG_ERR(
-                    nv_dev,
-                    "Failed plane fence callback registration");
-                /* Fence callback registration failed */
-                nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
-                nv_drm_free(fence_data);
-                return ret;
-        }
-    }
-
-    return 0;
-}
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
-
 static int __nv_drm_get_syncpt_data(
     struct nv_drm_device *nv_dev,
     struct drm_crtc *crtc,
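One detail worth noting in the deleted __nv_drm_convert_in_fences(): -ENOENT from DMA-fence callback registration means the fence already signaled, so the callback is run inline and treated as success, which guarantees the display semaphore is signaled exactly once. Reduced to essentials (a sketch using the upstream dma_fence API directly; arm_fence and on_signaled are illustrative names):

#include <linux/dma-fence.h>

static void on_signaled(struct dma_fence *fence, struct dma_fence_cb *cb)
{
    /* Release per-fence resources, kick the display semaphore, etc. */
}

static int arm_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
{
    int ret = dma_fence_add_callback(fence, cb, on_signaled);

    if (ret == -ENOENT) {   /* already signaled: run the callback now */
        on_signaled(fence, cb);
        ret = 0;
    }
    return ret;             /* any other error: caller unwinds */
}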
@@ -406,39 +237,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
     int i;
     int ret;
 
-    /*
-     * If sub-owner permission was granted to another NVKMS client, disallow
-     * modesets through the DRM interface.
-     */
-    if (nv_dev->subOwnershipGranted) {
-        return -EINVAL;
-    }
-
-#if defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT)
-    if (commit) {
-        /*
-         * This function does what is necessary to prepare the framebuffers
-         * attached to each new plane in the state for scan out, mostly by
-         * calling back into driver callbacks the NVIDIA driver does not
-         * provide. The end result is that all it does on the NVIDIA driver
-         * is populate the plane state's dma fence pointers with any implicit
-         * sync fences attached to the GEM objects associated with those planes
-         * in the new state, prefering explicit sync fences when appropriate.
-         * This must be done prior to converting the per-plane fences to
-         * semaphore waits below.
-         *
-         * Note this only works when the drm_framebuffer:obj[] field is present
-         * and populated, so skip calling this function on kernels where that
-         * field is not present.
-         */
-        ret = drm_atomic_helper_prepare_planes(dev, state);
-
-        if (ret) {
-            return ret;
-        }
-    }
-#endif /* defined(NV_DRM_FRAMEBUFFER_OBJ_PRESENT) */
-
     memset(requested_config, 0, sizeof(*requested_config));
 
     /* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
@@ -452,6 +250,11 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
             commit ? crtc->state : crtc_state;
         struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
 
+        requested_config->headRequestedConfig[nv_crtc->head] =
+            to_nv_crtc_state(new_crtc_state)->req_config;
+
+        requested_config->headsMask |= 1 << nv_crtc->head;
+
         if (commit) {
             struct drm_crtc_state *old_crtc_state = crtc_state;
             struct nv_drm_crtc_state *nv_new_crtc_state =
@@ -471,27 +274,10 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
 
             nv_new_crtc_state->nv_flip = NULL;
         }
-
-#if defined(NV_DRM_FENCE_AVAILABLE)
-        ret = __nv_drm_convert_in_fences(nv_dev,
-                                         state,
-                                         crtc,
-                                         new_crtc_state);
-
-        if (ret != 0) {
-            return ret;
-        }
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
+#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
+        requested_config->headRequestedConfig[nv_crtc->head].modeSetConfig.vrrEnabled = new_crtc_state->vrr_enabled;
+#endif
     }
 
-        /*
-         * Do this deep copy after calling __nv_drm_convert_in_fences,
-         * which modifies the new CRTC state's req_config member
-         */
-        requested_config->headRequestedConfig[nv_crtc->head] =
-            to_nv_crtc_state(new_crtc_state)->req_config;
-
-        requested_config->headsMask |= 1 << nv_crtc->head;
-    }
-
     if (commit && nvKms->systemInfo.bAllowWriteCombining) {
@@ -506,9 +292,7 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
             requested_config,
             &reply_config,
             commit)) {
-        if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
-            return -EINVAL;
-        }
+        return -EINVAL;
     }
 
     if (commit && nv_dev->supportsSyncpts) {
@@ -522,10 +306,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
         }
     }
 
-    if (commit && nv_dev->requiresVrrSemaphores && reply_config.vrrFlip) {
-        nvKms->signalVrrSemaphore(nv_dev->pDevice, reply_config.vrrSemaphoreIndex);
-    }
-
     return 0;
 }
 
@@ -534,24 +314,6 @@ int nv_drm_atomic_check(struct drm_device *dev,
 {
     int ret = 0;
 
-#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
-    struct drm_crtc *crtc;
-    struct drm_crtc_state *crtc_state;
-    int i;
-
-    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
-        /*
-         * if the color management changed on the crtc, we need to update the
-         * crtc's plane's CSC matrices, so add the crtc's planes to the commit
-         */
-        if (crtc_state->color_mgmt_changed) {
-            if ((ret = drm_atomic_add_affected_planes(state, crtc)) != 0) {
-                goto done;
-            }
-        }
-    }
-#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
-
     if ((ret = drm_atomic_helper_check(dev, state)) != 0) {
         goto done;
     }
@@ -626,56 +388,42 @@ int nv_drm_atomic_commit(struct drm_device *dev,
     struct nv_drm_device *nv_dev = to_nv_device(dev);
 
     /*
-     * XXX: drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
-     * for nonblocking commit if the commit would need to wait for previous
-     * updates (commit tasks/flip event) to complete. In case of blocking
-     * commits it mandates to wait for previous updates to complete. However,
-     * the kernel DRM-KMS documentation does explicitly allow maintaining a
-     * queue of outstanding commits.
-     *
-     * Our system already implements such a queue, but due to
-     * bug 4054608, it is currently not used.
+     * drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
+     * for nonblocking commit if previous updates (commit tasks/flip event) are
+     * pending. In case of blocking commits it mandates to wait for previous
+     * updates to complete.
      */
-    nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
-        struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+    if (nonblock) {
+        nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
+            struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
 
         /*
          * Here you aren't required to hold nv_drm_crtc::flip_list_lock
          * because:
          *
          * The core DRM driver acquires lock for all affected crtcs before
         * calling into ->commit() hook, therefore it is not possible for
         * other threads to call into ->commit() hook affecting same crtcs
         * and enqueue flip objects into flip_list -
         *
         *   nv_drm_atomic_commit_internal()
         *     |-> nv_drm_atomic_apply_modeset_config(commit=true)
         *       |-> nv_drm_crtc_enqueue_flip()
         *
         * Only possibility is list_empty check races with code path
         * dequeuing flip object -
         *
         *   __nv_drm_handle_flip_event()
         *     |-> nv_drm_crtc_dequeue_flip()
         *
         * But this race condition can't lead list_empty() to return
         * incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
         * updating the list could not trick us into thinking the list is
         * empty when it isn't.
         */
-        if (nonblock) {
             if (!list_empty(&nv_crtc->flip_list)) {
                 return -EBUSY;
             }
-        } else {
-            if (wait_event_timeout(
-                    nv_dev->flip_event_wq,
-                    list_empty(&nv_crtc->flip_list),
-                    3 * HZ /* 3 second */) == 0) {
-                NV_DRM_DEV_LOG_ERR(
-                    nv_dev,
-                    "Flip event timeout on head %u", nv_crtc->head);
-            }
         }
     }
 
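The rewritten comment above captures the contract both sides implement: a nonblocking commit must fail with -EBUSY while earlier flips are still queued on any affected CRTC, and a blocking commit waits for them (the 570.144 side adds the bounded three-second wait). The core of that check, reduced to a sketch (hypothetical helper; the driver itself only logs on timeout rather than returning an error):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/wait.h>

static int wait_for_pending_flips(struct list_head *flip_list,
                                  wait_queue_head_t *wq, bool nonblock)
{
    if (nonblock) {
        /* ->atomic_commit() contract: do not wait, report busy. */
        return list_empty(flip_list) ? 0 : -EBUSY;
    }

    if (wait_event_timeout(*wq, list_empty(flip_list), 3 * HZ) == 0) {
        return -ETIMEDOUT;  /* hypothetical; the hunk above only logs */
    }
    return 0;
}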
@@ -799,9 +547,6 @@ int nv_drm_atomic_commit(struct drm_device *dev,
                 NV_DRM_DEV_LOG_ERR(
                     nv_dev,
                     "Flip event timeout on head %u", nv_crtc->head);
-                while (!list_empty(&nv_crtc->flip_list)) {
-                    __nv_drm_handle_flip_event(nv_crtc);
-                }
             }
         }
     }
 
@@ -1,291 +0,0 @@
-/*
- * Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "nvidia-drm-os-interface.h"
-
-#if defined(NV_DRM_AVAILABLE)
-
-#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
-#include <linux/file.h>
-#include <linux/sync_file.h>
-#endif
-
-#include <linux/vmalloc.h>
-#include <linux/sched.h>
-#include <linux/device.h>
-
-#include "nv-mm.h"
-
-#if defined(NV_DRM_DRMP_H_PRESENT)
-#include <drm/drmP.h>
-#endif
-
-bool nv_drm_modeset_module_param = false;
-bool nv_drm_fbdev_module_param = true;
-
-void *nv_drm_calloc(size_t nmemb, size_t size)
-{
-    size_t total_size = nmemb * size;
-    //
-    // Check for overflow.
-    //
-    if ((nmemb != 0) && ((total_size / nmemb) != size))
-    {
-        return NULL;
-    }
-    return kzalloc(nmemb * size, GFP_KERNEL);
-}
-
-void nv_drm_free(void *ptr)
-{
-    if (IS_ERR(ptr)) {
-        return;
-    }
-
-    kfree(ptr);
-}
-
-char *nv_drm_asprintf(const char *fmt, ...)
-{
-    va_list ap;
-    char *p;
-
-    va_start(ap, fmt);
-    p = kvasprintf(GFP_KERNEL, fmt, ap);
-    va_end(ap);
-
-    return p;
-}
-
-#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
-#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
-#elif defined(NVCPU_PPC64LE)
-#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
-#else
-#define WRITE_COMBINE_FLUSH() mb()
-#endif
-
-void nv_drm_write_combine_flush(void)
-{
-    WRITE_COMBINE_FLUSH();
-}
-
-int nv_drm_lock_user_pages(unsigned long address,
-                           unsigned long pages_count, struct page ***pages)
-{
-    struct mm_struct *mm = current->mm;
-    struct page **user_pages;
-    int pages_pinned;
-
-    user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
-
-    if (user_pages == NULL) {
-        return -ENOMEM;
-    }
-
-    nv_mmap_read_lock(mm);
-
-    pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
-                                     user_pages);
-    nv_mmap_read_unlock(mm);
-
-    if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
-        goto failed;
-    }
-
-    *pages = user_pages;
-
-    return 0;
-
-failed:
-
-    if (pages_pinned > 0) {
-        int i;
-
-        for (i = 0; i < pages_pinned; i++) {
-            NV_UNPIN_USER_PAGE(user_pages[i]);
-        }
-    }
-
-    nv_drm_free(user_pages);
-
-    return (pages_pinned < 0) ? pages_pinned : -EINVAL;
-}
-
-void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
-{
-    unsigned long i;
-
-    for (i = 0; i < pages_count; i++) {
-        set_page_dirty_lock(pages[i]);
-        NV_UNPIN_USER_PAGE(pages[i]);
-    }
-
-    nv_drm_free(pages);
-}
-
-/*
- * linuxkpi vmap doesn't use the flags argument as it
- * doesn't seem to be needed. Define VM_USERMAP to 0
- * to make errors go away
- *
- * vmap: sys/compat/linuxkpi/common/src/linux_compat.c
- */
-#if defined(NV_BSD)
-#define VM_USERMAP 0
-#endif
-
-void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached)
-{
-    pgprot_t prot = PAGE_KERNEL;
-
-    if (!cached) {
-        prot = pgprot_noncached(PAGE_KERNEL);
-    }
-
-    return vmap(pages, pages_count, VM_USERMAP, prot);
-}
-
-void nv_drm_vunmap(void *address)
-{
-    vunmap(address);
-}
-
-bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
-{
-    worker->shutting_down = false;
-    if (nv_kthread_q_init(&worker->q, name)) {
-        return false;
-    }
-
-    spin_lock_init(&worker->lock);
-
-    return true;
-}
-
-void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&worker->lock, flags);
-    worker->shutting_down = true;
-    spin_unlock_irqrestore(&worker->lock, flags);
-
-    nv_kthread_q_stop(&worker->q);
-}
-
-void nv_drm_workthread_work_init(nv_drm_work *work,
-                                 void (*callback)(void *),
-                                 void *arg)
-{
-    nv_kthread_q_item_init(work, callback, arg);
-}
-
-int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
-{
-    unsigned long flags;
-    int ret = 0;
-
-    spin_lock_irqsave(&worker->lock, flags);
-    if (!worker->shutting_down) {
-        ret = nv_kthread_q_schedule_q_item(&worker->q, work);
-    }
-    spin_unlock_irqrestore(&worker->lock, flags);
-
-    return ret;
-}
-
-void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
-{
-    nv_timer_setup(timer, callback);
-}
-
-void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
-{
-    mod_timer(&timer->kernel_timer, timeout_native);
-}
-
-unsigned long nv_drm_timer_now(void)
-{
-    return jiffies;
-}
-
-unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
-{
-    return jiffies + msecs_to_jiffies(relative_timeout_ms);
-}
-
-bool nv_drm_del_timer_sync(nv_drm_timer *timer)
-{
-    if (del_timer_sync(&timer->kernel_timer)) {
-        return true;
-    } else {
-        return false;
-    }
-}
-
-#if defined(NV_DRM_FENCE_AVAILABLE)
-int nv_drm_create_sync_file(nv_dma_fence_t *fence)
-{
-#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
-    struct sync_file *sync;
-    int fd = get_unused_fd_flags(O_CLOEXEC);
-
-    if (fd < 0) {
-        return fd;
-    }
-
-    /* sync_file_create() generates its own reference to the fence */
-    sync = sync_file_create(fence);
-
-    if (IS_ERR(sync)) {
-        put_unused_fd(fd);
-        return PTR_ERR(sync);
-    }
-
-    fd_install(fd, sync->file);
-
-    return fd;
-#else /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
-    return -EINVAL;
-#endif /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
-}
-
-nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd)
-{
-#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
-    return sync_file_get_fence(fd);
-#else /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
-    return NULL;
-#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
-}
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
-
-void nv_drm_yield(void)
-{
-    set_current_state(TASK_INTERRUPTIBLE);
-    schedule_timeout(1);
-}
-
-#endif /* NV_DRM_AVAILABLE */
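The deleted nv_drm_calloc() above guards the nmemb * size multiplication before allocating: if the product wrapped around, dividing it back by nmemb no longer yields size, so the allocation is refused. A self-contained userspace sketch of the same check (checked_calloc is a made-up name, not a driver function):

    #include <stdlib.h>
    #include <string.h>

    void *checked_calloc(size_t nmemb, size_t size)
    {
        size_t total = nmemb * size;

        if (nmemb != 0 && total / nmemb != size) {
            return NULL; /* multiplication overflowed SIZE_MAX */
        }
        void *p = malloc(total);
        if (p != NULL) {
            memset(p, 0, total); /* calloc semantics: zeroed memory */
        }
        return p;
    }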
@@ -29,53 +29,10 @@
 
 #if defined(NV_DRM_AVAILABLE)
 
-#if defined(NV_DRM_FENCE_AVAILABLE)
-#include "nvidia-dma-fence-helper.h"
-#endif
-
-#if defined(NV_LINUX) || defined(NV_BSD)
-#include "nv-kthread-q.h"
-#include "linux/spinlock.h"
-
-typedef struct nv_drm_workthread {
-    spinlock_t lock;
-    struct nv_kthread_q q;
-    bool shutting_down;
-} nv_drm_workthread;
-
-typedef nv_kthread_q_item_t nv_drm_work;
-
-#else
-#error "Need to define deferred work primitives for this OS"
-#endif
-
-#if defined(NV_LINUX) || defined(NV_BSD)
-#include "nv-timer.h"
-
-typedef struct nv_timer nv_drm_timer;
-
-#else
-#error "Need to define kernel timer callback primitives for this OS"
-#endif
-
-#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
-#define NV_DRM_FBDEV_AVAILABLE
-#define NV_DRM_FBDEV_GENERIC_AVAILABLE
-#endif
-
-#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
-#define NV_DRM_FBDEV_AVAILABLE
-#define NV_DRM_FBDEV_TTM_AVAILABLE
-#endif
-
 struct page;
 
 /* Set to true when the atomic modeset feature is enabled. */
 extern bool nv_drm_modeset_module_param;
-#if defined(NV_DRM_FBDEV_AVAILABLE)
-/* Set to true when the nvidia-drm driver should install a framebuffer device */
-extern bool nv_drm_fbdev_module_param;
-#endif
 
 void *nv_drm_calloc(size_t nmemb, size_t size);
 
@@ -90,41 +47,10 @@ int nv_drm_lock_user_pages(unsigned long address,
 
 void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
 
-void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached);
+void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
 
 void nv_drm_vunmap(void *address);
 
-bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name);
+#endif
 
-/* Can be called concurrently with nv_drm_workthread_add_work() */
-void nv_drm_workthread_shutdown(nv_drm_workthread *worker);
-
-void nv_drm_workthread_work_init(nv_drm_work *work,
-                                 void (*callback)(void *),
-                                 void *arg);
-
-/* Can be called concurrently with nv_drm_workthread_shutdown() */
-int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work);
-
-void nv_drm_timer_setup(nv_drm_timer *timer,
-                        void (*callback)(nv_drm_timer *nv_drm_timer));
-
-void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long relative_timeout_ms);
-
-bool nv_drm_del_timer_sync(nv_drm_timer *timer);
-
-unsigned long nv_drm_timer_now(void);
-
-unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
-
-#if defined(NV_DRM_FENCE_AVAILABLE)
-int nv_drm_create_sync_file(nv_dma_fence_t *fence);
-
-nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd);
-#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
-
-void nv_drm_yield(void);
-
-#endif /* defined(NV_DRM_AVAILABLE) */
-
 #endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */
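The workthread declarations removed above pair every queue operation with a spinlock-guarded shutting_down flag; that is what lets nv_drm_workthread_add_work() and nv_drm_workthread_shutdown() run concurrently, as the comments in the deleted lines note. A userspace sketch of that handshake, assuming pthreads and a plain counter standing in for the real work queue:

    #include <pthread.h>
    #include <stdbool.h>

    struct workthread {
        pthread_mutex_t lock;
        bool shutting_down;
        int queued;            /* stand-in for the real work queue */
    };

    /* Refuses new work once shutdown has started; mirrors
     * nv_drm_workthread_add_work(). */
    bool workthread_add_work(struct workthread *w)
    {
        bool ok = false;

        pthread_mutex_lock(&w->lock);
        if (!w->shutting_down) {
            w->queued++;       /* the real code schedules a queue item here */
            ok = true;
        }
        pthread_mutex_unlock(&w->lock);
        return ok;
    }

    /* Mirrors nv_drm_workthread_shutdown(): flip the flag under the lock,
     * so no add_work call can enqueue afterwards. */
    void workthread_shutdown(struct workthread *w)
    {
        pthread_mutex_lock(&w->lock);
        w->shutting_down = true;
        pthread_mutex_unlock(&w->lock);
        /* ...then stop/join the worker (nv_kthread_q_stop in the driver). */
    }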
@@ -46,33 +46,12 @@
 #define NV_DRM_LOG_ERR(__fmt, ...) \
     DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
 
-/*
- * DRM_WARN() was added in v4.9 by kernel commit
- * 30b0da8d556e65ff935a56cd82c05ba0516d3e4a
- *
- * Before this commit, only DRM_INFO and DRM_ERROR were defined and
- * DRM_INFO(fmt, ...) was defined as
- * printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__). So, if
- * DRM_WARN is undefined this defines NV_DRM_LOG_WARN following the
- * same pattern as DRM_INFO.
- */
-#ifdef DRM_WARN
-#define NV_DRM_LOG_WARN(__fmt, ...) \
-    DRM_WARN("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
-#else
-#define NV_DRM_LOG_WARN(__fmt, ...) \
-    printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
-#endif
-
 #define NV_DRM_LOG_INFO(__fmt, ...) \
     DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
 
 #define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
     NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
 
-#define NV_DRM_DEV_LOG_WARN(__dev, __fmt, ...) \
-    NV_DRM_LOG_WARN("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
-
 #define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
     NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
 
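The removed NV_DRM_LOG_WARN block is a compile-time fallback: because DRM_WARN() is a macro, a plain #ifdef can detect it with no conftest, and pre-4.9 kernels get a printk() built on the same prefix pattern as the old DRM_INFO(). The shape of the pattern, as a sketch (MY_LOG_WARN is a made-up name; DRM_NAME comes from the DRM headers):

    /* Prefer the kernel's DRM_WARN() when the macro exists, otherwise
     * fall back to a raw printk with the same "[drm] [nvidia-drm]" prefix. */
    #ifdef DRM_WARN
    #define MY_LOG_WARN(fmt, ...) \
        DRM_WARN("[nvidia-drm] " fmt "\n", ##__VA_ARGS__)
    #else
    #define MY_LOG_WARN(fmt, ...) \
        printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " fmt "\n", ##__VA_ARGS__)
    #endif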
@@ -126,7 +105,6 @@ struct nv_drm_device {
     NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
 #endif
 
-    struct delayed_work hotplug_event_work;
     atomic_t enable_event_handling;
 
     /**
@@ -139,64 +117,20 @@ struct nv_drm_device {
 
 #endif
 
-#if defined(NV_DRM_FENCE_AVAILABLE)
-    NvU64 semsurf_stride;
-    NvU64 semsurf_max_submitted_offset;
-#endif
-
     NvBool hasVideoMemory;
 
     NvBool supportsSyncpts;
-    NvBool requiresVrrSemaphores;
-    NvBool subOwnershipGranted;
-    NvBool hasFramebufferConsole;
-
     struct drm_property *nv_out_fence_property;
     struct drm_property *nv_input_colorspace_property;
 
-    struct {
-        NvU32 count;
-        NvU32 next_index;
-    } display_semaphores;
-
 #if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
     struct drm_property *nv_hdr_output_metadata_property;
 #endif
 
-    struct drm_property *nv_plane_lms_ctm_property;
-    struct drm_property *nv_plane_lms_to_itp_ctm_property;
-    struct drm_property *nv_plane_itp_to_lms_ctm_property;
-    struct drm_property *nv_plane_blend_ctm_property;
-
-    struct drm_property *nv_plane_degamma_tf_property;
-    struct drm_property *nv_plane_degamma_lut_property;
-    struct drm_property *nv_plane_degamma_lut_size_property;
-    struct drm_property *nv_plane_degamma_multiplier_property;
-
-    struct drm_property *nv_plane_tmo_lut_property;
-    struct drm_property *nv_plane_tmo_lut_size_property;
-
-    struct drm_property *nv_crtc_regamma_tf_property;
-    struct drm_property *nv_crtc_regamma_lut_property;
-    struct drm_property *nv_crtc_regamma_lut_size_property;
-    struct drm_property *nv_crtc_regamma_divisor_property;
-
     struct nv_drm_device *next;
 };
 
-static inline NvU32 nv_drm_next_display_semaphore(
-    struct nv_drm_device *nv_dev)
-{
-    NvU32 current_index = nv_dev->display_semaphores.next_index++;
-
-    if (nv_dev->display_semaphores.next_index >=
-        nv_dev->display_semaphores.count) {
-        nv_dev->display_semaphores.next_index = 0;
-    }
-
-    return current_index;
-}
-
 static inline struct nv_drm_device *to_nv_device(
     struct drm_device *dev)
 {
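The deleted nv_drm_next_display_semaphore() above hands out indices 0..count-1 round-robin, reading the cursor before wrapping it. A runnable sketch of the same logic (no locking shown; the driver presumably serializes callers through its own modeset locking):

    #include <stdio.h>

    struct display_semaphores {
        unsigned count;
        unsigned next_index;
    };

    static unsigned next_display_semaphore(struct display_semaphores *s)
    {
        unsigned current_index = s->next_index++;   /* hand out old value */

        if (s->next_index >= s->count) {
            s->next_index = 0;   /* wrap before the next caller reads it */
        }
        return current_index;
    }

    int main(void)
    {
        struct display_semaphores s = { .count = 3, .next_index = 0 };
        for (int i = 0; i < 7; i++) {
            printf("%u ", next_display_semaphore(&s));  /* 0 1 2 0 1 2 0 */
        }
        return 0;
    }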
@@ -1,148 +0,0 @@
-###########################################################################
-# Kbuild fragment for nvidia-drm.ko
-###########################################################################
-
-#
-# Define NVIDIA_DRM_SOURCES
-#
-
-NVIDIA_DRM_SOURCES =
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-os-interface.c
-
-#
-# Register the conftests needed by nvidia-drm.ko
-#
-
-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
-NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
-NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
-NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
-NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
-NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
-
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += fence_set_error
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_devices
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_pci_devices
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_ttm_setup
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_client_setup
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_helper_crtc_enable_color_mgmt
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_crtc_enable_color_mgmt
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_mixed
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += pfn_to_pfn_t
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_prime_mmap
-
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
-NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
-NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
-NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
-NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
-NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_has_leases
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
-NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
-NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_syncobj_features_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_framebuffer_obj_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_ctm_3x4_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_lut
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_property_blob_put
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
-NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations_fop_unsigned_offset_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_helper_funcs_mode_valid_has_const_mode_arg
@@ -2,16 +2,29 @@
 # Kbuild fragment for nvidia-drm.ko
 ###########################################################################
 
-# Get our source file list and conftest list from the common file
-include $(src)/nvidia-drm/nvidia-drm-sources.mk
-
-# Linux-specific sources
-NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
-
 #
 # Define NVIDIA_DRM_{SOURCES,OBJECTS}
 #
 
+NVIDIA_DRM_SOURCES =
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
+NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
 
 NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES))
 
 obj-m += nvidia-drm.o
@@ -30,4 +43,95 @@ NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
 
 $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS))
 
+#
+# Register the conftests needed by nvidia-drm.ko
+#
+
 NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS)
 
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
+NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
+NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
+
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
+
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
+NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
+NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
+NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_has_leases
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
@@ -45,7 +45,6 @@ int nv_drm_init(void)
         return -EINVAL;
     }
 
-    nvKms->setSuspendResumeCallback(nv_drm_suspend_resume);
     return nv_drm_probe_devices();
 #else
     return 0;
@@ -55,7 +54,6 @@ int nv_drm_init(void)
 void nv_drm_exit(void)
 {
 #if defined(NV_DRM_AVAILABLE)
-    nvKms->setSuspendResumeCallback(NULL);
     nv_drm_remove_devices();
 #endif
 }
@@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
 
         // Ran out of attempts - return thread even if its stack may not be
         // allocated on the preferred node
-        if (i == (attempts - 1))
+        if ((i == (attempts - 1)))
             break;
 
         // Get the NUMA node where the first page of the stack is resident. If
@@ -247,11 +247,6 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
     return 0;
 }
 
-int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
-{
-    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
-}
-
 // Returns true (non-zero) if the item was actually scheduled, and false if the
 // item was already pending in a queue.
 static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
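The first hunk above sits inside a retry loop: thread_create_on_node() re-creates the thread a few times hoping the stack lands on the preferred NUMA node, and the `i == (attempts - 1)` check keeps the last attempt even when placement failed. The general shape of that bounded retry, as a sketch with hypothetical try_alloc()/placed_ok()/release() helpers (not driver functions):

    #include <stddef.h>

    void *try_alloc(void);       /* hypothetical: acquire a resource */
    int placed_ok(void *res);    /* hypothetical: placement check */
    void release(void *res);     /* hypothetical: give it back */

    void *alloc_with_retries(int attempts)
    {
        void *res = NULL;
        int i;

        for (i = 0; i < attempts; i++) {
            res = try_alloc();
            if (res == NULL) {
                return NULL;            /* hard failure: give up */
            }
            if (placed_ok(res)) {
                break;                  /* preferred placement: done */
            }
            if (i == attempts - 1) {
                break;                  /* out of attempts: accept as-is */
            }
            release(res);               /* wrong placement: retry */
        }
        return res;
    }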
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2015-21 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -35,13 +35,12 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/freezer.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
 
 #include <acpi/video.h>
 
 #include "nvstatus.h"
 
+#include "nv-register-module.h"
 #include "nv-modeset-interface.h"
 #include "nv-kref.h"
 
@@ -54,7 +53,6 @@
 #include "nv-kthread-q.h"
 #include "nv-time.h"
 #include "nv-lock.h"
-#include "nv-chardev-numbers.h"
 
 /*
  * Commit aefb2f2e619b ("x86/bugs: Rename CONFIG_RETPOLINE =>
@@ -71,27 +69,12 @@
 static bool output_rounding_fix = true;
 module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);
 
-static bool disable_hdmi_frl = false;
-module_param_named(disable_hdmi_frl, disable_hdmi_frl, bool, 0400);
-
 static bool disable_vrr_memclk_switch = false;
 module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);
 
-static bool hdmi_deepcolor = true;
-module_param_named(hdmi_deepcolor, hdmi_deepcolor, bool, 0400);
-
-static bool vblank_sem_control = true;
-module_param_named(vblank_sem_control, vblank_sem_control, bool, 0400);
-
 static bool opportunistic_display_sync = true;
 module_param_named(opportunistic_display_sync, opportunistic_display_sync, bool, 0400);
 
-static enum NvKmsDebugForceColorSpace debug_force_color_space = NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE;
-module_param_named(debug_force_color_space, debug_force_color_space, uint, 0400);
-
-static bool enable_overlay_layers = true;
-module_param_named(enable_overlay_layers, enable_overlay_layers, bool, 0400);
-
 /* These parameters are used for fault injection tests. Normally the defaults
  * should be used. */
 MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
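Each knob deleted above follows the same two-line recipe: a static variable plus module_param_named(), which exposes it under /sys/module/<module>/parameters/<name>; mode 0400 makes it root-readable and settable only at module load time. A sketch using one of the removed parameters (the description string is assumed, not from the source):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Load-time tunable: nvidia-modeset.disable_hdmi_frl=1 */
    static bool disable_hdmi_frl = false;
    module_param_named(disable_hdmi_frl, disable_hdmi_frl, bool, 0400);
    MODULE_PARM_DESC(disable_hdmi_frl,
                     "Disable HDMI Fixed Rate Link (assumed description)");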
@@ -102,97 +85,32 @@ MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on modul
 static bool malloc_verbose = false;
 module_param_named(malloc_verbose, malloc_verbose, bool, 0400);
 
-/* Fail allocating the RM core channel for NVKMS using the i-th method (see
- * FailAllocCoreChannelMethod). Failures not using the i-th method are ignored. */
-MODULE_PARM_DESC(fail_alloc_core_channel, "Control testing for hardware core channel allocation failure");
-static int fail_alloc_core_channel_method = -1;
-module_param_named(fail_alloc_core_channel, fail_alloc_core_channel_method, int, 0400);
-
-#if NVKMS_CONFIG_FILE_SUPPORTED
 /* This parameter is used to find the dpy override conf file */
 #define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL)
 
 MODULE_PARM_DESC(config_file,
-                 "Path to the nvidia-modeset configuration file (default: disabled)");
+                 "Path to the nvidia-modeset configuration file "
+                 "(default: disabled)");
 static char *nvkms_conf = NULL;
 module_param_named(config_file, nvkms_conf, charp, 0400);
-#endif
 
 static atomic_t nvkms_alloc_called_count;
 
-NvBool nvkms_test_fail_alloc_core_channel(
-    enum FailAllocCoreChannelMethod method
-)
-{
-    if (method != fail_alloc_core_channel_method) {
-        // don't fail if it's not the currently specified method
-        return NV_FALSE;
-    }
-
-    printk(KERN_INFO NVKMS_LOG_PREFIX
-           "Failing core channel allocation using method %d",
-           fail_alloc_core_channel_method);
-
-    return NV_TRUE;
-}
-
 NvBool nvkms_output_rounding_fix(void)
 {
     return output_rounding_fix;
 }
 
-NvBool nvkms_disable_hdmi_frl(void)
-{
-    return disable_hdmi_frl;
-}
-
 NvBool nvkms_disable_vrr_memclk_switch(void)
 {
     return disable_vrr_memclk_switch;
 }
 
-NvBool nvkms_hdmi_deepcolor(void)
-{
-    return hdmi_deepcolor;
-}
-
-NvBool nvkms_vblank_sem_control(void)
-{
-    return vblank_sem_control;
-}
-
 NvBool nvkms_opportunistic_display_sync(void)
 {
     return opportunistic_display_sync;
 }
 
-enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void)
-{
-    if (debug_force_color_space >= NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX) {
-        return NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE;
-    }
-    return debug_force_color_space;
-}
-
-NvBool nvkms_enable_overlay_layers(void)
-{
-    return enable_overlay_layers;
-}
-
-NvBool nvkms_kernel_supports_syncpts(void)
-{
-    /*
-     * Note this only checks that the kernel has the prerequisite
-     * support for syncpts; callers must also check that the hardware
-     * supports syncpts.
-     */
-#if (defined(CONFIG_TEGRA_GRHOST) || defined(NV_LINUX_HOST1X_NEXT_H_PRESENT))
-    return NV_TRUE;
-#else
-    return NV_FALSE;
-#endif
-}
-
 #define NVKMS_SYNCPT_STUBS_NEEDED
 
 /*************************************************************************
@@ -443,7 +361,7 @@ NvU64 nvkms_get_usec(void)
     struct timespec64 ts;
     NvU64 ns;
 
-    ktime_get_raw_ts64(&ts);
+    ktime_get_real_ts64(&ts);
 
     ns = timespec64_to_ns(&ts);
     return ns / 1000;
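The one-line change above swaps clock sources: ktime_get_raw_ts64() reads the raw monotonic clock (never stepped or slewed by NTP), while ktime_get_real_ts64() reads wall-clock time, which can jump when the system time is set. Both fill a struct timespec64, so the surrounding conversion is unchanged. A sketch of the 570.x flavor (the helper name is made up):

    #include <linux/timekeeping.h>
    #include <linux/time64.h>
    #include <linux/types.h>

    static u64 usec_raw(void)
    {
        struct timespec64 ts;

        ktime_get_raw_ts64(&ts);        /* immune to clock steps/NTP */
        return timespec64_to_ns(&ts) / 1000;
    }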
@@ -557,8 +475,6 @@ nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel,
 
 static void nvkms_suspend(NvU32 gpuId)
 {
-    nvKmsKapiSuspendResume(NV_TRUE /* suspend */);
-
     if (gpuId == 0) {
         nvkms_write_lock_pm_lock();
     }
@@ -577,8 +493,6 @@ static void nvkms_resume(NvU32 gpuId)
     if (gpuId == 0) {
         nvkms_write_unlock_pm_lock();
    }
-
-    nvKmsKapiSuspendResume(NV_FALSE /* suspend */);
 }
 
 
@@ -907,6 +821,49 @@ void nvkms_free_timer(nvkms_timer_handle_t *handle)
     timer->cancel = NV_TRUE;
 }
 
+void* nvkms_get_per_open_data(int fd)
+{
+    struct file *filp = fget(fd);
+    struct nvkms_per_open *popen = NULL;
+    dev_t rdev = 0;
+    void *data = NULL;
+
+    if (filp == NULL) {
+        return NULL;
+    }
+
+    if (filp->f_inode == NULL) {
+        goto done;
+    }
+    rdev = filp->f_inode->i_rdev;
+
+    if ((MAJOR(rdev) != NVKMS_MAJOR_DEVICE_NUMBER) ||
+        (MINOR(rdev) != NVKMS_MINOR_DEVICE_NUMBER)) {
+        goto done;
+    }
+
+    popen = filp->private_data;
+    if (popen == NULL) {
+        goto done;
+    }
+
+    data = popen->data;
+
+done:
+    /*
+     * fget() incremented the struct file's reference count, which
+     * needs to be balanced with a call to fput(). It is safe to
+     * decrement the reference count before returning
+     * filp->private_data because core NVKMS is currently holding the
+     * nvkms_lock, which prevents the nvkms_close() => nvKmsClose()
+     * call chain from freeing the file out from under the caller of
+     * nvkms_get_per_open_data().
+     */
+    fput(filp);
+
+    return data;
+}
+
 NvBool nvkms_fd_is_nvidia_chardev(int fd)
 {
     struct file *filp = fget(fd);
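Both versions of nvkms_get_per_open_data() lean on the rule spelled out in the added comment: every successful fget() pins the struct file and must be balanced by fput(), and returning private_data after the fput() is only safe here because nvkms_lock blocks a concurrent close from freeing it. The bare pattern, as a sketch (per_open_data_of() is a made-up helper; it identifies the device by file_operations rather than by device number, as the 570.x variant does):

    #include <linux/file.h>
    #include <linux/fs.h>

    static void *per_open_data_of(int fd, const struct file_operations *fops)
    {
        struct file *filp = fget(fd);   /* takes a reference, or NULL */
        void *data = NULL;

        if (filp == NULL) {
            return NULL;                /* fd not open */
        }
        if (filp->f_op == fops) {       /* make sure it is our device */
            data = filp->private_data;
        }
        fput(filp);                     /* drop the fget() reference */
        return data;                    /* caller must hold its own lock */
    }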
@@ -1050,11 +1007,6 @@ nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
|
|||||||
|
|
||||||
#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
|
#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
|
||||||
if (!acpi_video_backlight_use_native()) {
|
if (!acpi_video_backlight_use_native()) {
|
||||||
#if defined(NV_ACPI_VIDEO_REGISTER_BACKLIGHT)
|
|
||||||
nvkms_log(NVKMS_LOG_LEVEL_INFO, NVKMS_LOG_PREFIX,
|
|
||||||
"ACPI reported no NVIDIA native backlight available; attempting to use ACPI backlight.");
|
|
||||||
acpi_video_register_backlight();
|
|
||||||
#endif
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@@ -1293,26 +1245,6 @@ void nvkms_close_from_kapi(struct nvkms_per_open *popen)
|
|||||||
nvkms_close_pm_unlocked(popen);
|
nvkms_close_pm_unlocked(popen);
|
||||||
}
|
}
|
||||||
|
|
||||||
NvBool nvkms_ioctl_from_kapi_try_pmlock
|
|
||||||
(
|
|
||||||
struct nvkms_per_open *popen,
|
|
||||||
NvU32 cmd, void *params_address, const size_t param_size
|
|
||||||
)
|
|
||||||
{
|
|
||||||
NvBool ret;
|
|
||||||
|
|
||||||
if (nvkms_read_trylock_pm_lock()) {
|
|
||||||
return NV_FALSE;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = nvkms_ioctl_common(popen,
|
|
||||||
cmd,
|
|
||||||
(NvU64)(NvUPtr)params_address, param_size) == 0;
|
|
||||||
nvkms_read_unlock_pm_lock();
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
NvBool nvkms_ioctl_from_kapi
|
NvBool nvkms_ioctl_from_kapi
|
||||||
(
|
(
|
||||||
struct nvkms_per_open *popen,
|
struct nvkms_per_open *popen,
|
||||||
@@ -1476,7 +1408,6 @@ static void nvkms_proc_exit(void)
|
|||||||
/*************************************************************************
|
/*************************************************************************
|
||||||
* NVKMS Config File Read
|
* NVKMS Config File Read
|
||||||
************************************************************************/
|
************************************************************************/
|
||||||
#if NVKMS_CONFIG_FILE_SUPPORTED
|
|
||||||
static NvBool nvkms_fs_mounted(void)
|
static NvBool nvkms_fs_mounted(void)
|
||||||
{
|
{
|
||||||
return current->fs != NULL;
|
return current->fs != NULL;
|
||||||
@@ -1497,8 +1428,6 @@ static size_t nvkms_config_file_open
|
|||||||
loff_t pos = 0;
|
loff_t pos = 0;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
*buff = NULL;
|
|
||||||
|
|
||||||
if (!nvkms_fs_mounted()) {
|
if (!nvkms_fs_mounted()) {
|
||||||
printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n");
|
printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n");
|
||||||
return 0;
|
return 0;
|
||||||
@@ -1522,11 +1451,6 @@ static size_t nvkms_config_file_open
|
|||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do not alloc a 0 sized buffer
|
|
||||||
if (file_size == 0) {
|
|
||||||
goto done;
|
|
||||||
}
|
|
||||||
|
|
||||||
*buff = nvkms_alloc(file_size, NV_FALSE);
|
*buff = nvkms_alloc(file_size, NV_FALSE);
|
||||||
if (*buff == NULL) {
|
if (*buff == NULL) {
|
||||||
printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n");
|
printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n");
|
||||||
@@ -1591,11 +1515,6 @@ static void nvkms_read_config_file_locked(void)
|
|||||||
|
|
||||||
nvkms_free(buffer, buf_size);
|
nvkms_free(buffer, buf_size);
|
||||||
}
|
}
|
||||||
#else
|
|
||||||
static void nvkms_read_config_file_locked(void)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*************************************************************************
|
/*************************************************************************
|
||||||
* NVKMS KAPI functions
|
* NVKMS KAPI functions
|
||||||
@@ -1610,48 +1529,6 @@ NvBool nvKmsKapiGetFunctionsTable
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable);
|
EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable);
|
||||||
|
|
||||||
NvU32 nvKmsKapiF16ToF32(NvU16 a)
|
|
||||||
{
|
|
||||||
return nvKmsKapiF16ToF32Internal(a);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(nvKmsKapiF16ToF32);
|
|
||||||
|
|
||||||
NvU16 nvKmsKapiF32ToF16(NvU32 a)
|
|
||||||
{
|
|
||||||
return nvKmsKapiF32ToF16Internal(a);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(nvKmsKapiF32ToF16);
|
|
||||||
|
|
||||||
NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b)
|
|
||||||
{
|
|
||||||
return nvKmsKapiF32MulInternal(a, b);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(nvKmsKapiF32Mul);
|
|
||||||
|
|
||||||
NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b)
|
|
||||||
{
|
|
||||||
return nvKmsKapiF32DivInternal(a, b);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(nvKmsKapiF32Div);
|
|
||||||
|
|
||||||
NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b)
|
|
||||||
{
|
|
||||||
return nvKmsKapiF32AddInternal(a, b);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(nvKmsKapiF32Add);
|
|
||||||
|
|
||||||
NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact)
|
|
||||||
{
|
|
||||||
return nvKmsKapiF32ToUI32RMinMagInternal(a, exact);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(nvKmsKapiF32ToUI32RMinMag);
|
|
||||||
|
|
||||||
NvU32 nvKmsKapiUI32ToF32(NvU32 a)
|
|
||||||
{
|
|
||||||
-    return nvKmsKapiUI32ToF32Internal(a);
-}
-EXPORT_SYMBOL(nvKmsKapiUI32ToF32);
-
 /*************************************************************************
  * File operation callback functions.
  *************************************************************************/
 
@@ -1732,12 +1609,6 @@ static int nvkms_ioctl(struct inode *inode, struct file *filp,
     return status;
 }
 
-static long nvkms_unlocked_ioctl(struct file *filp, unsigned int cmd,
-                                 unsigned long arg)
-{
-    return nvkms_ioctl(filp->f_inode, filp, cmd, arg);
-}
-
 static unsigned int nvkms_poll(struct file *filp, poll_table *wait)
 {
     unsigned int mask = 0;
@@ -1765,73 +1636,17 @@ static unsigned int nvkms_poll(struct file *filp, poll_table *wait)
  * Module loading support code.
  *************************************************************************/
 
-#define NVKMS_RDEV (MKDEV(NV_MAJOR_DEVICE_NUMBER, \
-                          NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE))
-
-static struct file_operations nvkms_fops = {
+static nvidia_module_t nvidia_modeset_module = {
     .owner          = THIS_MODULE,
-    .poll           = nvkms_poll,
-    .unlocked_ioctl = nvkms_unlocked_ioctl,
-#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
-    .compat_ioctl   = nvkms_unlocked_ioctl,
-#endif
-    .mmap           = nvkms_mmap,
+    .module_name    = "nvidia-modeset",
+    .instance       = 1, /* minor number: 255-1=254 */
     .open           = nvkms_open,
-    .release        = nvkms_close,
+    .close          = nvkms_close,
+    .mmap           = nvkms_mmap,
+    .ioctl          = nvkms_ioctl,
+    .poll           = nvkms_poll,
 };
 
-static struct cdev nvkms_device_cdev;
-
-static int __init nvkms_register_chrdev(void)
-{
-    int ret;
-
-    ret = register_chrdev_region(NVKMS_RDEV, 1, "nvidia-modeset");
-    if (ret < 0) {
-        return ret;
-    }
-
-    cdev_init(&nvkms_device_cdev, &nvkms_fops);
-    ret = cdev_add(&nvkms_device_cdev, NVKMS_RDEV, 1);
-    if (ret < 0) {
-        unregister_chrdev_region(NVKMS_RDEV, 1);
-        return ret;
-    }
-
-    return ret;
-}
-
-static void nvkms_unregister_chrdev(void)
-{
-    cdev_del(&nvkms_device_cdev);
-    unregister_chrdev_region(NVKMS_RDEV, 1);
-}
-
-void* nvkms_get_per_open_data(int fd)
-{
-    struct file *filp = fget(fd);
-    void *data = NULL;
-
-    if (filp) {
-        if (filp->f_op == &nvkms_fops && filp->private_data) {
-            struct nvkms_per_open *popen = filp->private_data;
-            data = popen->data;
-        }
-
-        /*
-         * fget() incremented the struct file's reference count, which needs to
-         * be balanced with a call to fput(). It is safe to decrement the
-         * reference count before returning filp->private_data because core
-         * NVKMS is currently holding the nvkms_lock, which prevents the
-         * nvkms_close() => nvKmsClose() call chain from freeing the file out
-         * from under the caller of nvkms_get_per_open_data().
-         */
-        fput(filp);
-    }
-
-    return data;
-}
-
 static int __init nvkms_init(void)
 {
     int ret;
@@ -1862,9 +1677,10 @@ static int __init nvkms_init(void)
     INIT_LIST_HEAD(&nvkms_timers.list);
     spin_lock_init(&nvkms_timers.lock);
 
-    ret = nvkms_register_chrdev();
+    ret = nvidia_register_module(&nvidia_modeset_module);
+
     if (ret != 0) {
-        goto fail_register_chrdev;
+        goto fail_register_module;
     }
 
     down(&nvkms_lock);
@@ -1883,8 +1699,8 @@ static int __init nvkms_init(void)
     return 0;
 
 fail_module_load:
-    nvkms_unregister_chrdev();
-fail_register_chrdev:
+    nvidia_unregister_module(&nvidia_modeset_module);
+fail_register_module:
     nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
 fail_deferred_close_kthread:
     nv_kthread_q_stop(&nvkms_kthread_q);
@@ -1948,7 +1764,7 @@ restart:
     nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
     nv_kthread_q_stop(&nvkms_kthread_q);
 
-    nvkms_unregister_chrdev();
+    nvidia_unregister_module(&nvidia_modeset_module);
     nvkms_free_rm();
 
     if (malloc_verbose) {
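
The hunks above drop nvkms's self-managed character device (a fixed NVKMS_RDEV device number, an nvkms_fops table, and a struct cdev) in favor of registering through the resource manager's nvidia_module_t table. For readers unfamiliar with the removed pattern, here is a minimal, self-contained sketch of the register_chrdev_region/cdev_add idiom it follows; the module name, device numbers, and empty fops are illustrative stand-ins, not the driver's real values.

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/cdev.h>

    /* Illustrative fixed device number; nvkms derives its own major/minor. */
    #define DEMO_RDEV MKDEV(195, 254)

    static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
    };

    static struct cdev demo_cdev;

    static int __init demo_init(void)
    {
        /* Reserve the (major, minor) region, then attach the cdev to it. */
        int ret = register_chrdev_region(DEMO_RDEV, 1, "demo");
        if (ret < 0)
            return ret;

        cdev_init(&demo_cdev, &demo_fops);
        ret = cdev_add(&demo_cdev, DEMO_RDEV, 1);
        if (ret < 0)
            unregister_chrdev_region(DEMO_RDEV, 1);
        return ret;
    }

    static void __exit demo_exit(void)
    {
        /* Teardown mirrors setup in reverse order. */
        cdev_del(&demo_cdev);
        unregister_chrdev_region(DEMO_RDEV, 1);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("Dual MIT/GPL");
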
@@ -67,14 +67,6 @@ enum NvKmsSyncPtOp {
     NVKMS_SYNCPT_OP_READ_MINVAL,
 };
 
-enum NvKmsDebugForceColorSpace {
-    NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE,
-    NVKMS_DEBUG_FORCE_COLOR_SPACE_RGB,
-    NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV444,
-    NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV422,
-    NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX,
-};
-
 typedef struct {
 
     struct {
@@ -104,20 +96,10 @@ typedef struct {
     } read_minval;
 } NvKmsSyncPtOpParams;
 
-enum FailAllocCoreChannelMethod {
-    FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL = 0,
-    FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE = 1,
-};
 
-NvBool nvkms_test_fail_alloc_core_channel(enum FailAllocCoreChannelMethod method);
 NvBool nvkms_output_rounding_fix(void);
-NvBool nvkms_disable_hdmi_frl(void);
 NvBool nvkms_disable_vrr_memclk_switch(void);
-NvBool nvkms_hdmi_deepcolor(void);
-NvBool nvkms_vblank_sem_control(void);
 NvBool nvkms_opportunistic_display_sync(void);
-enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void);
-NvBool nvkms_enable_overlay_layers(void);
 
 void nvkms_call_rm (void *ops);
 void* nvkms_alloc (size_t size,
@@ -320,11 +302,6 @@ NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info);
 
 NvBool nvkms_allow_write_combining(void);
 
-/*!
- * Check if OS supports syncpoints.
- */
-NvBool nvkms_kernel_supports_syncpts(void);
-
 /*!
  * Checks whether the fd is associated with an nvidia character device.
  */
@@ -349,16 +326,6 @@ NvBool nvkms_ioctl_from_kapi
     NvU32 cmd, void *params_address, const size_t params_size
 );
 
-/*!
- * Like nvkms_ioctl_from_kapi, but return NV_FALSE instead of waiting if the
- * power management read lock cannot be acquired.
- */
-NvBool nvkms_ioctl_from_kapi_try_pmlock
-(
-    struct nvkms_per_open *popen,
-    NvU32 cmd, void *params_address, const size_t params_size
-);
-
 /*!
  * APIs for locking.
  */
@@ -40,6 +40,9 @@ NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO)
 NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary
 NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o
 
+quiet_cmd_symlink = SYMLINK $@
+ cmd_symlink = ln -sf $< $@
+
 targets += $(NVIDIA_MODESET_BINARY_OBJECT_O)
 
 $(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE
@@ -55,18 +58,6 @@ nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O)
 NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset
 NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
 
-# Some Android kernels prohibit driver use of filesystem functions like
-# filp_open() and kernel_read(). Disable the NVKMS_CONFIG_FILE_SUPPORTED
-# functionality that uses those functions when building for Android.
-
-PLATFORM_IS_ANDROID ?= 0
-
-ifeq ($(PLATFORM_IS_ANDROID),1)
-  NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=0
-else
-  NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=1
-endif
-
 $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS))
 
 
@@ -102,5 +93,3 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_backlight_use_native
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_register_backlight
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg
@@ -66,8 +66,6 @@ enum NvKmsClientType {
     NVKMS_CLIENT_KERNEL_SPACE,
 };
 
-struct NvKmsPerOpenDev;
-
 NvBool nvKmsIoctl(
     void *pOpenVoid,
     NvU32 cmd,
@@ -103,25 +101,7 @@ NvBool nvKmsKapiGetFunctionsTableInternal
     struct NvKmsKapiFunctionsTable *funcsTable
 );
 
-void nvKmsKapiSuspendResume(NvBool suspend);
-
 NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
 NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
 
-NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev);
-
-NvU32 nvKmsKapiF16ToF32Internal(NvU16 a);
-
-NvU16 nvKmsKapiF32ToF16Internal(NvU32 a);
-
-NvU32 nvKmsKapiF32MulInternal(NvU32 a, NvU32 b);
-
-NvU32 nvKmsKapiF32DivInternal(NvU32 a, NvU32 b);
-
-NvU32 nvKmsKapiF32AddInternal(NvU32 a, NvU32 b);
-
-NvU32 nvKmsKapiF32ToUI32RMinMagInternal(NvU32 a, NvBool exact);
-
-NvU32 nvKmsKapiUI32ToF32Internal(NvU32 a);
-
 #endif /* __NV_KMS_H__ */
@@ -189,12 +189,6 @@ int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
         struct nvidia_p2p_page_table **page_table,
         void (*free_callback)(void *data), void *data);
 
-/*
- * Flags to be used with persistent APIs
- */
-#define NVIDIA_P2P_FLAGS_DEFAULT            0
-#define NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING 1
-
 /*
  * @brief
  *   Pin and make the pages underlying a range of GPU virtual memory
@@ -218,11 +212,7 @@ int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
  * @param[out] page_table
  *   A pointer to an array of structures with P2P PTEs.
  * @param[in] flags
- *   NVIDIA_P2P_FLAGS_DEFAULT:
- *     Default value to be used if no specific behavior is expected.
- *   NVIDIA_P2P_FLAGS_FORCE_BAR1_MAPPING:
- *     Force BAR1 mappings on certain coherent platforms,
- *     subject to capability and supported topology.
+ *   Must be set to zero for now.
 *
 * @return
 *    0 upon successful completion.
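
Both hunks above concern the persistent pinning API: the 570 side documents NVIDIA_P2P_FLAGS_* values for its flags argument, while the 535 side still requires flags to be zero. As a hedged sketch (not this repository's code) of how a peer kernel driver typically consumes the persistent pair, assuming only the signatures shown in this header; the function names and the zero flags value are illustrative:

    #include "nv-p2p.h"

    /*
     * Pin a GPU virtual range with no revocation callback; the pages stay
     * pinned until explicitly released. Address and length are commonly
     * required to be GPU-page aligned.
     */
    static int demo_pin_persistent(uint64_t gpu_va, uint64_t len,
                                   struct nvidia_p2p_page_table **pt)
    {
        return nvidia_p2p_get_pages_persistent(gpu_va, len, pt, 0);
    }

    static void demo_unpin_persistent(uint64_t gpu_va,
                                      struct nvidia_p2p_page_table *pt)
    {
        /* Release the pinning made above; flags is likewise zero. */
        nvidia_p2p_put_pages_persistent(gpu_va, pt, 0);
    }

This matches the way nvidia-peermem calls the same pair in the diff that follows.
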
@@ -60,13 +60,6 @@ static int peerdirect_support = NV_MEM_PEERDIRECT_SUPPORT_DEFAULT;
 module_param(peerdirect_support, int, S_IRUGO);
 MODULE_PARM_DESC(peerdirect_support, "Set level of support for Peer-direct, 0 [default] or 1 [legacy, for example MLNX_OFED 4.9 LTS]");
 
-enum {
-    NV_MEM_PERSISTENT_API_SUPPORT_LEGACY = 0,
-    NV_MEM_PERSISTENT_API_SUPPORT_DEFAULT = 1,
-};
-static int persistent_api_support = NV_MEM_PERSISTENT_API_SUPPORT_DEFAULT;
-module_param(persistent_api_support, int, S_IRUGO);
-MODULE_PARM_DESC(persistent_api_support, "Set level of support for persistent APIs, 0 [legacy] or 1 [default]");
 
 #define peer_err(FMT, ARGS...) printk(KERN_ERR "nvidia-peermem" " %s:%d ERROR " FMT, __FUNCTION__, __LINE__, ## ARGS)
 #ifdef NV_MEM_DEBUG
@@ -290,8 +283,8 @@ static int nv_dma_map(struct sg_table *sg_head, void *context,
     nv_mem_context->sg_allocated = 1;
     for_each_sg(sg_head->sgl, sg, nv_mem_context->npages, i) {
         sg_set_page(sg, NULL, nv_mem_context->page_size, 0);
-        sg_dma_address(sg) = dma_mapping->dma_addresses[i];
-        sg_dma_len(sg) = nv_mem_context->page_size;
+        sg->dma_address = dma_mapping->dma_addresses[i];
+        sg->dma_length = nv_mem_context->page_size;
     }
     nv_mem_context->sg_head = *sg_head;
     *nmap = nv_mem_context->npages;
@@ -345,13 +338,8 @@ static void nv_mem_put_pages_common(int nc,
         return;
 
     if (nc) {
-#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
         ret = nvidia_p2p_put_pages_persistent(nv_mem_context->page_virt_start,
                                               nv_mem_context->page_table, 0);
-#else
-        ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
-                                   nv_mem_context->page_table);
-#endif
     } else {
         ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
                                    nv_mem_context->page_table);
@@ -459,15 +447,9 @@ static int nv_mem_get_pages_nc(unsigned long addr,
     nv_mem_context->core_context = core_context;
     nv_mem_context->page_size = GPU_PAGE_SIZE;
 
-#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
     ret = nvidia_p2p_get_pages_persistent(nv_mem_context->page_virt_start,
                                           nv_mem_context->mapped_size,
                                           &nv_mem_context->page_table, 0);
-#else
-    ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size,
-                               &nv_mem_context->page_table, NULL, NULL);
-#endif
-
     if (ret < 0) {
         peer_err("error %d while calling nvidia_p2p_get_pages() with NULL callback\n", ret);
         return ret;
@@ -486,8 +468,34 @@ static struct peer_memory_client nv_mem_client_nc = {
     .release = nv_mem_release,
 };
 
-static int nv_mem_legacy_client_init(void)
+#endif /* NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT */
+
+static int nv_mem_param_conf_check(void)
 {
+    int rc = 0;
+    switch (peerdirect_support) {
+    case NV_MEM_PEERDIRECT_SUPPORT_DEFAULT:
+    case NV_MEM_PEERDIRECT_SUPPORT_LEGACY:
+        break;
+    default:
+        peer_err("invalid peerdirect_support param value %d\n", peerdirect_support);
+        rc = -EINVAL;
+        break;
+    }
+    return rc;
+}
+
+static int __init nv_mem_client_init(void)
+{
+    int rc;
+    rc = nv_mem_param_conf_check();
+    if (rc) {
+        return rc;
+    }
+
+#if defined (NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT)
+    int status = 0;
+
     // off by one, to leave space for the trailing '1' which is flagging
     // the new client type
     BUG_ON(strlen(DRV_NAME) > IB_PEER_MEMORY_NAME_MAX-1);
@@ -516,99 +524,22 @@ static int nv_mem_legacy_client_init(void)
                                              &mem_invalidate_callback);
     if (!reg_handle) {
         peer_err("nv_mem_client_init -- error while registering traditional client\n");
-        return -EINVAL;
+        status = -EINVAL;
+        goto out;
     }
-    return 0;
-}
-
-static int nv_mem_nc_client_init(void)
-{
     // The nc client enables support for persistent pages.
-    if (persistent_api_support == NV_MEM_PERSISTENT_API_SUPPORT_LEGACY)
-    {
-        //
-        // If legacy behavior is forced via module param,
-        // both legacy and persistent clients are registered and are named
-        // "nv_mem"(legacy) and "nv_mem_nc"(persistent).
-        //
-        strcpy(nv_mem_client_nc.name, DRV_NAME "_nc");
-    }
-    else
-    {
-        //
-        // With default persistent behavior, the client name shall be "nv_mem"
-        // so that libraries can use the persistent client under the same name.
-        //
-        strcpy(nv_mem_client_nc.name, DRV_NAME);
-    }
-
+    strcpy(nv_mem_client_nc.name, DRV_NAME "_nc");
     strcpy(nv_mem_client_nc.version, DRV_VERSION);
     reg_handle_nc = ib_register_peer_memory_client(&nv_mem_client_nc, NULL);
     if (!reg_handle_nc) {
         peer_err("nv_mem_client_init -- error while registering nc client\n");
-        return -EINVAL;
-    }
-    return 0;
-}
-
-#endif /* NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT */
-
-static int nv_mem_param_peerdirect_conf_check(void)
-{
-    int rc = 0;
-    switch (peerdirect_support) {
-    case NV_MEM_PEERDIRECT_SUPPORT_DEFAULT:
-    case NV_MEM_PEERDIRECT_SUPPORT_LEGACY:
-        break;
-    default:
-        peer_err("invalid peerdirect_support param value %d\n", peerdirect_support);
-        rc = -EINVAL;
-        break;
-    }
-    return rc;
-}
-
-static int nv_mem_param_persistent_api_conf_check(void)
-{
-    int rc = 0;
-    switch (persistent_api_support) {
-    case NV_MEM_PERSISTENT_API_SUPPORT_DEFAULT:
-    case NV_MEM_PERSISTENT_API_SUPPORT_LEGACY:
-        break;
-    default:
-        peer_err("invalid persistent_api_support param value %d\n", persistent_api_support);
-        rc = -EINVAL;
-        break;
-    }
-    return rc;
-}
-
-static int __init nv_mem_client_init(void)
-{
-#if defined (NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT)
-    int rc;
-    rc = nv_mem_param_peerdirect_conf_check();
-    if (rc) {
-        return rc;
-    }
-
-    rc = nv_mem_param_persistent_api_conf_check();
-    if (rc) {
-        return rc;
-    }
-
-    if (persistent_api_support == NV_MEM_PERSISTENT_API_SUPPORT_LEGACY) {
-        rc = nv_mem_legacy_client_init();
-        if (rc)
-            goto out;
-    }
-
-    rc = nv_mem_nc_client_init();
-    if (rc)
+        status = -EINVAL;
         goto out;
+    }
 
 out:
-    if (rc) {
+    if (status) {
         if (reg_handle) {
             ib_unregister_peer_memory_client(reg_handle);
             reg_handle = NULL;
@@ -620,7 +551,7 @@ out:
         }
     }
 
-    return rc;
+    return status;
 #else
     return -EINVAL;
 #endif
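
The consolidated nv_mem_param_conf_check() above is a standard module-parameter validation idiom: declare the parameter, document it, and reject out-of-range values in the init function before anything is registered. A minimal self-contained sketch of the same pattern (names and values are illustrative):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int demo_support = 0;
    module_param(demo_support, int, S_IRUGO);
    MODULE_PARM_DESC(demo_support, "0 [default] or 1 [legacy]");

    static int __init demo_init(void)
    {
        /* Fail the module load early on nonsense values, as the code above does. */
        switch (demo_support) {
        case 0:
        case 1:
            return 0;
        default:
            pr_err("invalid demo_support value %d\n", demo_support);
            return -EINVAL;
        }
    }

    static void __exit demo_exit(void) { }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("Dual MIT/GPL");

Such a parameter is set on the insmod/modprobe command line (e.g. peerdirect_support=1) and, with S_IRUGO, surfaces read-only under /sys/module/<name>/parameters/.
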
@@ -1,25 +1,30 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/*******************************************************************************
+    Copyright (c) 2022 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
 
+
+// AUTO GENERATED -- DO NOT EDIT - this file automatically generated by refhdr2class.pl
+// Command: ../../../bin/manuals/refhdr2class.pl clc365.h c365 ACCESS_COUNTER_NOTIFY_BUFFER --search_str=NV_ACCESS_COUNTER --input_file=nv_ref_dev_access_counter.h
+
+
 #ifndef _clc365_h_
 #define _clc365_h_
@@ -1,25 +1,30 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/*******************************************************************************
+    Copyright (c) 2022 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
 
+
+// AUTO GENERATED -- DO NOT EDIT - this file automatically generated by refhdr2class.pl
+// Command: ../../../bin/manuals/refhdr2class.pl clc369.h c369 MMU_FAULT_BUFFER --search_str=NV_MMU_FAULT --input_file=nv_ref_dev_mmu_fault.h
+
+
 #ifndef _clc369_h_
 #define _clc369_h_
@@ -1,25 +1,26 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/*******************************************************************************
+    Copyright (c) 2012-2015 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
 
+
 #ifndef _clc36f_h_
 #define _clc36f_h_
@@ -256,6 +257,7 @@ typedef volatile struct Nvc36fControl_struct {
 #define NVC36F_CLEAR_FAULTED_TYPE                                  31:31
 #define NVC36F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED                    0x00000000
 #define NVC36F_CLEAR_FAULTED_TYPE_ENG_FAULTED                      0x00000001
+#define NVC36F_QUADRO_VERIFY                                       (0x000000a0)
 
 
 /* GPFIFO entry format */
@@ -1,25 +1,26 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/*******************************************************************************
+    Copyright (c) 2012-2015 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
 
+
 #ifndef _clc46f_h_
 #define _clc46f_h_
@@ -258,6 +259,7 @@ typedef volatile struct Nvc46fControl_struct {
 #define NVC46F_CLEAR_FAULTED_TYPE                                  31:31
 #define NVC46F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED                    0x00000000
 #define NVC46F_CLEAR_FAULTED_TYPE_ENG_FAULTED                      0x00000001
+#define NVC46F_QUADRO_VERIFY                                       (0x000000a0)
 
 
 /* GPFIFO entry format */
@@ -1,25 +1,26 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/*******************************************************************************
+    Copyright (c) 2012-2015 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
 
+
 #ifndef _clc56f_h_
 #define _clc56f_h_
@@ -260,6 +261,7 @@ typedef volatile struct Nvc56fControl_struct {
 #define NVC56F_CLEAR_FAULTED_TYPE                                  31:31
 #define NVC56F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED                    0x00000000
 #define NVC56F_CLEAR_FAULTED_TYPE_ENG_FAULTED                      0x00000001
+#define NVC56F_QUADRO_VERIFY                                       (0x000000a0)
 
 
 /* GPFIFO entry format */
@@ -1,19 +1,19 @@
 /*******************************************************************************
-    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+    Copyright (c) 1993-2004 NVIDIA Corporation
 
-    Permission is hereby granted, free of charge, to any person obtaining a
-    copy of this software and associated documentation files (the "Software"),
-    to deal in the Software without restriction, including without limitation
-    the rights to use, copy, modify, merge, publish, distribute, sublicense,
-    and/or sell copies of the Software, and to permit persons to whom the
-    Software is furnished to do so, subject to the following conditions:
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
 
-    The above copyright notice and this permission notice shall be included in
-    all copies or substantial portions of the Software.
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
 
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -21,6 +21,8 @@
 
 *******************************************************************************/
 
+
+
 #include "nvtypes.h"
 
 #ifndef _clc5b5_h_
@@ -32,6 +34,64 @@ extern "C" {
 
 #define TURING_DMA_COPY_A                                                       (0x0000C5B5)
 
+typedef volatile struct _clc5b5_tag0 {
+    NvV32 Reserved00[0x40];
+    NvV32 Nop;                                   // 0x00000100 - 0x00000103
+    NvV32 Reserved01[0xF];
+    NvV32 PmTrigger;                             // 0x00000140 - 0x00000143
+    NvV32 Reserved02[0x3F];
+    NvV32 SetSemaphoreA;                         // 0x00000240 - 0x00000243
+    NvV32 SetSemaphoreB;                         // 0x00000244 - 0x00000247
+    NvV32 SetSemaphorePayload;                   // 0x00000248 - 0x0000024B
+    NvV32 Reserved03[0x2];
+    NvV32 SetRenderEnableA;                      // 0x00000254 - 0x00000257
+    NvV32 SetRenderEnableB;                      // 0x00000258 - 0x0000025B
+    NvV32 SetRenderEnableC;                      // 0x0000025C - 0x0000025F
+    NvV32 SetSrcPhysMode;                        // 0x00000260 - 0x00000263
+    NvV32 SetDstPhysMode;                        // 0x00000264 - 0x00000267
+    NvV32 Reserved04[0x6];
+    NvV32 SetGlobalCounterUpper;                 // 0x00000280 - 0x00000283
+    NvV32 SetGlobalCounterLower;                 // 0x00000284 - 0x00000287
+    NvV32 SetPageoutStartPAUpper;                // 0x00000288 - 0x0000028B
+    NvV32 SetPageoutStartPALower;                // 0x0000028C - 0x0000028F
+    NvV32 Reserved05[0x1C];
+    NvV32 LaunchDma;                             // 0x00000300 - 0x00000303
+    NvV32 Reserved06[0x3F];
+    NvV32 OffsetInUpper;                         // 0x00000400 - 0x00000403
+    NvV32 OffsetInLower;                         // 0x00000404 - 0x00000407
+    NvV32 OffsetOutUpper;                        // 0x00000408 - 0x0000040B
+    NvV32 OffsetOutLower;                        // 0x0000040C - 0x0000040F
+    NvV32 PitchIn;                               // 0x00000410 - 0x00000413
+    NvV32 PitchOut;                              // 0x00000414 - 0x00000417
+    NvV32 LineLengthIn;                          // 0x00000418 - 0x0000041B
+    NvV32 LineCount;                             // 0x0000041C - 0x0000041F
+    NvV32 Reserved07[0xB8];
+    NvV32 SetRemapConstA;                        // 0x00000700 - 0x00000703
+    NvV32 SetRemapConstB;                        // 0x00000704 - 0x00000707
+    NvV32 SetRemapComponents;                    // 0x00000708 - 0x0000070B
+    NvV32 SetDstBlockSize;                       // 0x0000070C - 0x0000070F
+    NvV32 SetDstWidth;                           // 0x00000710 - 0x00000713
+    NvV32 SetDstHeight;                          // 0x00000714 - 0x00000717
+    NvV32 SetDstDepth;                           // 0x00000718 - 0x0000071B
+    NvV32 SetDstLayer;                           // 0x0000071C - 0x0000071F
+    NvV32 SetDstOrigin;                          // 0x00000720 - 0x00000723
+    NvV32 Reserved08[0x1];
+    NvV32 SetSrcBlockSize;                       // 0x00000728 - 0x0000072B
+    NvV32 SetSrcWidth;                           // 0x0000072C - 0x0000072F
+    NvV32 SetSrcHeight;                          // 0x00000730 - 0x00000733
+    NvV32 SetSrcDepth;                           // 0x00000734 - 0x00000737
+    NvV32 SetSrcLayer;                           // 0x00000738 - 0x0000073B
+    NvV32 SetSrcOrigin;                          // 0x0000073C - 0x0000073F
+    NvV32 Reserved09[0x1];
+    NvV32 SrcOriginX;                            // 0x00000744 - 0x00000747
+    NvV32 SrcOriginY;                            // 0x00000748 - 0x0000074B
+    NvV32 DstOriginX;                            // 0x0000074C - 0x0000074F
+    NvV32 DstOriginY;                            // 0x00000750 - 0x00000753
+    NvV32 Reserved10[0x270];
+    NvV32 PmTriggerEnd;                          // 0x00001114 - 0x00001117
+    NvV32 Reserved11[0x3BA];
+} turing_dma_copy_aControlPio;
+
 #define NVC5B5_NOP                                                              (0x00000100)
 #define NVC5B5_NOP_PARAMETER                                                    31:0
 #define NVC5B5_PM_TRIGGER                                                       (0x00000140)
@@ -65,6 +125,14 @@ extern "C" {
 #define NVC5B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM                         (0x00000001)
 #define NVC5B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM                      (0x00000002)
 #define NVC5B5_SET_DST_PHYS_MODE_BASIC_KIND                                     5:2
+#define NVC5B5_SET_GLOBAL_COUNTER_UPPER                                         (0x00000280)
+#define NVC5B5_SET_GLOBAL_COUNTER_UPPER_V                                       31:0
+#define NVC5B5_SET_GLOBAL_COUNTER_LOWER                                         (0x00000284)
+#define NVC5B5_SET_GLOBAL_COUNTER_LOWER_V                                       31:0
+#define NVC5B5_SET_PAGEOUT_START_PAUPPER                                        (0x00000288)
+#define NVC5B5_SET_PAGEOUT_START_PAUPPER_V                                      4:0
+#define NVC5B5_SET_PAGEOUT_START_PALOWER                                        (0x0000028C)
+#define NVC5B5_SET_PAGEOUT_START_PALOWER_V                                      31:0
 #define NVC5B5_LAUNCH_DMA                                                       (0x00000300)
 #define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE                                    1:0
 #define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE                               (0x00000000)
@@ -131,6 +199,8 @@ extern "C" {
 #define NVC5B5_LAUNCH_DMA_VPRMODE                                               23:22
 #define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_NONE                                      (0x00000000)
 #define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID                                   (0x00000001)
+#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS                                   (0x00000002)
+#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID                                   (0x00000003)
 #define NVC5B5_LAUNCH_DMA_RESERVED_START_OF_COPY                                24:24
 #define NVC5B5_LAUNCH_DMA_DISABLE_PLC                                           26:26
 #define NVC5B5_LAUNCH_DMA_DISABLE_PLC_FALSE                                     (0x00000000)
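
In these class headers, a bare value such as 31:0 or 23:22 is an NVIDIA-style high:low bitfield range, and the suffixed macros are the legal field values; drivers in this tree consume them through the DRF macros from nvmisc.h. As a hedged sketch of how the NVC5B5 LAUNCH_DMA fields above are typically assembled into a single method payload (assuming the standard DRF_DEF macro is available; the chosen fields are only for illustration):

    #include "nvmisc.h"
    #include "clc5b5.h"

    /* Build a LAUNCH_DMA method value: no data transfer, VPR disabled.
     * DRF_DEF shifts each named field value into its high:low bit range. */
    static NvU32 demo_launch_dma_value(void)
    {
        return DRF_DEF(C5B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _NONE) |
               DRF_DEF(C5B5, _LAUNCH_DMA, _VPRMODE, _VPR_NONE);
    }
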
@@ -1,19 +1,19 @@
 /*******************************************************************************
-    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+    Copyright (c) 1993-2004 NVIDIA Corporation
 
-    Permission is hereby granted, free of charge, to any person obtaining a
-    copy of this software and associated documentation files (the "Software"),
-    to deal in the Software without restriction, including without limitation
-    the rights to use, copy, modify, merge, publish, distribute, sublicense,
-    and/or sell copies of the Software, and to permit persons to whom the
-    Software is furnished to do so, subject to the following conditions:
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
 
-    The above copyright notice and this permission notice shall be included in
-    all copies or substantial portions of the Software.
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
 
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -21,6 +21,8 @@
 
 *******************************************************************************/
 
+
+
 #include "nvtypes.h"
 
 #ifndef _clc6b5_h_
@@ -32,6 +34,64 @@ extern "C" {
 
 #define AMPERE_DMA_COPY_A                                                       (0x0000C6B5)
 
+typedef volatile struct _clc6b5_tag0 {
+    NvV32 Reserved00[0x40];
+    NvV32 Nop;                                   // 0x00000100 - 0x00000103
+    NvV32 Reserved01[0xF];
+    NvV32 PmTrigger;                             // 0x00000140 - 0x00000143
+    NvV32 Reserved02[0x3F];
+    NvV32 SetSemaphoreA;                         // 0x00000240 - 0x00000243
+    NvV32 SetSemaphoreB;                         // 0x00000244 - 0x00000247
+    NvV32 SetSemaphorePayload;                   // 0x00000248 - 0x0000024B
+    NvV32 Reserved03[0x2];
+    NvV32 SetRenderEnableA;                      // 0x00000254 - 0x00000257
+    NvV32 SetRenderEnableB;                      // 0x00000258 - 0x0000025B
+    NvV32 SetRenderEnableC;                      // 0x0000025C - 0x0000025F
+    NvV32 SetSrcPhysMode;                        // 0x00000260 - 0x00000263
+    NvV32 SetDstPhysMode;                        // 0x00000264 - 0x00000267
+    NvV32 Reserved04[0x6];
+    NvV32 SetGlobalCounterUpper;                 // 0x00000280 - 0x00000283
+    NvV32 SetGlobalCounterLower;                 // 0x00000284 - 0x00000287
+    NvV32 SetPageoutStartPAUpper;                // 0x00000288 - 0x0000028B
+    NvV32 SetPageoutStartPALower;                // 0x0000028C - 0x0000028F
+    NvV32 Reserved05[0x1C];
+    NvV32 LaunchDma;                             // 0x00000300 - 0x00000303
+    NvV32 Reserved06[0x3F];
+    NvV32 OffsetInUpper;                         // 0x00000400 - 0x00000403
+    NvV32 OffsetInLower;                         // 0x00000404 - 0x00000407
+    NvV32 OffsetOutUpper;                        // 0x00000408 - 0x0000040B
+    NvV32 OffsetOutLower;                        // 0x0000040C - 0x0000040F
+    NvV32 PitchIn;                               // 0x00000410 - 0x00000413
+    NvV32 PitchOut;                              // 0x00000414 - 0x00000417
+    NvV32 LineLengthIn;                          // 0x00000418 - 0x0000041B
+    NvV32 LineCount;                             // 0x0000041C - 0x0000041F
+    NvV32 Reserved07[0xB8];
+    NvV32 SetRemapConstA;                        // 0x00000700 - 0x00000703
+    NvV32 SetRemapConstB;                        // 0x00000704 - 0x00000707
+    NvV32 SetRemapComponents;                    // 0x00000708 - 0x0000070B
+    NvV32 SetDstBlockSize;                       // 0x0000070C - 0x0000070F
+    NvV32 SetDstWidth;                           // 0x00000710 - 0x00000713
+    NvV32 SetDstHeight;                          // 0x00000714 - 0x00000717
+    NvV32 SetDstDepth;                           // 0x00000718 - 0x0000071B
+    NvV32 SetDstLayer;                           // 0x0000071C - 0x0000071F
+    NvV32 SetDstOrigin;                          // 0x00000720 - 0x00000723
+    NvV32 Reserved08[0x1];
+    NvV32 SetSrcBlockSize;                       // 0x00000728 - 0x0000072B
+    NvV32 SetSrcWidth;                           // 0x0000072C - 0x0000072F
+    NvV32 SetSrcHeight;                          // 0x00000730 - 0x00000733
+    NvV32 SetSrcDepth;                           // 0x00000734 - 0x00000737
+    NvV32 SetSrcLayer;                           // 0x00000738 - 0x0000073B
+    NvV32 SetSrcOrigin;                          // 0x0000073C - 0x0000073F
+    NvV32 Reserved09[0x1];
+    NvV32 SrcOriginX;                            // 0x00000744 - 0x00000747
+    NvV32 SrcOriginY;                            // 0x00000748 - 0x0000074B
+    NvV32 DstOriginX;                            // 0x0000074C - 0x0000074F
+    NvV32 DstOriginY;                            // 0x00000750 - 0x00000753
+    NvV32 Reserved10[0x270];
+    NvV32 PmTriggerEnd;                          // 0x00001114 - 0x00001117
+    NvV32 Reserved11[0x3BA];
+} ampere_dma_copy_aControlPio;
+
 #define NVC6B5_NOP                                                              (0x00000100)
 #define NVC6B5_NOP_PARAMETER                                                    31:0
 #define NVC6B5_PM_TRIGGER                                                       (0x00000140)
@@ -71,6 +131,14 @@ extern "C" {
 #define NVC6B5_SET_DST_PHYS_MODE_BASIC_KIND                                     5:2
 #define NVC6B5_SET_DST_PHYS_MODE_PEER_ID                                        8:6
 #define NVC6B5_SET_DST_PHYS_MODE_FLA                                            9:9
+#define NVC6B5_SET_GLOBAL_COUNTER_UPPER                                         (0x00000280)
+#define NVC6B5_SET_GLOBAL_COUNTER_UPPER_V                                       31:0
+#define NVC6B5_SET_GLOBAL_COUNTER_LOWER                                         (0x00000284)
+#define NVC6B5_SET_GLOBAL_COUNTER_LOWER_V                                       31:0
+#define NVC6B5_SET_PAGEOUT_START_PAUPPER                                        (0x00000288)
+#define NVC6B5_SET_PAGEOUT_START_PAUPPER_V                                      4:0
+#define NVC6B5_SET_PAGEOUT_START_PALOWER                                        (0x0000028C)
+#define NVC6B5_SET_PAGEOUT_START_PALOWER_V                                      31:0
 #define NVC6B5_LAUNCH_DMA                                                       (0x00000300)
 #define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE                                    1:0
 #define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE                               (0x00000000)
@@ -131,6 +199,8 @@ extern "C" {
 #define NVC6B5_LAUNCH_DMA_VPRMODE                                               23:22
 #define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_NONE                                      (0x00000000)
 #define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID                                   (0x00000001)
+#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS                                   (0x00000002)
+#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID                                   (0x00000003)
 #define NVC6B5_LAUNCH_DMA_RESERVED_START_OF_COPY                                24:24
 #define NVC6B5_LAUNCH_DMA_DISABLE_PLC                                           26:26
 #define NVC6B5_LAUNCH_DMA_DISABLE_PLC_FALSE                                     (0x00000000)
@@ -1,19 +1,19 @@
 /*******************************************************************************
-    Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+    Copyright (c) 1993-2004 NVIDIA Corporation
 
-    Permission is hereby granted, free of charge, to any person obtaining a
-    copy of this software and associated documentation files (the "Software"),
-    to deal in the Software without restriction, including without limitation
-    the rights to use, copy, modify, merge, publish, distribute, sublicense,
-    and/or sell copies of the Software, and to permit persons to whom the
-    Software is furnished to do so, subject to the following conditions:
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
 
-    The above copyright notice and this permission notice shall be included in
-    all copies or substantial portions of the Software.
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
 
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
@@ -21,6 +21,8 @@
 
 *******************************************************************************/
 
+
+
 #include "nvtypes.h"
 
 #ifndef _clc7b5_h_
@@ -32,6 +34,69 @@ extern "C" {
 
 #define AMPERE_DMA_COPY_B                                                       (0x0000C7B5)
 
+typedef volatile struct _clc7b5_tag0 {
+    NvV32 Reserved00[0x40];
+    NvV32 Nop;                                   // 0x00000100 - 0x00000103
+    NvV32 Reserved01[0xF];
+    NvV32 PmTrigger;                             // 0x00000140 - 0x00000143
+    NvV32 Reserved02[0x36];
+    NvV32 SetMonitoredFenceType;                 // 0x0000021C - 0x0000021F
+    NvV32 SetMonitoredFenceSignalAddrBaseUpper;  // 0x00000220 - 0x00000223
+    NvV32 SetMonitoredFenceSignalAddrBaseLower;  // 0x00000224 - 0x00000227
+    NvV32 Reserved03[0x6];
+    NvV32 SetSemaphoreA;                         // 0x00000240 - 0x00000243
+    NvV32 SetSemaphoreB;                         // 0x00000244 - 0x00000247
+    NvV32 SetSemaphorePayload;                   // 0x00000248 - 0x0000024B
+    NvV32 SetSemaphorePayloadUpper;              // 0x0000024C - 0x0000024F
+    NvV32 Reserved04[0x1];
+    NvV32 SetRenderEnableA;                      // 0x00000254 - 0x00000257
+    NvV32 SetRenderEnableB;                      // 0x00000258 - 0x0000025B
+    NvV32 SetRenderEnableC;                      // 0x0000025C - 0x0000025F
+    NvV32 SetSrcPhysMode;                        // 0x00000260 - 0x00000263
+    NvV32 SetDstPhysMode;                        // 0x00000264 - 0x00000267
+    NvV32 Reserved05[0x6];
+    NvV32 SetGlobalCounterUpper;                 // 0x00000280 - 0x00000283
+    NvV32 SetGlobalCounterLower;                 // 0x00000284 - 0x00000287
+    NvV32 SetPageoutStartPAUpper;                // 0x00000288 - 0x0000028B
+    NvV32 SetPageoutStartPALower;                // 0x0000028C - 0x0000028F
+    NvV32 Reserved06[0x1C];
+    NvV32 LaunchDma;                             // 0x00000300 - 0x00000303
+    NvV32 Reserved07[0x3F];
+    NvV32 OffsetInUpper;                         // 0x00000400 - 0x00000403
+    NvV32 OffsetInLower;                         // 0x00000404 - 0x00000407
+    NvV32 OffsetOutUpper;                        // 0x00000408 - 0x0000040B
+    NvV32 OffsetOutLower;                        // 0x0000040C - 0x0000040F
+    NvV32 PitchIn;                               // 0x00000410 - 0x00000413
+    NvV32 PitchOut;                              // 0x00000414 - 0x00000417
+    NvV32 LineLengthIn;                          // 0x00000418 - 0x0000041B
+    NvV32 LineCount;                             // 0x0000041C - 0x0000041F
+    NvV32 Reserved08[0xB8];
+    NvV32 SetRemapConstA;                        // 0x00000700 - 0x00000703
+    NvV32 SetRemapConstB;                        // 0x00000704 - 0x00000707
+    NvV32 SetRemapComponents;                    // 0x00000708 - 0x0000070B
+    NvV32 SetDstBlockSize;                       // 0x0000070C - 0x0000070F
+    NvV32 SetDstWidth;                           // 0x00000710 - 0x00000713
+    NvV32 SetDstHeight;                          // 0x00000714 - 0x00000717
+    NvV32 SetDstDepth;                           // 0x00000718 - 0x0000071B
+    NvV32 SetDstLayer;                           // 0x0000071C - 0x0000071F
+    NvV32 SetDstOrigin;                          // 0x00000720 - 0x00000723
+    NvV32 Reserved09[0x1];
+    NvV32 SetSrcBlockSize;                       // 0x00000728 - 0x0000072B
+    NvV32 SetSrcWidth;                           // 0x0000072C - 0x0000072F
+    NvV32 SetSrcHeight;                          // 0x00000730 - 0x00000733
+    NvV32 SetSrcDepth;                           // 0x00000734 - 0x00000737
+    NvV32 SetSrcLayer;                           // 0x00000738 - 0x0000073B
+    NvV32 SetSrcOrigin;                          // 0x0000073C - 0x0000073F
+    NvV32 Reserved10[0x1];
+    NvV32 SrcOriginX;                            // 0x00000744 - 0x00000747
+    NvV32 SrcOriginY;                            // 0x00000748 - 0x0000074B
+    NvV32 DstOriginX;                            // 0x0000074C - 0x0000074F
+    NvV32 DstOriginY;                            // 0x00000750 - 0x00000753
+    NvV32 Reserved11[0x270];
+    NvV32 PmTriggerEnd;                          // 0x00001114 - 0x00001117
+    NvV32 Reserved12[0x3BA];
+} ampere_dma_copy_bControlPio;
+
 #define NVC7B5_NOP                                                              (0x00000100)
 #define NVC7B5_NOP_PARAMETER                                                    31:0
 #define NVC7B5_PM_TRIGGER                                                       (0x00000140)
@@ -81,6 +146,14 @@ extern "C" {
 #define NVC7B5_SET_DST_PHYS_MODE_BASIC_KIND                                     5:2
 #define NVC7B5_SET_DST_PHYS_MODE_PEER_ID                                        8:6
 #define NVC7B5_SET_DST_PHYS_MODE_FLA                                            9:9
+#define NVC7B5_SET_GLOBAL_COUNTER_UPPER                                         (0x00000280)
+#define NVC7B5_SET_GLOBAL_COUNTER_UPPER_V                                       31:0
+#define NVC7B5_SET_GLOBAL_COUNTER_LOWER                                         (0x00000284)
|
||||||
|
#define NVC7B5_SET_GLOBAL_COUNTER_LOWER_V 31:0
|
||||||
|
#define NVC7B5_SET_PAGEOUT_START_PAUPPER (0x00000288)
|
||||||
|
#define NVC7B5_SET_PAGEOUT_START_PAUPPER_V 4:0
|
||||||
|
#define NVC7B5_SET_PAGEOUT_START_PALOWER (0x0000028C)
|
||||||
|
#define NVC7B5_SET_PAGEOUT_START_PALOWER_V 31:0
|
||||||
#define NVC7B5_LAUNCH_DMA (0x00000300)
|
#define NVC7B5_LAUNCH_DMA (0x00000300)
|
||||||
#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
|
#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
|
||||||
#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
|
#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
|
||||||
@@ -150,6 +223,8 @@ extern "C" {
|
|||||||
#define NVC7B5_LAUNCH_DMA_VPRMODE 23:22
|
#define NVC7B5_LAUNCH_DMA_VPRMODE 23:22
|
||||||
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
|
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000)
|
||||||
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
|
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001)
|
||||||
|
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002)
|
||||||
|
#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003)
|
||||||
#define NVC7B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
|
#define NVC7B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24
|
||||||
#define NVC7B5_LAUNCH_DMA_DISABLE_PLC 26:26
|
#define NVC7B5_LAUNCH_DMA_DISABLE_PLC 26:26
|
||||||
#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)
|
#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000)
|
||||||
|
|||||||
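Throughout these class headers, each field is written as a HI:LO bit-range pair rather than as a mask/shift pair. A minimal sketch of how such a pair becomes a packed method payload, assuming stand-in helper macros (the driver uses its own DRF-style helpers; NV_FIELD_MASK and NV_FIELD_NUM below are illustrative only, not part of the header):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for DRF-style helpers: build a mask from a
 * HI:LO pair and place a value into that field. */
#define NV_FIELD_MASK(hi, lo)   ((uint32_t)((((uint64_t)1 << ((hi) - (lo) + 1)) - 1) << (lo)))
#define NV_FIELD_NUM(hi, lo, v) (((uint32_t)(v) << (lo)) & NV_FIELD_MASK(hi, lo))

int main(void)
{
    /* NVC7B5 LaunchDma payload: DATA_TRANSFER_TYPE (bits 1:0) set to the
     * NON_PIPELINED encoding (2, per the matching NVC9B5 list below),
     * VPRMODE (23:22) left at VPR_NONE, DISABLE_PLC (26:26) left FALSE. */
    uint32_t launch_dma = 0;
    launch_dma |= NV_FIELD_NUM(1, 0, 0x00000002);   /* DATA_TRANSFER_TYPE */
    launch_dma |= NV_FIELD_NUM(23, 22, 0x00000000); /* VPRMODE_VPR_NONE   */
    launch_dma |= NV_FIELD_NUM(26, 26, 0x00000000); /* DISABLE_PLC_FALSE  */
    printf("NVC7B5_LAUNCH_DMA data word: 0x%08x\n", (unsigned)launch_dma);
    return 0;
}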
@@ -1,329 +0,0 @@
/*******************************************************************************
    Copyright (c) 2012-2015 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#ifndef _clc96f_h_
#define _clc96f_h_

#ifdef __cplusplus
extern "C" {
#endif

#include "nvtypes.h"

/* class BLACKWELL_CHANNEL_GPFIFO */
/*
 * Documentation for BLACKWELL_CHANNEL_GPFIFO can be found in dev_pbdma.ref,
 * chapter "User Control Registers". It is documented as device NV_UDMA.
 * The GPFIFO format itself is also documented in dev_pbdma.ref,
 * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref,
 * chapter "FIFO DMA RAM", NV_FIFO_DMA_*.
 *
 * Note there is no .mfs file for this class.
 */
#define BLACKWELL_CHANNEL_GPFIFO_A                        (0x0000C96F)

#define NVC96F_TYPEDEF                                    BLACKWELL_CHANNELChannelGPFifoA

/* dma flow control data structure */
typedef volatile struct Nvc96fControl_struct {
    NvU32 Ignored00[0x23];        /*                                  0000-008b*/
    NvU32 GPPut;                  /* GP FIFO put offset               008c-008f*/
    NvU32 Ignored01[0x5c];
} Nvc96fControl, BlackwellAControlGPFifo;

/* fields and values */
#define NVC96F_NUMBER_OF_SUBCHANNELS                      (8)
#define NVC96F_SET_OBJECT                                 (0x00000000)
#define NVC96F_SET_OBJECT_NVCLASS                         15:0
#define NVC96F_SET_OBJECT_ENGINE                          20:16
#define NVC96F_SET_OBJECT_ENGINE_SW                       0x0000001f
#define NVC96F_NOP                                        (0x00000008)
#define NVC96F_NOP_HANDLE                                 31:0
#define NVC96F_NON_STALL_INTERRUPT                        (0x00000020)
#define NVC96F_NON_STALL_INTERRUPT_HANDLE                 31:0
#define NVC96F_FB_FLUSH                                   (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR
#define NVC96F_FB_FLUSH_HANDLE                            31:0
// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
// specifying the page address for a targeted TLB invalidate and the uTLB for
// a targeted REPLAY_CANCEL for UVM.
// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly
// rearranged fields.
#define NVC96F_MEM_OP_A                                   (0x00000028)
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0  // only relevant for REPLAY_CANCEL_TARGETED
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE  5:0  // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6  // only relevant for REPLAY_CANCEL_TARGETED
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE        7:6  // only relevant for invalidates with NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE for invalidating link TLB only, or non-link TLB only or all TLBs
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_ALL_TLBS 0
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_LINK_TLBS 1
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_NON_LINK_TLBS 2
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_RSVRVD 3
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 8:0  // only relevant for REPLAY_CANCEL_VA_GLOBAL
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR          11:11
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN       0x00000001
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS      0x00000000
#define NVC96F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO     31:12
#define NVC96F_MEM_OP_B                                   (0x0000002c)
#define NVC96F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI     31:0
#define NVC96F_MEM_OP_C                                   (0x00000030)
#define NVC96F_MEM_OP_C_MEMBAR_TYPE                       2:0
#define NVC96F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR            0x00000000
#define NVC96F_MEM_OP_C_MEMBAR_TYPE_MEMBAR                0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB                0:0
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE            0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL            0x00000001  // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_GPC                1:1
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE         0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE        0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY             4:2  // only relevant if GPC ENABLE
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE        0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START       0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE           6:5  // only relevant if GPC ENABLE
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE      0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY  0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE        9:7  // only relevant for REPLAY_CANCEL_VA_GLOBAL
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL   9:7  // Invalidate affects this level and all below
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000  // Invalidate tlb caches at all levels of the page table
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE       11:10  // only relevant if PDB_ONE
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003
#define NVC96F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO        31:12  // only relevant if PDB_ONE
#define NVC96F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0
// MEM_OP_D MUST be preceded by MEM_OPs A-C.
#define NVC96F_MEM_OP_D                                   (0x00000034)
#define NVC96F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI        26:0  // only relevant if PDB_ONE
#define NVC96F_MEM_OP_D_OPERATION                         31:27
#define NVC96F_MEM_OP_D_OPERATION_MEMBAR                  0x00000005
#define NVC96F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE      0x00000009
#define NVC96F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a
#define NVC96F_MEM_OP_D_OPERATION_MMU_OPERATION           0x0000000b
#define NVC96F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE   0x0000000d
#define NVC96F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE    0x0000000e
// CLEAN_LINES is an alias for Tegra/GPU IP usage
#define NVC96F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
#define NVC96F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS       0x0000000f
#define NVC96F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY          0x00000010
#define NVC96F_MEM_OP_D_OPERATION_L2_SYSMEM_NCOH_INVALIDATE 0x00000011
#define NVC96F_MEM_OP_D_OPERATION_L2_SYSMEM_COH_INVALIDATE 0x00000012
#define NVC96F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015
#define NVC96F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR      0x00000016
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE           1:0
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC      0x00000000
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC      0x00000001
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL       0x00000002
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED  0x00000003
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE  2:2
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001
#define NVC96F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK  6:3
#define NVC96F_MEM_OP_D_MMU_OPERATION_TYPE                23:20
#define NVC96F_MEM_OP_D_MMU_OPERATION_TYPE_RESERVED       0x00000000
#define NVC96F_MEM_OP_D_MMU_OPERATION_TYPE_VIDMEM_ACCESS_BIT_DUMP 0x00000001
#define NVC96F_SEM_ADDR_LO                                (0x0000005c)
#define NVC96F_SEM_ADDR_LO_OFFSET                         31:2
#define NVC96F_SEM_ADDR_HI                                (0x00000060)
#define NVC96F_SEM_ADDR_HI_OFFSET                         24:0
#define NVC96F_SEM_PAYLOAD_LO                             (0x00000064)
#define NVC96F_SEM_PAYLOAD_LO_PAYLOAD                     31:0
#define NVC96F_SEM_PAYLOAD_HI                             (0x00000068)
#define NVC96F_SEM_PAYLOAD_HI_PAYLOAD                     31:0
#define NVC96F_SEM_EXECUTE                                (0x0000006c)
#define NVC96F_SEM_EXECUTE_OPERATION                      2:0
#define NVC96F_SEM_EXECUTE_OPERATION_ACQUIRE              0x00000000
#define NVC96F_SEM_EXECUTE_OPERATION_RELEASE              0x00000001
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ       0x00000002
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ         0x00000003
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_AND              0x00000004
#define NVC96F_SEM_EXECUTE_OPERATION_ACQ_NOR              0x00000005
#define NVC96F_SEM_EXECUTE_OPERATION_REDUCTION            0x00000006
#define NVC96F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG             12:12
#define NVC96F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS         0x00000000
#define NVC96F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN          0x00000001
#define NVC96F_SEM_EXECUTE_ACQUIRE_RECHECK                18:18
#define NVC96F_SEM_EXECUTE_ACQUIRE_RECHECK_DIS            0x00000000
#define NVC96F_SEM_EXECUTE_ACQUIRE_RECHECK_EN             0x00000001
#define NVC96F_SEM_EXECUTE_RELEASE_WFI                    20:20
#define NVC96F_SEM_EXECUTE_RELEASE_WFI_DIS                0x00000000
#define NVC96F_SEM_EXECUTE_RELEASE_WFI_EN                 0x00000001
#define NVC96F_SEM_EXECUTE_PAYLOAD_SIZE                   24:24
#define NVC96F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT             0x00000000
#define NVC96F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT             0x00000001
#define NVC96F_SEM_EXECUTE_RELEASE_TIMESTAMP              25:25
#define NVC96F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS          0x00000000
#define NVC96F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN           0x00000001
#define NVC96F_SEM_EXECUTE_REDUCTION                      30:27
#define NVC96F_SEM_EXECUTE_REDUCTION_IMIN                 0x00000000
#define NVC96F_SEM_EXECUTE_REDUCTION_IMAX                 0x00000001
#define NVC96F_SEM_EXECUTE_REDUCTION_IXOR                 0x00000002
#define NVC96F_SEM_EXECUTE_REDUCTION_IAND                 0x00000003
#define NVC96F_SEM_EXECUTE_REDUCTION_IOR                  0x00000004
#define NVC96F_SEM_EXECUTE_REDUCTION_IADD                 0x00000005
#define NVC96F_SEM_EXECUTE_REDUCTION_INC                  0x00000006
#define NVC96F_SEM_EXECUTE_REDUCTION_DEC                  0x00000007
#define NVC96F_SEM_EXECUTE_REDUCTION_FORMAT               31:31
#define NVC96F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED        0x00000000
#define NVC96F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED      0x00000001
#define NVC96F_WFI                                        (0x00000078)
#define NVC96F_WFI_SCOPE                                  0:0
#define NVC96F_WFI_SCOPE_CURRENT_SCG_TYPE                 0x00000000
#define NVC96F_WFI_SCOPE_CURRENT_VEID                     0x00000000
#define NVC96F_WFI_SCOPE_ALL                              0x00000001
#define NVC96F_YIELD                                      (0x00000080)
#define NVC96F_YIELD_OP                                   1:0
#define NVC96F_YIELD_OP_NOP                               0x00000000
#define NVC96F_YIELD_OP_TSG                               0x00000003
#define NVC96F_CLEAR_FAULTED                              (0x00000084)
// Note: RM provides the HANDLE as an opaque value; the internal detail fields
// are intentionally not exposed to the driver through these defines.
#define NVC96F_CLEAR_FAULTED_HANDLE                       30:0
#define NVC96F_CLEAR_FAULTED_TYPE                         31:31
#define NVC96F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED           0x00000000
#define NVC96F_CLEAR_FAULTED_TYPE_ENG_FAULTED             0x00000001


/* GPFIFO entry format */
#define NVC96F_GP_ENTRY__SIZE                             8
#define NVC96F_GP_ENTRY0_FETCH                            0:0
#define NVC96F_GP_ENTRY0_FETCH_UNCONDITIONAL              0x00000000
#define NVC96F_GP_ENTRY0_FETCH_CONDITIONAL                0x00000001
#define NVC96F_GP_ENTRY0_GET                              31:2
#define NVC96F_GP_ENTRY0_OPERAND                          31:0
#define NVC96F_GP_ENTRY0_PB_EXTENDED_BASE_OPERAND         24:8
#define NVC96F_GP_ENTRY1_GET_HI                           7:0
#define NVC96F_GP_ENTRY1_LEVEL                            9:9
#define NVC96F_GP_ENTRY1_LEVEL_MAIN                       0x00000000
#define NVC96F_GP_ENTRY1_LEVEL_SUBROUTINE                 0x00000001
#define NVC96F_GP_ENTRY1_LENGTH                           30:10
#define NVC96F_GP_ENTRY1_SYNC                             31:31
#define NVC96F_GP_ENTRY1_SYNC_PROCEED                     0x00000000
#define NVC96F_GP_ENTRY1_SYNC_WAIT                        0x00000001
#define NVC96F_GP_ENTRY1_OPCODE                           7:0
#define NVC96F_GP_ENTRY1_OPCODE_NOP                       0x00000000
#define NVC96F_GP_ENTRY1_OPCODE_ILLEGAL                   0x00000001
#define NVC96F_GP_ENTRY1_OPCODE_GP_CRC                    0x00000002
#define NVC96F_GP_ENTRY1_OPCODE_PB_CRC                    0x00000003
#define NVC96F_GP_ENTRY1_OPCODE_SET_PB_SEGMENT_EXTENDED_BASE 0x00000004

/* dma method formats */
#define NVC96F_DMA_METHOD_ADDRESS_OLD                     12:2
#define NVC96F_DMA_METHOD_ADDRESS                         11:0
#define NVC96F_DMA_SUBDEVICE_MASK                         15:4
#define NVC96F_DMA_METHOD_SUBCHANNEL                      15:13
#define NVC96F_DMA_TERT_OP                                17:16
#define NVC96F_DMA_TERT_OP_GRP0_INC_METHOD                (0x00000000)
#define NVC96F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK          (0x00000001)
#define NVC96F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK        (0x00000002)
#define NVC96F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK          (0x00000003)
#define NVC96F_DMA_TERT_OP_GRP2_NON_INC_METHOD            (0x00000000)
#define NVC96F_DMA_METHOD_COUNT_OLD                       28:18
#define NVC96F_DMA_METHOD_COUNT                           28:16
#define NVC96F_DMA_IMMD_DATA                              28:16
#define NVC96F_DMA_SEC_OP                                 31:29
#define NVC96F_DMA_SEC_OP_GRP0_USE_TERT                   (0x00000000)
#define NVC96F_DMA_SEC_OP_INC_METHOD                      (0x00000001)
#define NVC96F_DMA_SEC_OP_GRP2_USE_TERT                   (0x00000002)
#define NVC96F_DMA_SEC_OP_NON_INC_METHOD                  (0x00000003)
#define NVC96F_DMA_SEC_OP_IMMD_DATA_METHOD                (0x00000004)
#define NVC96F_DMA_SEC_OP_ONE_INC                         (0x00000005)
#define NVC96F_DMA_SEC_OP_RESERVED6                       (0x00000006)
#define NVC96F_DMA_SEC_OP_END_PB_SEGMENT                  (0x00000007)
/* dma incrementing method format */
#define NVC96F_DMA_INCR_ADDRESS                           11:0
#define NVC96F_DMA_INCR_SUBCHANNEL                        15:13
#define NVC96F_DMA_INCR_COUNT                             28:16
#define NVC96F_DMA_INCR_OPCODE                            31:29
#define NVC96F_DMA_INCR_OPCODE_VALUE                      (0x00000001)
#define NVC96F_DMA_INCR_DATA                              31:0
/* dma non-incrementing method format */
#define NVC96F_DMA_NONINCR_ADDRESS                        11:0
#define NVC96F_DMA_NONINCR_SUBCHANNEL                     15:13
#define NVC96F_DMA_NONINCR_COUNT                          28:16
#define NVC96F_DMA_NONINCR_OPCODE                         31:29
#define NVC96F_DMA_NONINCR_OPCODE_VALUE                   (0x00000003)
#define NVC96F_DMA_NONINCR_DATA                           31:0
/* dma increment-once method format */
#define NVC96F_DMA_ONEINCR_ADDRESS                        11:0
#define NVC96F_DMA_ONEINCR_SUBCHANNEL                     15:13
#define NVC96F_DMA_ONEINCR_COUNT                          28:16
#define NVC96F_DMA_ONEINCR_OPCODE                         31:29
#define NVC96F_DMA_ONEINCR_OPCODE_VALUE                   (0x00000005)
#define NVC96F_DMA_ONEINCR_DATA                           31:0
/* dma no-operation format */
#define NVC96F_DMA_NOP                                    (0x00000000)
/* dma immediate-data format */
#define NVC96F_DMA_IMMD_ADDRESS                           11:0
#define NVC96F_DMA_IMMD_SUBCHANNEL                        15:13
#define NVC96F_DMA_IMMD_DATA                              28:16
#define NVC96F_DMA_IMMD_OPCODE                            31:29
#define NVC96F_DMA_IMMD_OPCODE_VALUE                      (0x00000004)
/* dma set sub-device mask format */
#define NVC96F_DMA_SET_SUBDEVICE_MASK_VALUE               15:4
#define NVC96F_DMA_SET_SUBDEVICE_MASK_OPCODE              31:16
#define NVC96F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE        (0x00000001)
/* dma store sub-device mask format */
#define NVC96F_DMA_STORE_SUBDEVICE_MASK_VALUE             15:4
#define NVC96F_DMA_STORE_SUBDEVICE_MASK_OPCODE            31:16
#define NVC96F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE      (0x00000002)
/* dma use sub-device mask format */
#define NVC96F_DMA_USE_SUBDEVICE_MASK_OPCODE              31:16
#define NVC96F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE        (0x00000003)
/* dma end-segment format */
#define NVC96F_DMA_ENDSEG_OPCODE                          31:29
#define NVC96F_DMA_ENDSEG_OPCODE_VALUE                    (0x00000007)
/* dma legacy incrementing/non-incrementing formats */
#define NVC96F_DMA_ADDRESS                                12:2
#define NVC96F_DMA_SUBCH                                  15:13
#define NVC96F_DMA_OPCODE3                                17:16
#define NVC96F_DMA_OPCODE3_NONE                           (0x00000000)
#define NVC96F_DMA_COUNT                                  28:18
#define NVC96F_DMA_OPCODE                                 31:29
#define NVC96F_DMA_OPCODE_METHOD                          (0x00000000)
#define NVC96F_DMA_OPCODE_NONINC_METHOD                   (0x00000002)
#define NVC96F_DMA_DATA                                   31:0

#ifdef __cplusplus
};     /* extern "C" */
#endif

#endif /* _clc96f_h_ */
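The GP_ENTRY defines above describe the two 32-bit words of one 8-byte GPFIFO entry (NVC96F_GP_ENTRY__SIZE). A minimal sketch of encoding an entry from a pushbuffer GPU VA and byte length, assuming PACK is an illustrative stand-in for the driver's field helpers and that GP_ENTRY1_LENGTH counts 32-bit methods:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in: place v into bits hi:lo of a 32-bit word. */
#define PACK(hi, lo, v) (((uint32_t)(v) << (lo)) & \
    (uint32_t)((((uint64_t)1 << ((hi) - (lo) + 1)) - 1) << (lo)))

/* GP_ENTRY0_GET (31:2) carries the pushbuffer VA's low word,
 * GP_ENTRY1_GET_HI (7:0) the next 8 VA bits, GP_ENTRY1_LENGTH (30:10)
 * the segment length, and GP_ENTRY1_SYNC (31:31) is left at PROCEED. */
static void nvc96f_gp_entry(uint32_t entry[2], uint64_t pb_va, uint32_t pb_bytes)
{
    entry[0] = PACK(31, 2, (uint32_t)(pb_va >> 2));       /* GP_ENTRY0_GET    */
    entry[1] = PACK(7, 0, (uint32_t)(pb_va >> 32))        /* GP_ENTRY1_GET_HI */
             | PACK(30, 10, pb_bytes / 4)                 /* GP_ENTRY1_LENGTH */
             | PACK(31, 31, 0);                           /* SYNC_PROCEED     */
}

int main(void)
{
    uint32_t entry[2];
    nvc96f_gp_entry(entry, 0x12345678000ull, 256);
    printf("gp entry: 0x%08x 0x%08x\n", (unsigned)entry[0], (unsigned)entry[1]);
    return 0;
}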
@@ -1,460 +0,0 @@
/*******************************************************************************
    Copyright (c) 1993-2004 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/


#include "nvtypes.h"

#ifndef _clc9b5_h_
#define _clc9b5_h_

#ifdef __cplusplus
extern "C" {
#endif

#define BLACKWELL_DMA_COPY_A                                                    (0x0000C9B5)

typedef volatile struct _clc9b5_tag0 {
    NvV32 Reserved00[0x40];
    NvV32 Nop;                                                  // 0x00000100 - 0x00000103
    NvV32 Reserved01[0xF];
    NvV32 PmTrigger;                                            // 0x00000140 - 0x00000143
    NvV32 Reserved02[0x36];
    NvV32 SetMonitoredFenceType;                                // 0x0000021C - 0x0000021F
    NvV32 SetMonitoredFenceSignalAddrBaseUpper;                 // 0x00000220 - 0x00000223
    NvV32 SetMonitoredFenceSignalAddrBaseLower;                 // 0x00000224 - 0x00000227
    NvV32 Reserved03[0x6];
    NvV32 SetSemaphoreA;                                        // 0x00000240 - 0x00000243
    NvV32 SetSemaphoreB;                                        // 0x00000244 - 0x00000247
    NvV32 SetSemaphorePayload;                                  // 0x00000248 - 0x0000024B
    NvV32 SetSemaphorePayloadUpper;                             // 0x0000024C - 0x0000024F
    NvV32 Reserved04[0x1];
    NvV32 SetRenderEnableA;                                     // 0x00000254 - 0x00000257
    NvV32 SetRenderEnableB;                                     // 0x00000258 - 0x0000025B
    NvV32 SetRenderEnableC;                                     // 0x0000025C - 0x0000025F
    NvV32 SetSrcPhysMode;                                       // 0x00000260 - 0x00000263
    NvV32 SetDstPhysMode;                                       // 0x00000264 - 0x00000267
    NvV32 Reserved05[0x26];
    NvV32 LaunchDma;                                            // 0x00000300 - 0x00000303
    NvV32 Reserved06[0x3F];
    NvV32 OffsetInUpper;                                        // 0x00000400 - 0x00000403
    NvV32 OffsetInLower;                                        // 0x00000404 - 0x00000407
    NvV32 OffsetOutUpper;                                       // 0x00000408 - 0x0000040B
    NvV32 OffsetOutLower;                                       // 0x0000040C - 0x0000040F
    NvV32 PitchIn;                                              // 0x00000410 - 0x00000413
    NvV32 PitchOut;                                             // 0x00000414 - 0x00000417
    NvV32 LineLengthIn;                                         // 0x00000418 - 0x0000041B
    NvV32 LineCount;                                            // 0x0000041C - 0x0000041F
    NvV32 Reserved07[0x38];
    NvV32 SetSecureCopyMode;                                    // 0x00000500 - 0x00000503
    NvV32 SetDecryptIv0;                                        // 0x00000504 - 0x00000507
    NvV32 SetDecryptIv1;                                        // 0x00000508 - 0x0000050B
    NvV32 SetDecryptIv2;                                        // 0x0000050C - 0x0000050F
    NvV32 Reserved_SetAESCounter;                               // 0x00000510 - 0x00000513
    NvV32 SetDecryptAuthTagCompareAddrUpper;                    // 0x00000514 - 0x00000517
    NvV32 SetDecryptAuthTagCompareAddrLower;                    // 0x00000518 - 0x0000051B
    NvV32 Reserved08[0x5];
    NvV32 SetEncryptAuthTagAddrUpper;                           // 0x00000530 - 0x00000533
    NvV32 SetEncryptAuthTagAddrLower;                           // 0x00000534 - 0x00000537
    NvV32 SetEncryptIvAddrUpper;                                // 0x00000538 - 0x0000053B
    NvV32 SetEncryptIvAddrLower;                                // 0x0000053C - 0x0000053F
    NvV32 Reserved09[0x10];
    NvV32 SetCompressionParameters;                             // 0x00000580 - 0x00000583
    NvV32 SetDecompressOutLength;                               // 0x00000584 - 0x00000587
    NvV32 SetDecompressOutLengthAddrUpper;                      // 0x00000588 - 0x0000058B
    NvV32 SetDecompressOutLengthAddrLower;                      // 0x0000058C - 0x0000058F
    NvV32 SetDecompressChecksum;                                // 0x00000590 - 0x00000593
    NvV32 Reserved10[0x5A];
    NvV32 SetMemoryScrubParameters;                             // 0x000006FC - 0x000006FF
    NvV32 SetRemapConstA;                                       // 0x00000700 - 0x00000703
    NvV32 SetRemapConstB;                                       // 0x00000704 - 0x00000707
    NvV32 SetRemapComponents;                                   // 0x00000708 - 0x0000070B
    NvV32 SetDstBlockSize;                                      // 0x0000070C - 0x0000070F
    NvV32 SetDstWidth;                                          // 0x00000710 - 0x00000713
    NvV32 SetDstHeight;                                         // 0x00000714 - 0x00000717
    NvV32 SetDstDepth;                                          // 0x00000718 - 0x0000071B
    NvV32 SetDstLayer;                                          // 0x0000071C - 0x0000071F
    NvV32 SetDstOrigin;                                         // 0x00000720 - 0x00000723
    NvV32 Reserved11[0x1];
    NvV32 SetSrcBlockSize;                                      // 0x00000728 - 0x0000072B
    NvV32 SetSrcWidth;                                          // 0x0000072C - 0x0000072F
    NvV32 SetSrcHeight;                                         // 0x00000730 - 0x00000733
    NvV32 SetSrcDepth;                                          // 0x00000734 - 0x00000737
    NvV32 SetSrcLayer;                                          // 0x00000738 - 0x0000073B
    NvV32 SetSrcOrigin;                                         // 0x0000073C - 0x0000073F
    NvV32 Reserved12[0x1];
    NvV32 SrcOriginX;                                           // 0x00000744 - 0x00000747
    NvV32 SrcOriginY;                                           // 0x00000748 - 0x0000074B
    NvV32 DstOriginX;                                           // 0x0000074C - 0x0000074F
    NvV32 DstOriginY;                                           // 0x00000750 - 0x00000753
    NvV32 Reserved13[0x270];
    NvV32 PmTriggerEnd;                                         // 0x00001114 - 0x00001117
    NvV32 Reserved14[0x3BA];
} blackwell_dma_copy_aControlPio;

#define NVC9B5_NOP                                                              (0x00000100)
#define NVC9B5_NOP_PARAMETER                                                    31:0
#define NVC9B5_PM_TRIGGER                                                       (0x00000140)
#define NVC9B5_PM_TRIGGER_V                                                     31:0
#define NVC9B5_SET_MONITORED_FENCE_TYPE                                         (0x0000021C)
#define NVC9B5_SET_MONITORED_FENCE_TYPE_TYPE                                    0:0
#define NVC9B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE                    (0x00000000)
#define NVC9B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE_EXT                (0x00000001)
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER                       (0x00000220)
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER_UPPER                 24:0
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER                       (0x00000224)
#define NVC9B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER_LOWER                 31:0
#define NVC9B5_SET_SEMAPHORE_A                                                  (0x00000240)
#define NVC9B5_SET_SEMAPHORE_A_UPPER                                            24:0
#define NVC9B5_SET_SEMAPHORE_B                                                  (0x00000244)
#define NVC9B5_SET_SEMAPHORE_B_LOWER                                            31:0
#define NVC9B5_SET_SEMAPHORE_PAYLOAD                                            (0x00000248)
#define NVC9B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD                                    31:0
#define NVC9B5_SET_SEMAPHORE_PAYLOAD_UPPER                                      (0x0000024C)
#define NVC9B5_SET_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD                              31:0
#define NVC9B5_SET_RENDER_ENABLE_A                                              (0x00000254)
#define NVC9B5_SET_RENDER_ENABLE_A_UPPER                                        24:0
#define NVC9B5_SET_RENDER_ENABLE_B                                              (0x00000258)
#define NVC9B5_SET_RENDER_ENABLE_B_LOWER                                        31:0
#define NVC9B5_SET_RENDER_ENABLE_C                                              (0x0000025C)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE                                         2:0
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_FALSE                                   (0x00000000)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_TRUE                                    (0x00000001)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL                             (0x00000002)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL                         (0x00000003)
#define NVC9B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL                     (0x00000004)
#define NVC9B5_SET_SRC_PHYS_MODE                                                (0x00000260)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET                                         1:0
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB                                (0x00000000)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM                         (0x00000001)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM                      (0x00000002)
#define NVC9B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM                                 (0x00000003)
#define NVC9B5_SET_SRC_PHYS_MODE_BASIC_KIND                                     5:2
#define NVC9B5_SET_SRC_PHYS_MODE_PEER_ID                                        8:6
#define NVC9B5_SET_SRC_PHYS_MODE_FLA                                            9:9
#define NVC9B5_SET_DST_PHYS_MODE                                                (0x00000264)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET                                         1:0
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB                                (0x00000000)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM                         (0x00000001)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM                      (0x00000002)
#define NVC9B5_SET_DST_PHYS_MODE_TARGET_PEERMEM                                 (0x00000003)
#define NVC9B5_SET_DST_PHYS_MODE_BASIC_KIND                                     5:2
#define NVC9B5_SET_DST_PHYS_MODE_PEER_ID                                        8:6
#define NVC9B5_SET_DST_PHYS_MODE_FLA                                            9:9
#define NVC9B5_LAUNCH_DMA                                                       (0x00000300)
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE                                    1:0
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE                               (0x00000000)
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED                          (0x00000001)
#define NVC9B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED                      (0x00000002)
#define NVC9B5_LAUNCH_DMA_FLUSH_ENABLE                                          2:2
#define NVC9B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE                                    (0x00000000)
#define NVC9B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE                                     (0x00000001)
#define NVC9B5_LAUNCH_DMA_FLUSH_TYPE                                            25:25
#define NVC9B5_LAUNCH_DMA_FLUSH_TYPE_SYS                                        (0x00000000)
#define NVC9B5_LAUNCH_DMA_FLUSH_TYPE_GL                                         (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE                                        4:3
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE                                   (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_NO_TIMESTAMP         (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_WITH_TIMESTAMP       (0x00000002)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE             (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE            (0x00000002)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE     (0x00000003)
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE                                        6:5
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE                                   (0x00000000)
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING                               (0x00000001)
#define NVC9B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING                           (0x00000002)
#define NVC9B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT                                     7:7
#define NVC9B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR                         (0x00000000)
#define NVC9B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH                               (0x00000001)
#define NVC9B5_LAUNCH_DMA_DST_MEMORY_LAYOUT                                     8:8
#define NVC9B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR                         (0x00000000)
#define NVC9B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH                               (0x00000001)
#define NVC9B5_LAUNCH_DMA_MULTI_LINE_ENABLE                                     9:9
#define NVC9B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE                               (0x00000000)
#define NVC9B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE                                (0x00000001)
#define NVC9B5_LAUNCH_DMA_REMAP_ENABLE                                          10:10
#define NVC9B5_LAUNCH_DMA_REMAP_ENABLE_FALSE                                    (0x00000000)
#define NVC9B5_LAUNCH_DMA_REMAP_ENABLE_TRUE                                     (0x00000001)
#define NVC9B5_LAUNCH_DMA_COMPRESSION_ENABLE                                    11:11
#define NVC9B5_LAUNCH_DMA_COMPRESSION_ENABLE_FALSE                              (0x00000000)
#define NVC9B5_LAUNCH_DMA_COMPRESSION_ENABLE_TRUE                               (0x00000001)
#define NVC9B5_LAUNCH_DMA_SRC_TYPE                                              12:12
#define NVC9B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL                                      (0x00000000)
#define NVC9B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL                                     (0x00000001)
#define NVC9B5_LAUNCH_DMA_DST_TYPE                                              13:13
#define NVC9B5_LAUNCH_DMA_DST_TYPE_VIRTUAL                                      (0x00000000)
#define NVC9B5_LAUNCH_DMA_DST_TYPE_PHYSICAL                                     (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION                                   17:14
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN                              (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX                              (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR                              (0x00000002)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND                              (0x00000003)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR                               (0x00000004)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD                              (0x00000005)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC                               (0x00000006)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC                               (0x00000007)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDA                          (0x00000008)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDB                          (0x00000009)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD                              (0x0000000A)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN                              (0x0000000B)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX                              (0x0000000C)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDC                          (0x0000000D)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDD                          (0x0000000E)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDE                          (0x0000000F)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN                              18:18
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED                       (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED                     (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE                            19:19
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE                      (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE                       (0x00000001)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE                                             21:20
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_PROT2PROT                                   (0x00000000)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_DEFAULT                                     (0x00000000)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_SECURE                                      (0x00000001)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_NONPROT2NONPROT                             (0x00000002)
#define NVC9B5_LAUNCH_DMA_COPY_TYPE_RESERVED                                    (0x00000003)
#define NVC9B5_LAUNCH_DMA_VPRMODE                                               22:22
#define NVC9B5_LAUNCH_DMA_VPRMODE_VPR_NONE                                      (0x00000000)
#define NVC9B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID                                   (0x00000001)
#define NVC9B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE                                   23:23
#define NVC9B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE_FALSE                             (0x00000000)
#define NVC9B5_LAUNCH_DMA_MEMORY_SCRUB_ENABLE_TRUE                              (0x00000001)
#define NVC9B5_LAUNCH_DMA_RESERVED_START_OF_COPY                                24:24
#define NVC9B5_LAUNCH_DMA_DISABLE_PLC                                           26:26
#define NVC9B5_LAUNCH_DMA_DISABLE_PLC_FALSE                                     (0x00000000)
#define NVC9B5_LAUNCH_DMA_DISABLE_PLC_TRUE                                      (0x00000001)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE                                27:27
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_ONE_WORD                       (0x00000000)
#define NVC9B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_TWO_WORD                       (0x00000001)
#define NVC9B5_LAUNCH_DMA_RESERVED_ERR_CODE                                     31:28
#define NVC9B5_OFFSET_IN_UPPER                                                  (0x00000400)
#define NVC9B5_OFFSET_IN_UPPER_UPPER                                            24:0
#define NVC9B5_OFFSET_IN_LOWER                                                  (0x00000404)
#define NVC9B5_OFFSET_IN_LOWER_VALUE                                            31:0
#define NVC9B5_OFFSET_OUT_UPPER                                                 (0x00000408)
#define NVC9B5_OFFSET_OUT_UPPER_UPPER                                           24:0
#define NVC9B5_OFFSET_OUT_LOWER                                                 (0x0000040C)
#define NVC9B5_OFFSET_OUT_LOWER_VALUE                                           31:0
#define NVC9B5_PITCH_IN                                                         (0x00000410)
#define NVC9B5_PITCH_IN_VALUE                                                   31:0
#define NVC9B5_PITCH_OUT                                                        (0x00000414)
#define NVC9B5_PITCH_OUT_VALUE                                                  31:0
#define NVC9B5_LINE_LENGTH_IN                                                   (0x00000418)
#define NVC9B5_LINE_LENGTH_IN_VALUE                                             31:0
#define NVC9B5_LINE_COUNT                                                       (0x0000041C)
#define NVC9B5_LINE_COUNT_VALUE                                                 31:0
#define NVC9B5_SET_SECURE_COPY_MODE                                             (0x00000500)
#define NVC9B5_SET_SECURE_COPY_MODE_MODE                                        0:0
#define NVC9B5_SET_SECURE_COPY_MODE_MODE_ENCRYPT                                (0x00000000)
#define NVC9B5_SET_SECURE_COPY_MODE_MODE_DECRYPT                                (0x00000001)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET                         20:19
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_LOCAL_FB                (0x00000000)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_COHERENT_SYSMEM         (0x00000001)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_NONCOHERENT_SYSMEM      (0x00000002)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_TARGET_PEERMEM                 (0x00000003)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_PEER_ID                        23:21
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_SRC_FLA                            24:24
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET                         26:25
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_LOCAL_FB                (0x00000000)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_COHERENT_SYSMEM         (0x00000001)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_NONCOHERENT_SYSMEM      (0x00000002)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_TARGET_PEERMEM                 (0x00000003)
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_PEER_ID                        29:27
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_DST_FLA                            30:30
#define NVC9B5_SET_SECURE_COPY_MODE_RESERVED_END_OF_COPY                        31:31
#define NVC9B5_SET_DECRYPT_IV0                                                  (0x00000504)
#define NVC9B5_SET_DECRYPT_IV0_VALUE                                            31:0
#define NVC9B5_SET_DECRYPT_IV1                                                  (0x00000508)
#define NVC9B5_SET_DECRYPT_IV1_VALUE                                            31:0
#define NVC9B5_SET_DECRYPT_IV2                                                  (0x0000050C)
#define NVC9B5_SET_DECRYPT_IV2_VALUE                                            31:0
#define NVC9B5_RESERVED_SET_AESCOUNTER                                          (0x00000510)
#define NVC9B5_RESERVED_SET_AESCOUNTER_VALUE                                    31:0
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER                          (0x00000514)
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_UPPER_UPPER                    24:0
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER                          (0x00000518)
#define NVC9B5_SET_DECRYPT_AUTH_TAG_COMPARE_ADDR_LOWER_LOWER                    31:0
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_UPPER                                  (0x00000530)
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_UPPER_UPPER                            24:0
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_LOWER                                  (0x00000534)
#define NVC9B5_SET_ENCRYPT_AUTH_TAG_ADDR_LOWER_LOWER                            31:0
#define NVC9B5_SET_ENCRYPT_IV_ADDR_UPPER                                        (0x00000538)
#define NVC9B5_SET_ENCRYPT_IV_ADDR_UPPER_UPPER                                  24:0
#define NVC9B5_SET_ENCRYPT_IV_ADDR_LOWER                                        (0x0000053C)
#define NVC9B5_SET_ENCRYPT_IV_ADDR_LOWER_LOWER                                  31:0
#define NVC9B5_SET_COMPRESSION_PARAMETERS                                       (0x00000580)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_OPERATION                             0:0
#define NVC9B5_SET_COMPRESSION_PARAMETERS_OPERATION_DECOMPRESS                  (0x00000000)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_OPERATION_COMPRESS                    (0x00000001)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO                                  3:1
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_SNAPPY                           (0x00000000)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_LZ4_DATA_ONLY                    (0x00000001)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_LZ4_BLOCK                        (0x00000002)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_LZ4_BLOCK_CHECKSUM               (0x00000003)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_DEFLATE                          (0x00000004)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_ALGO_SNAPPY_WITH_LONG_FETCH           (0x00000005)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM                             29:28
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_NONE                        (0x00000000)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_ADLER32                     (0x00000001)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_CRC32                       (0x00000002)
#define NVC9B5_SET_COMPRESSION_PARAMETERS_CHECK_SUM_SNAPPY_CRC                  (0x00000003)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH                                        (0x00000584)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_V                                      31:0
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_UPPER                             (0x00000588)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_UPPER_UPPER                       24:0
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_LOWER                             (0x0000058C)
#define NVC9B5_SET_DECOMPRESS_OUT_LENGTH_ADDR_LOWER_LOWER                       31:0
#define NVC9B5_SET_DECOMPRESS_CHECKSUM                                          (0x00000590)
#define NVC9B5_SET_DECOMPRESS_CHECKSUM_V                                        31:0
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS                                      (0x000006FC)
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE                          0:0
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE_FALSE                    (0x00000000)
#define NVC9B5_SET_MEMORY_SCRUB_PARAMETERS_DISCARDABLE_TRUE                     (0x00000001)
#define NVC9B5_SET_REMAP_CONST_A                                                (0x00000700)
#define NVC9B5_SET_REMAP_CONST_A_V                                              31:0
#define NVC9B5_SET_REMAP_CONST_B                                                (0x00000704)
#define NVC9B5_SET_REMAP_CONST_B_V                                              31:0
#define NVC9B5_SET_REMAP_COMPONENTS                                             (0x00000708)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X                                       2:0
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_X                                 (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y                                 (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z                                 (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_SRC_W                                 (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_CONST_A                               (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_CONST_B                               (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE                              (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y                                       6:4
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X                                 (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y                                 (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z                                 (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W                                 (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A                               (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B                               (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE                              (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z                                       10:8
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X                                 (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y                                 (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z                                 (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W                                 (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A                               (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B                               (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE                              (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W                                       14:12
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_X                                 (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y                                 (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z                                 (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_SRC_W                                 (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_CONST_A                               (0x00000004)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_CONST_B                               (0x00000005)
#define NVC9B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE                              (0x00000006)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE                              17:16
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE                          (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO                          (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE                        (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR                         (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS                          21:20
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE                      (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO                      (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE                    (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR                     (0x00000003)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS                          25:24
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE                      (0x00000000)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO                      (0x00000001)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE                    (0x00000002)
#define NVC9B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR                     (0x00000003)
#define NVC9B5_SET_DST_BLOCK_SIZE                                               (0x0000070C)
#define NVC9B5_SET_DST_BLOCK_SIZE_WIDTH                                         3:0
#define NVC9B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB                                 (0x00000000)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT                                        7:4
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB                                (0x00000000)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS                               (0x00000001)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS                              (0x00000002)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                             (0x00000003)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                           (0x00000004)
#define NVC9B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                         (0x00000005)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH                                         11:8
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB                                 (0x00000000)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS                                (0x00000001)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS                               (0x00000002)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS                              (0x00000003)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                            (0x00000004)
#define NVC9B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                          (0x00000005)
#define NVC9B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT                                    15:12
#define NVC9B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8                 (0x00000001)
#define NVC9B5_SET_DST_WIDTH                                                    (0x00000710)
#define NVC9B5_SET_DST_WIDTH_V                                                  31:0
#define NVC9B5_SET_DST_HEIGHT                                                   (0x00000714)
#define NVC9B5_SET_DST_HEIGHT_V                                                 31:0
#define NVC9B5_SET_DST_DEPTH                                                    (0x00000718)
#define NVC9B5_SET_DST_DEPTH_V                                                  31:0
#define NVC9B5_SET_DST_LAYER                                                    (0x0000071C)
#define NVC9B5_SET_DST_LAYER_V                                                  31:0
#define NVC9B5_SET_DST_ORIGIN                                                   (0x00000720)
#define NVC9B5_SET_DST_ORIGIN_X                                                 15:0
#define NVC9B5_SET_DST_ORIGIN_Y                                                 31:16
#define NVC9B5_SET_SRC_BLOCK_SIZE                                               (0x00000728)
#define NVC9B5_SET_SRC_BLOCK_SIZE_WIDTH                                         3:0
#define NVC9B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB                                 (0x00000000)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT                                        7:4
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB                                (0x00000000)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS                               (0x00000001)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS                              (0x00000002)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS                             (0x00000003)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS                           (0x00000004)
#define NVC9B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS                         (0x00000005)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH                                         11:8
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB                                 (0x00000000)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS                                (0x00000001)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS                               (0x00000002)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS                              (0x00000003)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS                            (0x00000004)
#define NVC9B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS                          (0x00000005)
#define NVC9B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT                                    15:12
#define NVC9B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8                 (0x00000001)
#define NVC9B5_SET_SRC_WIDTH                                                    (0x0000072C)
#define NVC9B5_SET_SRC_WIDTH_V                                                  31:0
#define NVC9B5_SET_SRC_HEIGHT                                                   (0x00000730)
#define NVC9B5_SET_SRC_HEIGHT_V                                                 31:0
#define NVC9B5_SET_SRC_DEPTH                                                    (0x00000734)
#define NVC9B5_SET_SRC_DEPTH_V                                                  31:0
#define NVC9B5_SET_SRC_LAYER                                                    (0x00000738)
#define NVC9B5_SET_SRC_LAYER_V                                                  31:0
#define NVC9B5_SET_SRC_ORIGIN                                                   (0x0000073C)
#define NVC9B5_SET_SRC_ORIGIN_X                                                 15:0
#define NVC9B5_SET_SRC_ORIGIN_Y                                                 31:16
#define NVC9B5_SRC_ORIGIN_X                                                     (0x00000744)
#define NVC9B5_SRC_ORIGIN_X_VALUE                                               31:0
#define NVC9B5_SRC_ORIGIN_Y                                                     (0x00000748)
#define NVC9B5_SRC_ORIGIN_Y_VALUE                                               31:0
#define NVC9B5_DST_ORIGIN_X                                                     (0x0000074C)
#define NVC9B5_DST_ORIGIN_X_VALUE                                               31:0
#define NVC9B5_DST_ORIGIN_Y                                                     (0x00000750)
#define NVC9B5_DST_ORIGIN_Y_VALUE                                               31:0
#define NVC9B5_PM_TRIGGER_END                                                   (0x00001114)
#define NVC9B5_PM_TRIGGER_END_V                                                 31:0

#ifdef __cplusplus
};     /* extern "C" */
#endif
#endif // _clc9b5_h
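The OFFSET_IN/OFFSET_OUT, LINE_LENGTH_IN, and LAUNCH_DMA methods above are all a client needs for a flat virtual-to-virtual copy. A minimal sketch of the method/value sequence such a client might queue, assuming a toy push() writer that just records (method offset, data) pairs; a real client would encode these through the GPFIFO DMA method formats listed earlier:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Toy pushbuffer: (method offset, data) pairs in a flat array. */
static uint32_t pb[32];
static size_t   pb_len;

static void push(uint32_t method, uint32_t data)
{
    pb[pb_len++] = method;
    pb[pb_len++] = data;
}

static void c9b5_copy_va(uint64_t src, uint64_t dst, uint32_t bytes)
{
    push(0x00000400, (uint32_t)(src >> 32));  /* NVC9B5_OFFSET_IN_UPPER  */
    push(0x00000404, (uint32_t)src);          /* NVC9B5_OFFSET_IN_LOWER  */
    push(0x00000408, (uint32_t)(dst >> 32));  /* NVC9B5_OFFSET_OUT_UPPER */
    push(0x0000040C, (uint32_t)dst);          /* NVC9B5_OFFSET_OUT_LOWER */
    push(0x00000418, bytes);                  /* NVC9B5_LINE_LENGTH_IN   */
    /* LAUNCH_DMA: DATA_TRANSFER_TYPE_NON_PIPELINED (bits 1:0 = 2) and
     * FLUSH_ENABLE_TRUE (bit 2 = 1); MULTI_LINE_ENABLE and REMAP_ENABLE
     * stay FALSE and SRC/DST_TYPE stay VIRTUAL, all of which encode as 0. */
    push(0x00000300, (0x2u << 0) | (0x1u << 2));
}

int main(void)
{
    c9b5_copy_va(0x10000000ull, 0x20000000ull, 4096);
    printf("queued %zu words\n", pb_len);
    return 0;
}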
@@ -1,74 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2003-2023 NVIDIA CORPORATION & AFFILIATES
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __gb202_clca6f_h__
#define __gb202_clca6f_h__

typedef volatile struct Nvca6fControl_struct {
    NvU32 Ignored00[0x23]; /* 0000-008b*/
    NvU32 GPPut; /* GP FIFO put offset 008c-008f*/
    NvU32 Ignored01[0x5c];
} Nvca6fControl, BlackwellBControlGPFifo;

#define BLACKWELL_CHANNEL_GPFIFO_B (0x0000CA6F)

#define NVCA6F_SET_OBJECT (0x00000000)
#define NVCA6F_SEM_ADDR_LO (0x0000005c)
#define NVCA6F_SEM_ADDR_LO_OFFSET 31:2
#define NVCA6F_SEM_ADDR_HI (0x00000060)
#define NVCA6F_SEM_ADDR_HI_OFFSET 24:0
#define NVCA6F_SEM_PAYLOAD_LO (0x00000064)
#define NVCA6F_SEM_PAYLOAD_HI (0x00000068)
#define NVCA6F_SEM_EXECUTE (0x0000006c)
#define NVCA6F_SEM_EXECUTE_OPERATION 2:0
#define NVCA6F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
#define NVCA6F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
#define NVCA6F_SEM_EXECUTE_RELEASE_WFI 20:20
#define NVCA6F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
#define NVCA6F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
#define NVCA6F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000

/* GPFIFO entry format */
#define NVCA6F_GP_ENTRY__SIZE 8
#define NVCA6F_GP_ENTRY0_FETCH 0:0
#define NVCA6F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000
#define NVCA6F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001
#define NVCA6F_GP_ENTRY0_GET 31:2
#define NVCA6F_GP_ENTRY0_OPERAND 31:0
#define NVCA6F_GP_ENTRY0_PB_EXTENDED_BASE_OPERAND 24:8
#define NVCA6F_GP_ENTRY1_GET_HI 7:0
#define NVCA6F_GP_ENTRY1_LEVEL 9:9
#define NVCA6F_GP_ENTRY1_LEVEL_MAIN 0x00000000
#define NVCA6F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001
#define NVCA6F_GP_ENTRY1_LENGTH 30:10
#define NVCA6F_GP_ENTRY1_SYNC 31:31
#define NVCA6F_GP_ENTRY1_SYNC_PROCEED 0x00000000
#define NVCA6F_GP_ENTRY1_SYNC_WAIT 0x00000001
#define NVCA6F_GP_ENTRY1_OPCODE 7:0
#define NVCA6F_GP_ENTRY1_OPCODE_NOP 0x00000000
#define NVCA6F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001
#define NVCA6F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002
#define NVCA6F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003
#define NVCA6F_GP_ENTRY1_OPCODE_SET_PB_SEGMENT_EXTENDED_BASE 0x00000004

#endif // __gb202_clca6f_h__
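A GPFIFO entry in this class is two 32-bit words (GP_ENTRY__SIZE is 8 bytes). A hedged sketch of packing one, assuming GP_ENTRY0_GET and GP_ENTRY1_GET_HI carry pushbuffer VA bits [31:2] and [39:32]; the helper and its arguments are hypothetical:

/* Hypothetical sketch: pack one 8-byte GPFIFO entry from the ranges above. */
static inline void nvca6f_pack_gp_entry(NvU32 entry[2], NvU64 pb_va, NvU32 len_words)
{
    entry[0] = (NvU32)(pb_va & 0xFFFFFFFCu);    /* GP_ENTRY0_GET: bits 31:2 */
    entry[1] = (NvU32)((pb_va >> 32) & 0xFFu)   /* GP_ENTRY1_GET_HI: bits 7:0 */
             | (len_words << 10);               /* GP_ENTRY1_LENGTH: bits 30:10 (must fit 21 bits) */
}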
@@ -1,44 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _clcab5_h_
#define _clcab5_h_

#define BLACKWELL_DMA_COPY_B (0x0000CAB5)

#define NVCAB5_LAUNCH_DMA (0x00000300)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002)
#define NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PREFETCH (0x00000003)

#define NVCAB5_REQ_ATTR (0x00000754)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS 1:0
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_FIRST (0x00000000)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_NORMAL (0x00000001)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_LAST (0x00000002)
#define NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_DEMOTE (0x00000003)

#endif /* _clcab5_h_ */
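A hedged sketch of how the two methods above could combine to prefetch data with a chosen L2 eviction class; push_method() is an assumed pushbuffer helper, not an API from this header:

/* Hypothetical pushbuffer helper: emit one (method, data) pair. */
extern void push_method(NvU32 method, NvU32 data);

/* Sketch: request a prefetch whose lines use the EVICT_LAST L2 class. */
static void nvcab5_prefetch_evict_last(void)
{
    push_method(NVCAB5_REQ_ATTR,
                NVCAB5_REQ_ATTR_PREFETCH_L2_CLASS_EVICT_LAST);   /* bits 1:0 */
    push_method(NVCAB5_LAUNCH_DMA,
                NVCAB5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PREFETCH);  /* bits 1:0 */
}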
@@ -1,25 +1,25 @@
/*
/*******************************************************************************
 * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
    Copyright (c) 2021-2022 NVIDIA Corporation
 * SPDX-License-Identifier: MIT
 *
    Permission is hereby granted, free of charge, to any person obtaining a copy
 * Permission is hereby granted, free of charge, to any person obtaining a
    of this software and associated documentation files (the "Software"), to
 * copy of this software and associated documentation files (the "Software"),
    deal in the Software without restriction, including without limitation the
 * to deal in the Software without restriction, including without limitation
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
    sell copies of the Software, and to permit persons to whom the Software is
 * and/or sell copies of the Software, and to permit persons to whom the
    furnished to do so, subject to the following conditions:
 * Software is furnished to do so, subject to the following conditions:
 *
    The above copyright notice and this permission notice shall be
 * The above copyright notice and this permission notice shall be included in
    included in all copies or substantial portions of the Software.
 * all copies or substantial portions of the Software.
 *
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
 * DEALINGS IN THE SOFTWARE.
 */
*******************************************************************************/

#include "nvtypes.h"

@@ -32,28 +32,6 @@ extern "C" {
#define HOPPER_SEC2_WORK_LAUNCH_A (0x0000CBA2)

typedef volatile struct _clcba2_tag0 {
    NvV32 Reserved00[0x100];
    NvV32 DecryptCopySrcAddrHi; // 0x00000400 - 0x00000403
    NvV32 DecryptCopySrcAddrLo; // 0x00000404 - 0x00000407
    NvV32 DecryptCopyDstAddrHi; // 0x00000408 - 0x0000040B
    NvV32 DecryptCopyDstAddrLo; // 0x0000040c - 0x0000040F
    NvU32 DecryptCopySize; // 0x00000410 - 0x00000413
    NvU32 DecryptCopyAuthTagAddrHi; // 0x00000414 - 0x00000417
    NvU32 DecryptCopyAuthTagAddrLo; // 0x00000418 - 0x0000041B
    NvV32 DigestAddrHi; // 0x0000041C - 0x0000041F
    NvV32 DigestAddrLo; // 0x00000420 - 0x00000423
    NvV32 Reserved01[0x7];
    NvV32 SemaphoreA; // 0x00000440 - 0x00000443
    NvV32 SemaphoreB; // 0x00000444 - 0x00000447
    NvV32 SemaphoreSetPayloadLower; // 0x00000448 - 0x0000044B
    NvV32 SemaphoreSetPayloadUppper; // 0x0000044C - 0x0000044F
    NvV32 SemaphoreD; // 0x00000450 - 0x00000453
    NvU32 Reserved02[0x7];
    NvV32 Execute; // 0x00000470 - 0x00000473
    NvV32 Reserved03[0x23];
} NVCBA2_HOPPER_SEC2_WORK_LAUNCH_AControlPio;

#define NVCBA2_DECRYPT_COPY_SRC_ADDR_HI (0x00000400)
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_HI_DATA 24:0
#define NVCBA2_DECRYPT_COPY_SRC_ADDR_LO (0x00000404)
@@ -112,45 +90,6 @@ typedef volatile struct _clcba2_tag0 {
#define NVCBA2_EXECUTE_TIMESTAMP 5:5
#define NVCBA2_EXECUTE_TIMESTAMP_DISABLE (0x00000000)
#define NVCBA2_EXECUTE_TIMESTAMP_ENABLE (0x00000001)
#define NVCBA2_EXECUTE_PHYSICAL_SCRUBBER 6:6
#define NVCBA2_EXECUTE_PHYSICAL_SCRUBBER_DISABLE (0x00000000)
#define NVCBA2_EXECUTE_PHYSICAL_SCRUBBER_ENABLE (0x00000001)

// Class definitions
#define NVCBA2_DECRYPT_COPY_SIZE_MAX_BYTES (2*1024*1024)
#define NVCBA2_DECRYPT_SCRUB_SIZE_MAX_BYTES (1024*1024*1024)

// Errors
#define NVCBA2_ERROR_NONE (0x00000000)
#define NVCBA2_ERROR_DECRYPT_COPY_SRC_ADDR_MISALIGNED_POINTER (0x00000001)
#define NVCBA2_ERROR_DECRYPT_COPY_DEST_ADDR_MISALIGNED_POINTER (0x00000002)
#define NVCBA2_ERROR_DECRYPT_COPY_AUTH_TAG_ADDR_MISALIGNED_POINTER (0x00000003)
#define NVCBA2_ERROR_DECRYPT_COPY_DMA_NACK (0x00000004)
#define NVCBA2_ERROR_DECRYPT_COPY_AUTH_TAG_MISMATCH (0x00000005)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_MISALIGNED_POINTER (0x00000006)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_DMA_NACK (0x00000007)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_CHECK_FAILURE (0x00000008)
#define NVCBA2_ERROR_MISALIGNED_SIZE (0x00000009)
#define NVCBA2_ERROR_MISSING_METHODS (0x0000000A)
#define NVCBA2_ERROR_SEMAPHORE_RELEASE_DMA_NACK (0x0000000B)
#define NVCBA2_ERROR_DECRYPT_SIZE_MAX_EXCEEDED (0x0000000C)
#define NVCBA2_ERROR_OS_APPLICATION (0x0000000D)
#define NVCBA2_ERROR_INVALID_CTXSW_REQUEST (0x0000000E)
#define NVCBA2_ERROR_BUFFER_OVERFLOW (0x0000000F)
#define NVCBA2_ERROR_IV_OVERFLOW (0x00000010)
#define NVCBA2_ERROR_INTERNAL_SETUP_FAILURE (0x00000011)
#define NVCBA2_ERROR_DECRYPT_COPY_INTERNAL_DMA_FAILURE (0x00000012)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_INTERNAL_DMA_FAILURE (0x00000013)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_HMAC_CALC_FAILURE (0x00000014)
#define NVCBA2_ERROR_NONCE_OVERFLOW (0x00000015)
#define NVCBA2_ERROR_AES_GCM_DECRYPTION_FAILURE (0x00000016)
#define NVCBA2_ERROR_SEMAPHORE_RELEASE_INTERNAL_DMA_FAILURE (0x00000017)
#define NVCBA2_ERROR_KEY_DERIVATION_FAILURE (0x00000018)
#define NVCBA2_ERROR_SCRUBBER_FAILURE (0x00000019)
#define NVCBA2_ERROR_SCRUBBER_INVALD_ADDRESS (0x0000001a)
#define NVCBA2_ERROR_SCRUBBER_INSUFFICIENT_PERMISSIONS (0x0000001b)
#define NVCBA2_ERROR_SCRUBBER_MUTEX_ACQUIRE_FAILURE (0x0000001c)
#define NVCBA2_ERROR_SCRUB_SIZE_MAX_EXCEEDED (0x0000001d)

#ifdef __cplusplus
}; /* extern "C" */
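A hedged sketch of a client-side pre-check against the SEC2 class limits above, returning the matching NVCBA2 error code; the function itself is illustrative, not taken from this header:

/* Hypothetical pre-submit size validation for decrypt-copy and scrub work. */
static NvU32 nvcba2_check_sizes(NvU64 copy_bytes, NvU64 scrub_bytes)
{
    if (copy_bytes > NVCBA2_DECRYPT_COPY_SIZE_MAX_BYTES)
        return NVCBA2_ERROR_DECRYPT_SIZE_MAX_EXCEEDED;
    if (scrub_bytes > NVCBA2_DECRYPT_SCRUB_SIZE_MAX_BYTES)
        return NVCBA2_ERROR_SCRUB_SIZE_MAX_EXCEEDED;
    return NVCBA2_ERROR_NONE;
}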
@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2013-2024 NVIDIA Corporation
    Copyright (c) 2013-2022 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
@@ -34,8 +34,6 @@
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100 (0x00000170)
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GH100 (0x00000180)
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_AD100 (0x00000190)
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GB100 (0x000001A0)
#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GB200 (0x000001B0)

/* valid ARCHITECTURE_GP10x implementation values */
#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GP100 (0x00000000)
@@ -1,547 +0,0 @@
/*******************************************************************************
    Copyright (c) 2003-2016 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/


#ifndef __gb100_dev_fault_h__
#define __gb100_dev_fault_h__
/* This file is autogenerated. Do not edit */
#define NV_PFAULT /* ----G */
#define NV_PFAULT_MMU_ENG_ID_GRAPHICS 384 /* */
#define NV_PFAULT_MMU_ENG_ID_DISPLAY 1 /* */
#define NV_PFAULT_MMU_ENG_ID_GSP 2 /* */
#define NV_PFAULT_MMU_ENG_ID_IFB 55 /* */
#define NV_PFAULT_MMU_ENG_ID_FLA 4 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1 256 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2 320 /* */
#define NV_PFAULT_MMU_ENG_ID_SEC 6 /* */
#define NV_PFAULT_MMU_ENG_ID_FSP 7 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF 10 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF0 10 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF1 11 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF2 12 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF3 13 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF4 14 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF5 15 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF6 16 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF7 17 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF8 18 /* */
#define NV_PFAULT_MMU_ENG_ID_PERF9 19 /* */
#define NV_PFAULT_MMU_ENG_ID_GSPLITE 20 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC 28 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC0 28 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC1 29 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC2 30 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC3 31 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC4 32 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC5 33 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC6 34 /* */
#define NV_PFAULT_MMU_ENG_ID_NVDEC7 35 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG0 36 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG1 37 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG2 38 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG3 39 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG4 40 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG5 41 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG6 42 /* */
#define NV_PFAULT_MMU_ENG_ID_NVJPG7 43 /* */
#define NV_PFAULT_MMU_ENG_ID_GRCOPY 65 /* */
#define NV_PFAULT_MMU_ENG_ID_CE0 65 /* */
#define NV_PFAULT_MMU_ENG_ID_CE1 66 /* */
#define NV_PFAULT_MMU_ENG_ID_CE2 67 /* */
#define NV_PFAULT_MMU_ENG_ID_CE3 68 /* */
#define NV_PFAULT_MMU_ENG_ID_CE4 69 /* */
#define NV_PFAULT_MMU_ENG_ID_CE5 70 /* */
#define NV_PFAULT_MMU_ENG_ID_CE6 71 /* */
#define NV_PFAULT_MMU_ENG_ID_CE7 72 /* */
#define NV_PFAULT_MMU_ENG_ID_CE8 73 /* */
#define NV_PFAULT_MMU_ENG_ID_CE9 74 /* */
#define NV_PFAULT_MMU_ENG_ID_CE10 75 /* */
#define NV_PFAULT_MMU_ENG_ID_CE11 76 /* */
#define NV_PFAULT_MMU_ENG_ID_CE12 77 /* */
#define NV_PFAULT_MMU_ENG_ID_CE13 78 /* */
#define NV_PFAULT_MMU_ENG_ID_CE14 79 /* */
#define NV_PFAULT_MMU_ENG_ID_CE15 80 /* */
#define NV_PFAULT_MMU_ENG_ID_CE16 81 /* */
#define NV_PFAULT_MMU_ENG_ID_CE17 82 /* */
#define NV_PFAULT_MMU_ENG_ID_CE18 83 /* */
#define NV_PFAULT_MMU_ENG_ID_CE19 84 /* */
#define NV_PFAULT_MMU_ENG_ID_PWR_PMU 5 /* */
#define NV_PFAULT_MMU_ENG_ID_PTP 3 /* */
#define NV_PFAULT_MMU_ENG_ID_NVENC0 44 /* */
#define NV_PFAULT_MMU_ENG_ID_NVENC1 45 /* */
#define NV_PFAULT_MMU_ENG_ID_NVENC2 46 /* */
#define NV_PFAULT_MMU_ENG_ID_NVENC3 47 /* */
#define NV_PFAULT_MMU_ENG_ID_OFA0 48 /* */
#define NV_PFAULT_MMU_ENG_ID_PHYSICAL 56 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST0 85 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST1 86 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST2 87 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST3 88 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST4 89 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST5 90 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST6 91 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST7 92 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST8 93 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST9 94 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST10 95 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST11 96 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST12 97 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST13 98 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST14 99 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST15 100 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST16 101 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST17 102 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST18 103 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST19 104 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST20 105 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST21 106 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST22 107 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST23 108 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST24 109 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST25 110 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST26 111 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST27 112 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST28 113 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST29 114 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST30 115 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST31 116 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST32 117 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST33 118 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST34 119 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST35 120 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST36 121 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST37 122 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST38 123 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST39 124 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST40 125 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST41 126 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST42 127 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST43 128 /* */
#define NV_PFAULT_MMU_ENG_ID_HOST44 129 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN0 256 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN1 257 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN2 258 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN3 259 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN4 260 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN5 261 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN6 262 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN7 263 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN8 264 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN9 265 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN10 266 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN11 267 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN12 268 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN13 269 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN14 270 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN15 271 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN16 272 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN17 273 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN18 274 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN19 275 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN20 276 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN21 277 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN22 278 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN23 279 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN24 280 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN25 281 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN26 282 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN27 283 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN28 284 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN29 285 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN30 286 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN31 287 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN32 288 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN33 289 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN34 290 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN35 291 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN36 292 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN37 293 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN38 294 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN39 295 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN40 296 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN41 297 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN42 298 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN43 299 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN44 300 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN45 301 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN46 302 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN47 303 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN48 304 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN49 305 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN50 306 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN51 307 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN52 308 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN53 309 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN54 310 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN55 311 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN56 312 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN57 313 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN58 314 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN59 315 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN60 316 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN61 317 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN62 318 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR1_FN63 319 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN0 320 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN1 321 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN2 322 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN3 323 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN4 324 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN5 325 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN6 326 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN7 327 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN8 328 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN9 329 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN10 330 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN11 331 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN12 332 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN13 333 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN14 334 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN15 335 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN16 336 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN17 337 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN18 338 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN19 339 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN20 340 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN21 341 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN22 342 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN23 343 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN24 344 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN25 345 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN26 346 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN27 347 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN28 348 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN29 349 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN30 350 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN31 351 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN32 352 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN33 353 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN34 354 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN35 355 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN36 356 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN37 357 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN38 358 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN39 359 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN40 360 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN41 361 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN42 362 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN43 363 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN44 364 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN45 365 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN46 366 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN47 367 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN48 368 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN49 369 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN50 370 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN51 371 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN52 372 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN53 373 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN54 374 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN55 375 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN56 376 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN57 377 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN58 378 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN59 379 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN60 380 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN61 381 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN62 382 /* */
#define NV_PFAULT_MMU_ENG_ID_BAR2_FN63 383 /* */
#define NV_PFAULT_FAULT_TYPE 4:0 /* */
#define NV_PFAULT_FAULT_TYPE_PDE 0x00000000 /* */
#define NV_PFAULT_FAULT_TYPE_PDE_SIZE 0x00000001 /* */
#define NV_PFAULT_FAULT_TYPE_PTE 0x00000002 /* */
#define NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION 0x00000003 /* */
#define NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK 0x00000004 /* */
#define NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION 0x00000005 /* */
#define NV_PFAULT_FAULT_TYPE_RO_VIOLATION 0x00000006 /* */
#define NV_PFAULT_FAULT_TYPE_WO_VIOLATION 0x00000007 /* */
#define NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION 0x00000008 /* */
#define NV_PFAULT_FAULT_TYPE_WORK_CREATION 0x00000009 /* */
#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE 0x0000000a /* */
#define NV_PFAULT_FAULT_TYPE_CC_VIOLATION 0x0000000b /* */
#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND 0x0000000c /* */
#define NV_PFAULT_FAULT_TYPE_REGION_VIOLATION 0x0000000d /* */
#define NV_PFAULT_FAULT_TYPE_POISONED 0x0000000e /* */
#define NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION 0x0000000f /* */
#define NV_PFAULT_CLIENT 14:8 /* */
#define NV_PFAULT_CLIENT_GPC_T1_0 0x00000000 /* */
#define NV_PFAULT_CLIENT_GPC_T1_1 0x00000001 /* */
#define NV_PFAULT_CLIENT_GPC_T1_2 0x00000002 /* */
#define NV_PFAULT_CLIENT_GPC_T1_3 0x00000003 /* */
#define NV_PFAULT_CLIENT_GPC_T1_4 0x00000004 /* */
#define NV_PFAULT_CLIENT_GPC_T1_5 0x00000005 /* */
#define NV_PFAULT_CLIENT_GPC_T1_6 0x00000006 /* */
#define NV_PFAULT_CLIENT_GPC_T1_7 0x00000007 /* */
#define NV_PFAULT_CLIENT_GPC_PE_0 0x00000008 /* */
#define NV_PFAULT_CLIENT_GPC_PE_1 0x00000009 /* */
#define NV_PFAULT_CLIENT_GPC_PE_2 0x0000000A /* */
#define NV_PFAULT_CLIENT_GPC_PE_3 0x0000000B /* */
#define NV_PFAULT_CLIENT_GPC_PE_4 0x0000000C /* */
#define NV_PFAULT_CLIENT_GPC_PE_5 0x0000000D /* */
#define NV_PFAULT_CLIENT_GPC_PE_6 0x0000000E /* */
#define NV_PFAULT_CLIENT_GPC_PE_7 0x0000000F /* */
#define NV_PFAULT_CLIENT_GPC_RAST 0x00000010 /* */
#define NV_PFAULT_CLIENT_GPC_GCC 0x00000011 /* */
#define NV_PFAULT_CLIENT_GPC_GPCCS 0x00000012 /* */
#define NV_PFAULT_CLIENT_GPC_PROP_0 0x00000013 /* */
#define NV_PFAULT_CLIENT_GPC_PROP_1 0x00000014 /* */
#define NV_PFAULT_CLIENT_GPC_PROP_2 0x00000015 /* */
#define NV_PFAULT_CLIENT_GPC_PROP_3 0x00000016 /* */
#define NV_PFAULT_CLIENT_GPC_T1_8 0x00000021 /* */
#define NV_PFAULT_CLIENT_GPC_T1_9 0x00000022 /* */
#define NV_PFAULT_CLIENT_GPC_T1_10 0x00000023 /* */
#define NV_PFAULT_CLIENT_GPC_T1_11 0x00000024 /* */
#define NV_PFAULT_CLIENT_GPC_T1_12 0x00000025 /* */
#define NV_PFAULT_CLIENT_GPC_T1_13 0x00000026 /* */
#define NV_PFAULT_CLIENT_GPC_T1_14 0x00000027 /* */
#define NV_PFAULT_CLIENT_GPC_T1_15 0x00000028 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_0 0x00000029 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_1 0x0000002A /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_2 0x0000002B /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_3 0x0000002C /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_4 0x0000002D /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_5 0x0000002E /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_6 0x0000002F /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_7 0x00000030 /* */
#define NV_PFAULT_CLIENT_GPC_PE_8 0x00000031 /* */
#define NV_PFAULT_CLIENT_GPC_PE_9 0x00000032 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_8 0x00000033 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_9 0x00000034 /* */
#define NV_PFAULT_CLIENT_GPC_T1_16 0x00000035 /* */
#define NV_PFAULT_CLIENT_GPC_T1_17 0x00000036 /* */
#define NV_PFAULT_CLIENT_GPC_T1_18 0x00000037 /* */
#define NV_PFAULT_CLIENT_GPC_T1_19 0x00000038 /* */
#define NV_PFAULT_CLIENT_GPC_PE_10 0x00000039 /* */
#define NV_PFAULT_CLIENT_GPC_PE_11 0x0000003A /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_10 0x0000003B /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_11 0x0000003C /* */
#define NV_PFAULT_CLIENT_GPC_T1_20 0x0000003D /* */
#define NV_PFAULT_CLIENT_GPC_T1_21 0x0000003E /* */
#define NV_PFAULT_CLIENT_GPC_T1_22 0x0000003F /* */
#define NV_PFAULT_CLIENT_GPC_T1_23 0x00000040 /* */
#define NV_PFAULT_CLIENT_GPC_PE_12 0x00000041 /* */
#define NV_PFAULT_CLIENT_GPC_PE_13 0x00000042 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_12 0x00000043 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_13 0x00000044 /* */
#define NV_PFAULT_CLIENT_GPC_T1_24 0x00000045 /* */
#define NV_PFAULT_CLIENT_GPC_T1_25 0x00000046 /* */
#define NV_PFAULT_CLIENT_GPC_T1_26 0x00000047 /* */
#define NV_PFAULT_CLIENT_GPC_T1_27 0x00000048 /* */
#define NV_PFAULT_CLIENT_GPC_PE_14 0x00000049 /* */
#define NV_PFAULT_CLIENT_GPC_PE_15 0x0000004A /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_14 0x0000004B /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_15 0x0000004C /* */
#define NV_PFAULT_CLIENT_GPC_T1_28 0x0000004D /* */
#define NV_PFAULT_CLIENT_GPC_T1_29 0x0000004E /* */
#define NV_PFAULT_CLIENT_GPC_T1_30 0x0000004F /* */
#define NV_PFAULT_CLIENT_GPC_T1_31 0x00000050 /* */
#define NV_PFAULT_CLIENT_GPC_PE_16 0x00000051 /* */
#define NV_PFAULT_CLIENT_GPC_PE_17 0x00000052 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_16 0x00000053 /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_17 0x00000054 /* */
#define NV_PFAULT_CLIENT_GPC_T1_32 0x00000055 /* */
#define NV_PFAULT_CLIENT_GPC_T1_33 0x00000056 /* */
#define NV_PFAULT_CLIENT_GPC_T1_34 0x00000057 /* */
#define NV_PFAULT_CLIENT_GPC_T1_35 0x00000058 /* */
#define NV_PFAULT_CLIENT_GPC_PE_18 0x00000059 /* */
#define NV_PFAULT_CLIENT_GPC_PE_19 0x0000005A /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_18 0x0000005B /* */
#define NV_PFAULT_CLIENT_GPC_TPCCS_19 0x0000005C /* */
#define NV_PFAULT_CLIENT_GPC_T1_36 0x0000005D /* */
#define NV_PFAULT_CLIENT_GPC_T1_37 0x0000005E /* */
#define NV_PFAULT_CLIENT_GPC_T1_38 0x0000005F /* */
#define NV_PFAULT_CLIENT_GPC_T1_39 0x00000060 /* */
#define NV_PFAULT_CLIENT_GPC_ROP_0 0x00000070 /* */
#define NV_PFAULT_CLIENT_GPC_ROP_1 0x00000071 /* */
#define NV_PFAULT_CLIENT_GPC_ROP_2 0x00000072 /* */
#define NV_PFAULT_CLIENT_GPC_ROP_3 0x00000073 /* */
#define NV_PFAULT_CLIENT_GPC_GPM 0x00000017 /* */
#define NV_PFAULT_CLIENT_HUB_VIP 0x00000000 /* */
#define NV_PFAULT_CLIENT_HUB_CE0 0x00000001 /* */
#define NV_PFAULT_CLIENT_HUB_CE1 0x00000002 /* */
#define NV_PFAULT_CLIENT_HUB_DNISO 0x00000003 /* */
#define NV_PFAULT_CLIENT_HUB_DISPNISO 0x00000003 /* */
#define NV_PFAULT_CLIENT_HUB_FE0 0x00000004 /* */
#define NV_PFAULT_CLIENT_HUB_FE 0x00000004 /* */
#define NV_PFAULT_CLIENT_HUB_FECS0 0x00000005 /* */
#define NV_PFAULT_CLIENT_HUB_FECS 0x00000005 /* */
#define NV_PFAULT_CLIENT_HUB_HOST 0x00000006 /* */
#define NV_PFAULT_CLIENT_HUB_HOST_CPU 0x00000007 /* */
#define NV_PFAULT_CLIENT_HUB_HOST_CPU_NB 0x00000008 /* */
#define NV_PFAULT_CLIENT_HUB_ISO 0x00000009 /* */
#define NV_PFAULT_CLIENT_HUB_MMU 0x0000000A /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC0 0x0000000B /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC 0x0000000B /* */
#define NV_PFAULT_CLIENT_HUB_CE3 0x0000000C /* */
#define NV_PFAULT_CLIENT_HUB_NVENC1 0x0000000D /* */
#define NV_PFAULT_CLIENT_HUB_NISO 0x0000000E /* */
#define NV_PFAULT_CLIENT_HUB_ACTRS 0x0000000E /* */
#define NV_PFAULT_CLIENT_HUB_P2P 0x0000000F /* */
#define NV_PFAULT_CLIENT_HUB_PD 0x00000010 /* */
#define NV_PFAULT_CLIENT_HUB_PD0 0x00000010 /* */
#define NV_PFAULT_CLIENT_HUB_PERF0 0x00000011 /* */
#define NV_PFAULT_CLIENT_HUB_PERF 0x00000011 /* */
#define NV_PFAULT_CLIENT_HUB_PMU 0x00000012 /* */
#define NV_PFAULT_CLIENT_HUB_RASTERTWOD 0x00000013 /* */
#define NV_PFAULT_CLIENT_HUB_RASTERTWOD0 0x00000013 /* */
#define NV_PFAULT_CLIENT_HUB_SCC 0x00000014 /* */
#define NV_PFAULT_CLIENT_HUB_SCC0 0x00000014 /* */
#define NV_PFAULT_CLIENT_HUB_SCC_NB 0x00000015 /* */
#define NV_PFAULT_CLIENT_HUB_SCC_NB0 0x00000015 /* */
#define NV_PFAULT_CLIENT_HUB_SEC 0x00000016 /* */
#define NV_PFAULT_CLIENT_HUB_SSYNC 0x00000017 /* */
#define NV_PFAULT_CLIENT_HUB_SSYNC0 0x00000017 /* */
#define NV_PFAULT_CLIENT_HUB_GRCOPY 0x00000018 /* */
#define NV_PFAULT_CLIENT_HUB_CE2 0x00000018 /* */
#define NV_PFAULT_CLIENT_HUB_XV 0x00000019 /* */
#define NV_PFAULT_CLIENT_HUB_MMU_NB 0x0000001A /* */
#define NV_PFAULT_CLIENT_HUB_NVENC0 0x0000001B /* */
#define NV_PFAULT_CLIENT_HUB_NVENC 0x0000001B /* */
#define NV_PFAULT_CLIENT_HUB_DFALCON 0x0000001C /* */
#define NV_PFAULT_CLIENT_HUB_SKED0 0x0000001D /* */
#define NV_PFAULT_CLIENT_HUB_SKED 0x0000001D /* */
#define NV_PFAULT_CLIENT_HUB_PD1 0x0000001E /* */
#define NV_PFAULT_CLIENT_HUB_DONT_CARE 0x0000001F /* */
#define NV_PFAULT_CLIENT_HUB_HSCE0 0x00000020 /* */
#define NV_PFAULT_CLIENT_HUB_HSCE1 0x00000021 /* */
#define NV_PFAULT_CLIENT_HUB_HSCE2 0x00000022 /* */
#define NV_PFAULT_CLIENT_HUB_HSCE3 0x00000023 /* */
#define NV_PFAULT_CLIENT_HUB_HSCE4 0x00000024 /* */
#define NV_PFAULT_CLIENT_HUB_HSCE5 0x00000025 /* */
#define NV_PFAULT_CLIENT_HUB_HSCE6 0x00000026 /* */
#define NV_PFAULT_CLIENT_HUB_HSCE7 0x00000027 /* */
#define NV_PFAULT_CLIENT_HUB_SSYNC1 0x00000028 /* */
#define NV_PFAULT_CLIENT_HUB_SSYNC2 0x00000029 /* */
#define NV_PFAULT_CLIENT_HUB_HSHUB 0x0000002A /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X0 0x0000002B /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X1 0x0000002C /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X2 0x0000002D /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X3 0x0000002E /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X4 0x0000002F /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X5 0x00000030 /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X6 0x00000031 /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X7 0x00000032 /* */
#define NV_PFAULT_CLIENT_HUB_NVENC2 0x00000033 /* */
#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER0 0x00000034 /* */
#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER1 0x00000035 /* */
#define NV_PFAULT_CLIENT_HUB_SSYNC3 0x00000036 /* */
#define NV_PFAULT_CLIENT_HUB_FBFALCON 0x00000037 /* */
#define NV_PFAULT_CLIENT_HUB_CE_SHIM 0x00000038 /* */
#define NV_PFAULT_CLIENT_HUB_CE_SHIM0 0x00000038 /* */
#define NV_PFAULT_CLIENT_HUB_GSP 0x00000039 /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC1 0x0000003A /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC2 0x0000003B /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG0 0x0000003C /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC3 0x0000003D /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC4 0x0000003E /* */
#define NV_PFAULT_CLIENT_HUB_OFA0 0x0000003F /* */
#define NV_PFAULT_CLIENT_HUB_SCC1 0x00000040 /* */
#define NV_PFAULT_CLIENT_HUB_SCC_NB1 0x00000041 /* */
#define NV_PFAULT_CLIENT_HUB_SCC2 0x00000042 /* */
#define NV_PFAULT_CLIENT_HUB_SCC_NB2 0x00000043 /* */
#define NV_PFAULT_CLIENT_HUB_SCC3 0x00000044 /* */
#define NV_PFAULT_CLIENT_HUB_SCC_NB3 0x00000045 /* */
#define NV_PFAULT_CLIENT_HUB_RASTERTWOD1 0x00000046 /* */
#define NV_PFAULT_CLIENT_HUB_PTP_X8 0x00000046 /* */
#define NV_PFAULT_CLIENT_HUB_RASTERTWOD2 0x00000047 /* */
#define NV_PFAULT_CLIENT_HUB_RASTERTWOD3 0x00000048 /* */
#define NV_PFAULT_CLIENT_HUB_GSPLITE1 0x00000049 /* */
#define NV_PFAULT_CLIENT_HUB_GSPLITE2 0x0000004A /* */
#define NV_PFAULT_CLIENT_HUB_GSPLITE3 0x0000004B /* */
#define NV_PFAULT_CLIENT_HUB_PD2 0x0000004C /* */
#define NV_PFAULT_CLIENT_HUB_PD3 0x0000004D /* */
#define NV_PFAULT_CLIENT_HUB_FE1 0x0000004E /* */
#define NV_PFAULT_CLIENT_HUB_FE2 0x0000004F /* */
#define NV_PFAULT_CLIENT_HUB_FE3 0x00000050 /* */
#define NV_PFAULT_CLIENT_HUB_FE4 0x00000051 /* */
#define NV_PFAULT_CLIENT_HUB_FE5 0x00000052 /* */
#define NV_PFAULT_CLIENT_HUB_FE6 0x00000053 /* */
#define NV_PFAULT_CLIENT_HUB_FE7 0x00000054 /* */
#define NV_PFAULT_CLIENT_HUB_FECS1 0x00000055 /* */
#define NV_PFAULT_CLIENT_HUB_FECS2 0x00000056 /* */
#define NV_PFAULT_CLIENT_HUB_FECS3 0x00000057 /* */
#define NV_PFAULT_CLIENT_HUB_FECS4 0x00000058 /* */
#define NV_PFAULT_CLIENT_HUB_FECS5 0x00000059 /* */
#define NV_PFAULT_CLIENT_HUB_FECS6 0x0000005A /* */
#define NV_PFAULT_CLIENT_HUB_FECS7 0x0000005B /* */
#define NV_PFAULT_CLIENT_HUB_SKED1 0x0000005C /* */
#define NV_PFAULT_CLIENT_HUB_SKED2 0x0000005D /* */
#define NV_PFAULT_CLIENT_HUB_SKED3 0x0000005E /* */
#define NV_PFAULT_CLIENT_HUB_SKED4 0x0000005F /* */
#define NV_PFAULT_CLIENT_HUB_SKED5 0x00000060 /* */
#define NV_PFAULT_CLIENT_HUB_SKED6 0x00000061 /* */
#define NV_PFAULT_CLIENT_HUB_SKED7 0x00000062 /* */
#define NV_PFAULT_CLIENT_HUB_ESC 0x00000063 /* */
#define NV_PFAULT_CLIENT_HUB_ESC0 0x00000063 /* */
#define NV_PFAULT_CLIENT_HUB_ESC1 0x00000064 /* */
#define NV_PFAULT_CLIENT_HUB_ESC2 0x00000065 /* */
#define NV_PFAULT_CLIENT_HUB_ESC3 0x00000066 /* */
#define NV_PFAULT_CLIENT_HUB_ESC4 0x00000067 /* */
#define NV_PFAULT_CLIENT_HUB_ESC5 0x00000068 /* */
#define NV_PFAULT_CLIENT_HUB_ESC6 0x00000069 /* */
#define NV_PFAULT_CLIENT_HUB_ESC7 0x0000006a /* */
#define NV_PFAULT_CLIENT_HUB_ESC8 0x0000006b /* */
#define NV_PFAULT_CLIENT_HUB_ESC9 0x0000006c /* */
#define NV_PFAULT_CLIENT_HUB_ESC10 0x0000006d /* */
#define NV_PFAULT_CLIENT_HUB_ESC11 0x0000006e /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC5 0x0000006F /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC6 0x00000070 /* */
#define NV_PFAULT_CLIENT_HUB_NVDEC7 0x00000071 /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG1 0x00000072 /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG2 0x00000073 /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG3 0x00000074 /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG4 0x00000075 /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG5 0x00000076 /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG6 0x00000077 /* */
#define NV_PFAULT_CLIENT_HUB_NVJPG7 0x00000078 /* */
#define NV_PFAULT_CLIENT_HUB_FSP 0x00000079 /* */
#define NV_PFAULT_CLIENT_HUB_BSI 0x0000007A /* */
#define NV_PFAULT_CLIENT_HUB_GSPLITE 0x0000007B /* */
#define NV_PFAULT_CLIENT_HUB_GSPLITE0 0x0000007B /* */
#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER2 0x0000007C /* */
#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER3 0x0000007D /* */
#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER4 0x0000007E /* */
#define NV_PFAULT_CLIENT_HUB_NVENC3 0x0000007F /* */
#define NV_PFAULT_ACCESS_TYPE 19:16 /* */
#define NV_PFAULT_ACCESS_TYPE_READ 0x00000000 /* */
#define NV_PFAULT_ACCESS_TYPE_WRITE 0x00000001 /* */
#define NV_PFAULT_ACCESS_TYPE_ATOMIC 0x00000002 /* */
#define NV_PFAULT_ACCESS_TYPE_PREFETCH 0x00000003 /* */
#define NV_PFAULT_ACCESS_TYPE_VIRT_READ 0x00000000 /* */
#define NV_PFAULT_ACCESS_TYPE_VIRT_WRITE 0x00000001 /* */
#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC 0x00000002 /* */
#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_STRONG 0x00000002 /* */
#define NV_PFAULT_ACCESS_TYPE_VIRT_PREFETCH 0x00000003 /* */
#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_WEAK 0x00000004 /* */
#define NV_PFAULT_ACCESS_TYPE_PHYS_READ 0x00000008 /* */
#define NV_PFAULT_ACCESS_TYPE_PHYS_WRITE 0x00000009 /* */
#define NV_PFAULT_ACCESS_TYPE_PHYS_ATOMIC 0x0000000a /* */
#define NV_PFAULT_ACCESS_TYPE_PHYS_PREFETCH 0x0000000b /* */
#define NV_PFAULT_MMU_CLIENT_TYPE 20:20 /* */
#define NV_PFAULT_MMU_CLIENT_TYPE_GPC 0x00000000 /* */
#define NV_PFAULT_MMU_CLIENT_TYPE_HUB 0x00000001 /* */
#define NV_PFAULT_GPC_ID 28:24 /* */
#define NV_PFAULT_PROTECTED_MODE 29:29 /* */
#define NV_PFAULT_REPLAYABLE_FAULT_EN 30:30 /* */
#define NV_PFAULT_VALID 31:31 /* */
#endif // __gb100_dev_fault_h__
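The hi:lo values in this header are bit ranges within a 32-bit fault-info word. A hedged sketch of extracting them; the helper and variable names are illustrative, not from the driver:

#include "nvtypes.h"

/* Illustrative extraction of a hi:lo bit range from a 32-bit word. */
static inline NvU32 pfault_field(NvU32 word, unsigned hi, unsigned lo)
{
    return (word >> lo) & ((1u << (hi - lo + 1u)) - 1u);
}

/* Example: split one fault-info word using the ranges defined above. */
static void decode_fault(NvU32 fault_info)
{
    NvU32 fault_type  = pfault_field(fault_info, 4, 0);   /* NV_PFAULT_FAULT_TYPE      */
    NvU32 client      = pfault_field(fault_info, 14, 8);  /* NV_PFAULT_CLIENT          */
    NvU32 client_type = pfault_field(fault_info, 20, 20); /* NV_PFAULT_MMU_CLIENT_TYPE */
    NvU32 valid       = pfault_field(fault_info, 31, 31); /* NV_PFAULT_VALID           */
    (void)fault_type; (void)client; (void)client_type; (void)valid;
}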
@@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if (i == (attempts - 1))
        if ((i == (attempts - 1)))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
@@ -247,11 +247,6 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
    return 0;
}

int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}

// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
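For context, a hedged sketch of how this queue API is typically used, assuming the item-init and schedule counterparts declared in nv-kthread-q.h; the callback and setup function here are hypothetical. The removed nv_kthread_q_init() was a thin wrapper passing NV_KTHREAD_NO_NODE, i.e. no NUMA placement preference:

#include "nv-kthread-q.h"

static nv_kthread_q_t my_q;
static nv_kthread_q_item_t my_item;

static void my_callback(void *args)
{
    /* Work runs on the queue's dedicated kthread. */
}

static int setup(void)
{
    int ret = nv_kthread_q_init_on_node(&my_q, "my_q", NV_KTHREAD_NO_NODE);
    if (ret != 0)
        return ret;
    nv_kthread_q_item_init(&my_item, my_callback, NULL);
    nv_kthread_q_schedule_q_item(&my_q, &my_item);
    return 0;
}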
@@ -1,8 +1,14 @@
NVIDIA_UVM_SOURCES ?=
NVIDIA_UVM_SOURCES_CXX ?=

NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_sva.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_conf_computing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_sec2_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_common.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_linux.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_debug_optimized.c
NVIDIA_UVM_SOURCES += nvidia-uvm/nvstatus.c
NVIDIA_UVM_SOURCES += nvidia-uvm/nvCpuUuid.c
NVIDIA_UVM_SOURCES += nvidia-uvm/nv-kthread-q.c
@@ -21,12 +27,10 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rm_mem.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_channel.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_lock.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hal.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_processors.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_allocator.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_range.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_range_device_p2p.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_policy.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_block.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_group.c
@@ -45,7 +49,6 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_tracker.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_access_counter_buffer.c
@@ -59,6 +62,7 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_access_counter_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_access_counter_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_fault_buffer.c
@@ -67,19 +71,13 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_ce.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_sec2.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hopper_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ada.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_fault_buffer.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_mmu.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_blackwell_host.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_policy.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_utils.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_kvmalloc.c
@@ -97,8 +95,6 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_prefetch.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_ibm.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_faults.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_sva.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_conf_computing.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test_rng.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree_test.c
@@ -126,4 +122,3 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_block_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_group_tree_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_thread_context_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree_test.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_sec2_test.c
@@ -13,6 +13,19 @@ NVIDIA_UVM_OBJECTS =
|
|||||||
include $(src)/nvidia-uvm/nvidia-uvm-sources.Kbuild
|
include $(src)/nvidia-uvm/nvidia-uvm-sources.Kbuild
|
||||||
NVIDIA_UVM_OBJECTS += $(patsubst %.c,%.o,$(NVIDIA_UVM_SOURCES))
|
NVIDIA_UVM_OBJECTS += $(patsubst %.c,%.o,$(NVIDIA_UVM_SOURCES))
|
||||||
|
|
||||||
|
# Some linux kernel functions rely on being built with optimizations on and
|
||||||
|
# to work around this we put wrappers for them in a separate file that's built
|
||||||
|
# with optimizations on in debug builds and skipped in other builds.
|
||||||
|
# Notably gcc 4.4 supports per function optimization attributes that would be
|
||||||
|
# easier to use, but is too recent to rely on for now.
|
||||||
|
NVIDIA_UVM_DEBUG_OPTIMIZED_SOURCE := nvidia-uvm/uvm_debug_optimized.c
|
||||||
|
NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT := $(patsubst %.c,%.o,$(NVIDIA_UVM_DEBUG_OPTIMIZED_SOURCE))
|
||||||
|
|
||||||
|
ifneq ($(UVM_BUILD_TYPE),debug)
|
||||||
|
# Only build the wrappers on debug builds
|
||||||
|
NVIDIA_UVM_OBJECTS := $(filter-out $(NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT), $(NVIDIA_UVM_OBJECTS))
|
||||||
|
endif
|
||||||
|
|
||||||
obj-m += nvidia-uvm.o
|
obj-m += nvidia-uvm.o
|
||||||
nvidia-uvm-y := $(NVIDIA_UVM_OBJECTS)
|
nvidia-uvm-y := $(NVIDIA_UVM_OBJECTS)
|
||||||
|
|
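The Kbuild comment in the hunk above mentions GCC 4.4's per-function optimization attributes as the simpler alternative to a separate uvm_debug_optimized.c file. A minimal sketch of that alternative, with an illustrative function name and body not taken from this tree:

    /* Compile just this function at -O2, even when the rest of the
     * translation unit is built with debug flags; this is the GCC >= 4.4
     * attribute the comment refers to. */
    __attribute__((optimize("O2")))
    static unsigned long uvm_example_hash(unsigned long x)
    {
        return x * 2654435761UL; /* stays optimized in -O1/-g debug builds */
    }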
@@ -23,14 +36,15 @@ NVIDIA_UVM_KO = nvidia-uvm/nvidia-uvm.ko
 #
 
 ifeq ($(UVM_BUILD_TYPE),debug)
-  NVIDIA_UVM_CFLAGS += -DDEBUG -g
-endif
-
-ifeq ($(UVM_BUILD_TYPE),develop)
-  # -DDEBUG is required, in order to allow pr_devel() print statements to
-  # work:
-  NVIDIA_UVM_CFLAGS += -DDEBUG
-  NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_DEVELOP
+  NVIDIA_UVM_CFLAGS += -DDEBUG -O1 -g
+else
+  ifeq ($(UVM_BUILD_TYPE),develop)
+    # -DDEBUG is required, in order to allow pr_devel() print statements to
+    # work:
+    NVIDIA_UVM_CFLAGS += -DDEBUG
+    NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_DEVELOP
+  endif
+  NVIDIA_UVM_CFLAGS += -O2
 endif
 
 NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_ENABLED
@@ -42,42 +56,62 @@ NVIDIA_UVM_CFLAGS += -I$(src)/nvidia-uvm
 
 $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_UVM_OBJECTS), $(NVIDIA_UVM_CFLAGS))
 
+ifeq ($(UVM_BUILD_TYPE),debug)
+  # Force optimizations on for the wrappers
+  $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT), $(NVIDIA_UVM_CFLAGS) -O2)
+endif
+
 #
 # Register the conftests needed by nvidia-uvm.ko
 #
 
 NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_UVM_OBJECTS)
 
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += wait_on_bit_lock_argument_count
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_empty
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_replace_slot
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_npu2_init_context
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpumask_of_node
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_pasid_drop
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += mmget_not_zero
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += mmgrab
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_fault_to_errno
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += find_next_bit_wrap
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_is_dma_domain
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += for_each_sgtable_dma_page
-NV_CONFTEST_FUNCTION_COMPILE_TESTS += folio_test_swapcache
 
+NV_CONFTEST_TYPE_COMPILE_TESTS += backing_dev_info
+NV_CONFTEST_TYPE_COMPILE_TESTS += mm_context_t
+NV_CONFTEST_TYPE_COMPILE_TESTS += get_user_pages_remote
+NV_CONFTEST_TYPE_COMPILE_TESTS += get_user_pages
+NV_CONFTEST_TYPE_COMPILE_TESTS += pin_user_pages_remote
+NV_CONFTEST_TYPE_COMPILE_TESTS += pin_user_pages
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
 NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
+NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
 NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_invalidate_range
 NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_arch_invalidate_secondary_tlbs
+NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
+NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
+NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
 NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags
 NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_device_range
+NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
 NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_mm_arg
 NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
 NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
 NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
 NV_CONFTEST_TYPE_COMPILE_TESTS += mpol_preferred_many_present
 NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_interval_notifier
-NV_CONFTEST_TYPE_COMPILE_TESTS += fault_flag_remote_present
-NV_CONFTEST_TYPE_COMPILE_TESTS += sg_dma_page_iter
-NV_CONFTEST_TYPE_COMPILE_TESTS += struct_page_has_zone_device_data
 
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_migrate_vma_setup
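Each name added to or removed from these NV_CONFTEST_*_COMPILE_TESTS lists selects a small probe that the build's conftest script compiles against the target kernel headers; whether that probe builds decides a feature macro for the driver sources. A hedged sketch of what a function probe such as timer_setup boils down to (the probe body and the generated macro name are illustrative):

    #include <linux/timer.h>

    /* If this compiles against the target kernel, the conftest machinery
     * records that timer_setup() exists (roughly: it defines something
     * like NV_TIMER_SETUP_PRESENT for the driver build). */
    void conftest_timer_setup(struct timer_list *t)
    {
        timer_setup(t, NULL, 0);
    }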
@@ -24,17 +24,16 @@
 #include "nvstatus.h"
 
 #if !defined(NV_PRINTF_STRING_SECTION)
-#if defined(NVRM) && NVOS_IS_LIBOS
-#include "libos_log.h"
-#define NV_PRINTF_STRING_SECTION LIBOS_SECTION_LOGGING
-#else // defined(NVRM) && NVOS_IS_LIBOS
+#if defined(NVRM) && NVCPU_IS_RISCV64
+#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging")))
+#else // defined(NVRM) && NVCPU_IS_RISCV64
 #define NV_PRINTF_STRING_SECTION
-#endif // defined(NVRM) && NVOS_IS_LIBOS
+#endif // defined(NVRM) && NVCPU_IS_RISCV64
 #endif // !defined(NV_PRINTF_STRING_SECTION)
 
 /*
  * Include nvstatuscodes.h twice. Once for creating constant strings in the
- * the NV_PRINTF_STRING_SECTION section of the executable, and once to build
+ * the NV_PRINTF_STRING_SECTION section of the ececutable, and once to build
  * the g_StatusCodeList table.
  */
 #undef NV_STATUS_CODE
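The double include described in the comment above is the classic X-macro technique: nvstatuscodes.h expands a user-supplied NV_STATUS_CODE() macro once per status code. A hedged sketch of the two expansions (the exact field layout of g_StatusCodeList in this file may differ):

    /* First expansion: emit one string constant per status code, placed in
     * the NV_PRINTF_STRING_SECTION section. */
    #define NV_STATUS_CODE(name, code, str) \
        static const char NV_PRINTF_STRING_SECTION name##_str[] = str;
    #include "nvstatuscodes.h"

    /* Second expansion: build the lookup table that maps codes to strings. */
    #undef NV_STATUS_CODE
    #define NV_STATUS_CODE(name, code, str) { name, name##_str },
    static const struct { NV_STATUS status; const char *str; } g_StatusCodeList[] = {
    #include "nvstatuscodes.h"
    };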
@@ -1,5 +1,5 @@
 /*******************************************************************************
-    Copyright (c) 2015-2024 NVIDIA Corporation
+    Copyright (c) 2015-2022 NVIDIA Corporation
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to

@@ -127,9 +127,9 @@ static NV_STATUS uvm_api_mm_initialize(UVM_MM_INITIALIZE_PARAMS *params, struct
         goto err;
     }
 
-    old_fd_type = atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
+    old_fd_type = nv_atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
                                       UVM_FD_UNINITIALIZED,
                                       UVM_FD_INITIALIZING);
     old_fd_type &= UVM_FD_TYPE_MASK;
     if (old_fd_type != UVM_FD_UNINITIALIZED) {
         status = NV_ERR_IN_USE;
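Both versions implement the same one-shot claim on filp->private_data: exactly one thread wins the UVM_FD_UNINITIALIZED to UVM_FD_INITIALIZING transition, and every other caller gets NV_ERR_IN_USE. A self-contained user-space analogue of the pattern, assuming C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { FD_UNINITIALIZED = 0, FD_INITIALIZING = 1, FD_VA_SPACE = 2 };

    static _Atomic long fd_state = FD_UNINITIALIZED;

    /* Mirrors the driver's cmpxchg on filp->private_data: only the caller
     * that observes FD_UNINITIALIZED may proceed to initialize. */
    static int claim_fd(void)
    {
        long expected = FD_UNINITIALIZED;
        if (!atomic_compare_exchange_strong(&fd_state, &expected, FD_INITIALIZING))
            return -1; /* corresponds to NV_ERR_IN_USE */
        /* ... create the va_space ... */
        atomic_store(&fd_state, FD_VA_SPACE); /* publish the final fd type */
        return 0;
    }

    int main(void)
    {
        printf("first claim: %d, second claim: %d\n", claim_fd(), claim_fd());
        return 0;
    }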
@@ -222,6 +222,10 @@ static int uvm_open(struct inode *inode, struct file *filp)
     // assigning f_mapping.
     mapping->a_ops = inode->i_mapping->a_ops;
 
+#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO)
+    mapping->backing_dev_info = inode->i_mapping->backing_dev_info;
+#endif
+
     filp->private_data = NULL;
     filp->f_mapping = mapping;
 

@@ -240,7 +244,7 @@ static void uvm_release_deferred(void *data)
     // Since this function is only scheduled to run when uvm_release() fails
     // to trylock-acquire the pm.lock, the following acquisition attempt
     // is expected to block this thread, and cause it to remain blocked until
     // uvm_resume() releases the lock. As a result, the deferred release
     // kthread queue may stall for long periods of time.
     uvm_down_read(&g_uvm_global.pm.lock);
 

@@ -292,14 +296,14 @@ static int uvm_release(struct inode *inode, struct file *filp)
 
     // Because the kernel discards the status code returned from this release
     // callback, early exit in case of a pm.lock acquisition failure is not
     // an option. Instead, the teardown work normally performed synchronously
     // needs to be scheduled to run after uvm_resume() releases the lock.
     if (uvm_down_read_trylock(&g_uvm_global.pm.lock)) {
         uvm_va_space_destroy(va_space);
         uvm_up_read(&g_uvm_global.pm.lock);
     }
     else {
         // Remove references to this inode from the address_space. This isn't
         // strictly necessary, as any CPU mappings of this file have already
         // been destroyed, and va_space->mapping won't be used again. Still,
         // the va_space survives the inode if its destruction is deferred, in

@@ -321,21 +325,21 @@ static int uvm_release_entry(struct inode *inode, struct file *filp)
 
 static void uvm_destroy_vma_managed(struct vm_area_struct *vma, bool make_zombie)
 {
-    uvm_va_range_managed_t *managed_range, *managed_range_next;
+    uvm_va_range_t *va_range, *va_range_next;
     NvU64 size = 0;
 
     uvm_assert_rwsem_locked_write(&uvm_va_space_get(vma->vm_file)->lock);
-    uvm_for_each_va_range_managed_in_vma_safe(managed_range, managed_range_next, vma) {
+    uvm_for_each_va_range_in_vma_safe(va_range, va_range_next, vma) {
         // On exit_mmap (process teardown), current->mm is cleared so
         // uvm_va_range_vma_current would return NULL.
-        UVM_ASSERT(uvm_va_range_vma(managed_range) == vma);
-        UVM_ASSERT(managed_range->va_range.node.start >= vma->vm_start);
-        UVM_ASSERT(managed_range->va_range.node.end < vma->vm_end);
-        size += uvm_va_range_size(&managed_range->va_range);
+        UVM_ASSERT(uvm_va_range_vma(va_range) == vma);
+        UVM_ASSERT(va_range->node.start >= vma->vm_start);
+        UVM_ASSERT(va_range->node.end < vma->vm_end);
+        size += uvm_va_range_size(va_range);
         if (make_zombie)
-            uvm_va_range_zombify(managed_range);
+            uvm_va_range_zombify(va_range);
         else
-            uvm_va_range_destroy(&managed_range->va_range, NULL);
+            uvm_va_range_destroy(va_range, NULL);
     }
 
     if (vma->vm_private_data) {

@@ -347,17 +351,18 @@ static void uvm_destroy_vma_managed(struct vm_area_struct *vma, bool make_zombie
 
 static void uvm_destroy_vma_semaphore_pool(struct vm_area_struct *vma)
 {
-    uvm_va_range_semaphore_pool_t *semaphore_pool_range;
     uvm_va_space_t *va_space;
+    uvm_va_range_t *va_range;
 
     va_space = uvm_va_space_get(vma->vm_file);
     uvm_assert_rwsem_locked(&va_space->lock);
-    semaphore_pool_range = uvm_va_range_semaphore_pool_find(va_space, vma->vm_start);
-    UVM_ASSERT(semaphore_pool_range &&
-               semaphore_pool_range->va_range.node.start == vma->vm_start &&
-               semaphore_pool_range->va_range.node.end + 1 == vma->vm_end);
+    va_range = uvm_va_range_find(va_space, vma->vm_start);
+    UVM_ASSERT(va_range &&
+               va_range->node.start == vma->vm_start &&
+               va_range->node.end + 1 == vma->vm_end &&
+               va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL);
 
-    uvm_mem_unmap_cpu_user(semaphore_pool_range->mem);
+    uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem);
 }
 
 // If a fault handler is not set, paths like handle_pte_fault in older kernels

@@ -473,7 +478,7 @@ static void uvm_vm_open_failure(struct vm_area_struct *original,
 static void uvm_vm_open_managed(struct vm_area_struct *vma)
 {
     uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
-    uvm_va_range_managed_t *managed_range;
+    uvm_va_range_t *va_range;
     struct vm_area_struct *original;
     NV_STATUS status;
     NvU64 new_end;

@@ -529,13 +534,13 @@ static void uvm_vm_open_managed(struct vm_area_struct *vma)
         goto out;
     }
 
-    // There can be multiple ranges under the vma already. Check if one spans
+    // There can be multiple va_ranges under the vma already. Check if one spans
     // the new split boundary. If so, split it.
-    managed_range = uvm_va_range_managed_find(va_space, new_end);
-    UVM_ASSERT(managed_range);
-    UVM_ASSERT(uvm_va_range_vma_current(managed_range) == original);
-    if (managed_range->va_range.node.end != new_end) {
-        status = uvm_va_range_split(managed_range, new_end, NULL);
+    va_range = uvm_va_range_find(va_space, new_end);
+    UVM_ASSERT(va_range);
+    UVM_ASSERT(uvm_va_range_vma_current(va_range) == original);
+    if (va_range->node.end != new_end) {
+        status = uvm_va_range_split(va_range, new_end, NULL);
         if (status != NV_OK) {
             UVM_DBG_PRINT("Failed to split VA range, destroying both: %s. "
                           "original vma [0x%lx, 0x%lx) new vma [0x%lx, 0x%lx)\n",

@@ -547,10 +552,10 @@ static void uvm_vm_open_managed(struct vm_area_struct *vma)
         }
     }
 
-    // Point managed_ranges to the new vma
-    uvm_for_each_va_range_managed_in_vma(managed_range, vma) {
-        UVM_ASSERT(uvm_va_range_vma_current(managed_range) == original);
-        managed_range->vma_wrapper = vma->vm_private_data;
+    // Point va_ranges to the new vma
+    uvm_for_each_va_range_in_vma(va_range, vma) {
+        UVM_ASSERT(uvm_va_range_vma_current(va_range) == original);
+        va_range->managed.vma_wrapper = vma->vm_private_data;
     }
 
 out:

@@ -652,12 +657,12 @@ static struct vm_operations_struct uvm_vm_ops_managed =
 };
 
 // vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs,
-// freeing the allocation, and destroying the range are handled by UVM_FREE.
+// freeing the allocation, and destroying the va_range are handled by UVM_FREE.
 static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma)
 {
     struct vm_area_struct *origin_vma = (struct vm_area_struct *)vma->vm_private_data;
     uvm_va_space_t *va_space = uvm_va_space_get(origin_vma->vm_file);
-    uvm_va_range_semaphore_pool_t *semaphore_pool_range;
+    uvm_va_range_t *va_range;
     bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
     NV_STATUS status;
 

@@ -665,24 +670,18 @@ static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma)
 
     uvm_va_space_down_write(va_space);
 
-    semaphore_pool_range = uvm_va_range_semaphore_pool_find(va_space, origin_vma->vm_start);
-    UVM_ASSERT(semaphore_pool_range);
-    UVM_ASSERT_MSG(semaphore_pool_range &&
-                   semaphore_pool_range->va_range.node.start == origin_vma->vm_start &&
-                   semaphore_pool_range->va_range.node.end + 1 == origin_vma->vm_end,
+    va_range = uvm_va_range_find(va_space, origin_vma->vm_start);
+    UVM_ASSERT(va_range);
+    UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL &&
+                   va_range->node.start == origin_vma->vm_start &&
+                   va_range->node.end + 1 == origin_vma->vm_end,
                    "origin vma [0x%llx, 0x%llx); va_range [0x%llx, 0x%llx) type %d\n",
-                   (NvU64)origin_vma->vm_start,
-                   (NvU64)origin_vma->vm_end,
-                   semaphore_pool_range->va_range.node.start,
-                   semaphore_pool_range->va_range.node.end + 1,
-                   semaphore_pool_range->va_range.type);
+                   (NvU64)origin_vma->vm_start, (NvU64)origin_vma->vm_end, va_range->node.start,
+                   va_range->node.end + 1, va_range->type);
 
     // Semaphore pool vmas do not have vma wrappers, but some functions will
     // assume vm_private_data is a wrapper.
     vma->vm_private_data = NULL;
-#if defined(VM_WIPEONFORK)
-    nv_vm_flags_set(vma, VM_WIPEONFORK);
-#endif
 
     if (is_fork) {
         // If we forked, leave the parent vma alone.

@@ -690,9 +689,9 @@ static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma)
 
         // uvm_disable_vma unmaps in the parent as well; clear the uvm_mem CPU
         // user mapping metadata and then remap.
-        uvm_mem_unmap_cpu_user(semaphore_pool_range->mem);
+        uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem);
 
-        status = uvm_mem_map_cpu_user(semaphore_pool_range->mem, semaphore_pool_range->va_range.va_space, origin_vma);
+        status = uvm_mem_map_cpu_user(va_range->semaphore_pool.mem, va_range->va_space, origin_vma);
         if (status != NV_OK) {
             UVM_DBG_PRINT("Failed to remap semaphore pool to CPU for parent after fork; status = %d (%s)",
                           status, nvstatusToString(status));

@@ -703,7 +702,7 @@ static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma)
         origin_vma->vm_private_data = NULL;
         origin_vma->vm_ops = &uvm_vm_ops_disabled;
         vma->vm_ops = &uvm_vm_ops_disabled;
-        uvm_mem_unmap_cpu_user(semaphore_pool_range->mem);
+        uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem);
     }
 
     uvm_va_space_up_write(va_space);

@@ -752,84 +751,10 @@ static struct vm_operations_struct uvm_vm_ops_semaphore_pool =
 #endif
 };
 
-static void uvm_vm_open_device_p2p(struct vm_area_struct *vma)
-{
-    struct vm_area_struct *origin_vma = (struct vm_area_struct *)vma->vm_private_data;
-    uvm_va_space_t *va_space = uvm_va_space_get(origin_vma->vm_file);
-    uvm_va_range_t *va_range;
-    bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
-
-    uvm_record_lock_mmap_lock_write(current->mm);
-
-    uvm_va_space_down_write(va_space);
-
-    va_range = uvm_va_range_find(va_space, origin_vma->vm_start);
-    UVM_ASSERT(va_range);
-    UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_DEVICE_P2P &&
-                   va_range->node.start == origin_vma->vm_start &&
-                   va_range->node.end + 1 == origin_vma->vm_end,
-                   "origin vma [0x%llx, 0x%llx); va_range [0x%llx, 0x%llx) type %d\n",
-                   (NvU64)origin_vma->vm_start, (NvU64)origin_vma->vm_end, va_range->node.start,
-                   va_range->node.end + 1, va_range->type);
-
-    // Device P2P vmas do not have vma wrappers, but some functions will
-    // assume vm_private_data is a wrapper.
-    vma->vm_private_data = NULL;
-#if defined(VM_WIPEONFORK)
-    nv_vm_flags_set(vma, VM_WIPEONFORK);
-#endif
-
-    if (is_fork) {
-        // If we forked, leave the parent vma alone.
-        uvm_disable_vma(vma);
-
-        // uvm_disable_vma unmaps in the parent as well so remap the parent
-        uvm_va_range_device_p2p_map_cpu(va_range->va_space, origin_vma, uvm_va_range_to_device_p2p(va_range));
-    }
-    else {
-        // mremap will free the backing pages via unmap so we can't support it.
-        origin_vma->vm_private_data = NULL;
-        origin_vma->vm_ops = &uvm_vm_ops_disabled;
-        vma->vm_ops = &uvm_vm_ops_disabled;
-        unmap_mapping_range(va_space->mapping, va_range->node.start, va_range->node.end - va_range->node.start + 1, 1);
-    }
-
-    uvm_va_space_up_write(va_space);
-
-    uvm_record_unlock_mmap_lock_write(current->mm);
-}
-
-static void uvm_vm_open_device_p2p_entry(struct vm_area_struct *vma)
-{
-    UVM_ENTRY_VOID(uvm_vm_open_device_p2p(vma));
-}
-
-// Device P2P pages are only mapped on the CPU. Pages are allocated externally
-// to UVM but destroying the range must unpin the RM object.
-static void uvm_vm_close_device_p2p(struct vm_area_struct *vma)
-{
-}
-
-static void uvm_vm_close_device_p2p_entry(struct vm_area_struct *vma)
-{
-    UVM_ENTRY_VOID(uvm_vm_close_device_p2p(vma));
-}
-
-static struct vm_operations_struct uvm_vm_ops_device_p2p =
-{
-    .open = uvm_vm_open_device_p2p_entry,
-    .close = uvm_vm_close_device_p2p_entry,
-
-#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
-    .fault = uvm_vm_fault_sigbus_wrapper_entry,
-#else
-    .fault = uvm_vm_fault_sigbus_entry,
-#endif
-};
-
 static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
     uvm_va_space_t *va_space;
+    uvm_va_range_t *va_range;
     NV_STATUS status = uvm_global_get_status();
     int ret = 0;
     bool vma_wrapper_allocated = false;

@@ -867,8 +792,8 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
     }
 
     // If the PM lock cannot be acquired, disable the VMA and report success
    // to the caller. The caller is expected to determine whether the
     // map operation succeeded via an ioctl() call. This is necessary to
     // safely handle MAP_FIXED, which needs to complete atomically to prevent
     // the loss of the virtual address range.
     if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) {

@@ -920,28 +845,18 @@ static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
     status = uvm_va_range_create_mmap(va_space, current->mm, vma->vm_private_data, NULL);
 
     if (status == NV_ERR_UVM_ADDRESS_IN_USE) {
-        uvm_va_range_semaphore_pool_t *semaphore_pool_range;
-        uvm_va_range_device_p2p_t *device_p2p_range;
         // If the mmap is for a semaphore pool, the VA range will have been
         // allocated by a previous ioctl, and the mmap just creates the CPU
         // mapping.
-        semaphore_pool_range = uvm_va_range_semaphore_pool_find(va_space, vma->vm_start);
-        device_p2p_range = uvm_va_range_device_p2p_find(va_space, vma->vm_start);
-        if (semaphore_pool_range && semaphore_pool_range->va_range.node.start == vma->vm_start &&
-            semaphore_pool_range->va_range.node.end + 1 == vma->vm_end) {
+        va_range = uvm_va_range_find(va_space, vma->vm_start);
+        if (va_range && va_range->node.start == vma->vm_start &&
+            va_range->node.end + 1 == vma->vm_end &&
+            va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL) {
             uvm_vma_wrapper_destroy(vma->vm_private_data);
             vma_wrapper_allocated = false;
             vma->vm_private_data = vma;
             vma->vm_ops = &uvm_vm_ops_semaphore_pool;
-            status = uvm_mem_map_cpu_user(semaphore_pool_range->mem, semaphore_pool_range->va_range.va_space, vma);
-        }
-        else if (device_p2p_range && device_p2p_range->va_range.node.start == vma->vm_start &&
-                 device_p2p_range->va_range.node.end + 1 == vma->vm_end) {
-            uvm_vma_wrapper_destroy(vma->vm_private_data);
-            vma_wrapper_allocated = false;
-            vma->vm_private_data = vma;
-            vma->vm_ops = &uvm_vm_ops_device_p2p;
-            status = uvm_va_range_device_p2p_map_cpu(va_space, vma, device_p2p_range);
+            status = uvm_mem_map_cpu_user(va_range->semaphore_pool.mem, va_range->va_space, vma);
         }
     }
 

@@ -999,9 +914,8 @@ static NV_STATUS uvm_api_initialize(UVM_INITIALIZE_PARAMS *params, struct file *
     // attempt to be made. This is safe because other threads will have only had
     // a chance to observe UVM_FD_INITIALIZING and not UVM_FD_VA_SPACE in this
     // case.
-    old_fd_type = atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
-                                      UVM_FD_UNINITIALIZED,
-                                      UVM_FD_INITIALIZING);
+    old_fd_type = nv_atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
+                                         UVM_FD_UNINITIALIZED, UVM_FD_INITIALIZING);
     old_fd_type &= UVM_FD_TYPE_MASK;
     if (old_fd_type == UVM_FD_UNINITIALIZED) {
         status = uvm_va_space_create(filp->f_mapping, &va_space, params->flags);

@@ -1087,9 +1001,6 @@ static long uvm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CLEAN_UP_ZOMBIE_RESOURCES, uvm_api_clean_up_zombie_resources);
         UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_POPULATE_PAGEABLE, uvm_api_populate_pageable);
         UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_VALIDATE_VA_RANGE, uvm_api_validate_va_range);
-        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_GET_PROCESSOR_UUID_TABLE_V2,uvm_api_tools_get_processor_uuid_table_v2);
-        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ALLOC_DEVICE_P2P, uvm_api_alloc_device_p2p);
-        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CLEAR_ALL_ACCESS_COUNTERS, uvm_api_clear_all_access_counters);
     }
 
     // Try the test ioctls if none of the above matched

@@ -1142,7 +1053,7 @@ NV_STATUS uvm_test_register_unload_state_buffer(UVM_TEST_REGISTER_UNLOAD_STATE_B
     // are not used because unload_state_buf may be a managed memory pointer and
     // therefore a locking assertion from the CPU fault handler could be fired.
     nv_mmap_read_lock(current->mm);
-    ret = NV_PIN_USER_PAGES(params->unload_state_buf, 1, FOLL_WRITE, &page);
+    ret = NV_PIN_USER_PAGES(params->unload_state_buf, 1, FOLL_WRITE, &page, NULL);
     nv_mmap_read_unlock(current->mm);
 
     if (ret < 0)

@@ -1233,8 +1144,19 @@ static int uvm_init(void)
         goto error;
     }
 
+    pr_info("Loaded the UVM driver, major device number %d.\n", MAJOR(g_uvm_base_dev));
+
     if (uvm_enable_builtin_tests)
-        UVM_INFO_PRINT("Built-in UVM tests are enabled. This is a security risk.\n");
+        pr_info("Built-in UVM tests are enabled. This is a security risk.\n");
 
+    // After Open RM is released, both the enclosing "#if" and this comment
+    // block should be removed, because the uvm_hmm_is_enabled_system_wide()
+    // check is both necessary and sufficient for reporting functionality.
+    // Until that time, however, we need to avoid advertisting UVM's ability to
+    // enable HMM functionality.
+
+    if (uvm_hmm_is_enabled_system_wide())
+        UVM_INFO_PRINT("HMM (Heterogeneous Memory Management) is enabled in the UVM driver.\n");
+
     return 0;
 

@@ -1263,6 +1185,8 @@ static void uvm_exit(void)
     uvm_global_exit();
 
     uvm_test_unload_state_exit();
+
+    pr_info("Unloaded the UVM driver.\n");
 }
 
 static void __exit uvm_exit_entry(void)

@@ -1,5 +1,5 @@
 /*******************************************************************************
-    Copyright (c) 2013-2024 NVIDIA Corporation
+    Copyright (c) 2013-2022 NVIDIA Corporation
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to

@@ -45,20 +45,16 @@
 // #endif
 // 3) Do the same thing for the function definition, and for any structs that
 //    are taken as arguments to these functions.
-// 4) Let this change propagate over to cuda_a and bugfix_main, so that the CUDA
-//    and nvidia-cfg libraries can start using the new API by bumping up the API
-//    version number it's using.
-//    Places where UVM_API_REVISION is defined are:
-//        drivers/gpgpu/cuda/cuda.nvmk (cuda_a)
-//        drivers/setup/linux/nvidia-cfg/makefile.nvmk (bugfix_main)
-// 5) Once the bugfix_main and cuda_a changes have made it back into chips_a,
-//    remove the old API declaration, definition, and any old structs that were
-//    in use.
+// 4) Let this change propagate over to cuda_a, so that the CUDA driver can
+//    start using the new API by bumping up the API version number its using.
+//    This can be found in gpgpu/cuda/cuda.nvmk.
+// 5) Once the cuda_a changes have made it back into chips_a, remove the old API
+//    declaration, definition, and any old structs that were in use.
 
 #ifndef _UVM_H_
 #define _UVM_H_
 
-#define UVM_API_LATEST_REVISION 13
+#define UVM_API_LATEST_REVISION 8
 
 #if !defined(UVM_API_REVISION)
 #error "please define UVM_API_REVISION macro to a desired version number or UVM_API_LATEST_REVISION macro"

@@ -167,7 +163,7 @@ NV_STATUS UvmSetDriverVersion(NvU32 major, NvU32 changelist);
 //
 // Error codes:
 //     NV_ERR_NOT_SUPPORTED:
-//         The kernel is not able to support UVM. This could be because
+//         The Linux kernel is not able to support UVM. This could be because
 //         the kernel is too old, or because it lacks a feature that UVM
 //         requires. The kernel log will have details.
 //

@@ -184,8 +180,12 @@ NV_STATUS UvmSetDriverVersion(NvU32 major, NvU32 changelist);
 //         because it is not very informative.
 //
 //------------------------------------------------------------------------------
+#if UVM_API_REV_IS_AT_MOST(4)
+NV_STATUS UvmInitialize(UvmFileDescriptor fd);
+#else
 NV_STATUS UvmInitialize(UvmFileDescriptor fd,
                         NvU64 flags);
+#endif
 
 //------------------------------------------------------------------------------
 // UvmDeinitialize
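A usage sketch for the revision-gated prototype above; it assumes default flags of 0 are acceptable and that the client pins UVM_API_REVISION before including uvm.h:

    #define UVM_API_REVISION 8
    #include "uvm.h"

    static NV_STATUS init_uvm(UvmFileDescriptor fd)
    {
    #if UVM_API_REV_IS_AT_MOST(4)
        return UvmInitialize(fd);    /* old single-argument form */
    #else
        return UvmInitialize(fd, 0); /* 0 requests default flags */
    #endif
    }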
@@ -216,10 +216,6 @@ NV_STATUS UvmDeinitialize(void);
 // Note that it is not required to release VA ranges that were reserved with
 // UvmReserveVa().
 //
-// This is useful for per-process checkpoint and restore, where kernel-mode
-// state needs to be reconfigured to match the expectations of a pre-existing
-// user-mode process.
-//
 // UvmReopen() closes the open file returned by UvmGetFileDescriptor() and
 // replaces it with a new open file with the same name.
 //

@@ -297,9 +293,7 @@ NV_STATUS UvmIsPageableMemoryAccessSupported(NvBool *pageableMemAccess);
 //
 // Arguments:
 //     gpuUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, or the GPU instance UUID of the partition for which
-//         pageable memory access support is queried.
+//         UUID of the GPU for which pageable memory access support is queried.
 //
 //     pageableMemAccess: (OUTPUT)
 //         Returns true (non-zero) if the GPU represented by gpuUuid supports

@@ -329,19 +323,9 @@ NV_STATUS UvmIsPageableMemoryAccessSupportedOnGpu(const NvProcessorUuid *gpuUuid
 // usage. Calling UvmRegisterGpu multiple times on the same GPU from the same
 // process results in an error.
 //
-// After successfully registering a GPU partition, all subsequent API calls
-// which take a NvProcessorUuid argument (including UvmGpuMappingAttributes),
-// must use the GI partition UUID which can be obtained with
-// NvRmControl(NVC637_CTRL_CMD_GET_UUID). Otherwise, if the GPU is not SMC
-// capable or SMC enabled, the physical GPU UUID must be used.
-//
 // Arguments:
 //     gpuUuid: (INPUT)
-//         UUID of the physical GPU to register.
-//
-//     platformParams: (INPUT)
-//         User handles identifying the GPU partition to register.
-//         This should be NULL if the GPU is not SMC capable or SMC enabled.
+//         UUID of the GPU to register.
 //
 // Error codes:
 //     NV_ERR_NO_MEMORY:

@@ -376,27 +360,39 @@ NV_STATUS UvmIsPageableMemoryAccessSupportedOnGpu(const NvProcessorUuid *gpuUuid
 //         OS state required to register the GPU is not present.
 //
 //     NV_ERR_INVALID_STATE:
-//         OS state required to register the GPU is malformed, or the partition
-//         identified by the user handles or its configuration changed.
-//
-//     NV_ERR_NVLINK_FABRIC_NOT_READY:
-//         (On NvSwitch-connected system) Indicates that the fabric has not been
-//         configured yet. Caller must retry GPU registration.
-//
-//     NV_ERR_NVLINK_FABRIC_FAILURE:
-//         (On NvSwitch-connected systems) Indicates that the NvLink fabric
-//         failed to be configured.
-//
-//     NV_ERR_GPU_MEMORY_ONLINING_FAULURE:
-//         (On coherent systems) The GPU's memory onlining failed.
+//         OS state required to register the GPU is malformed.
 //
 //     NV_ERR_GENERIC:
 //         Unexpected error. We try hard to avoid returning this error code,
 //         because it is not very informative.
 //
 //------------------------------------------------------------------------------
-NV_STATUS UvmRegisterGpu(const NvProcessorUuid *gpuUuid,
-                         const UvmGpuPlatformParams *platformParams);
+NV_STATUS UvmRegisterGpu(const NvProcessorUuid *gpuUuid);
+
+//------------------------------------------------------------------------------
+// UvmRegisterGpuSmc
+//
+// The same as UvmRegisterGpu, but takes additional parameters to specify the
+// GPU partition being registered if SMC is enabled.
+//
+// TODO: Bug 2844714: Merge UvmRegisterGpuSmc() with UvmRegisterGpu() once
+// the initial SMC support is in place.
+//
+// Arguments:
+//     gpuUuid: (INPUT)
+//         UUID of the parent GPU of the SMC partition to register.
+//
+//     platformParams: (INPUT)
+//         User handles identifying the partition to register.
+//
+// Error codes (see UvmRegisterGpu also):
+//
+//     NV_ERR_INVALID_STATE:
+//         SMC was not enabled, or the partition identified by the user
+//         handles or its configuration changed.
+//
+NV_STATUS UvmRegisterGpuSmc(const NvProcessorUuid *gpuUuid,
+                            const UvmGpuPlatformParams *platformParams);
 
 //------------------------------------------------------------------------------
 // UvmUnregisterGpu
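A hedged sketch of registering a GPU against either prototype; testing UVM_API_LATEST_REVISION is one way to tell the two driver trees apart at compile time, and the UUID is assumed to have been obtained and parsed elsewhere:

    static NV_STATUS register_gpu(const NvProcessorUuid *uuid)
    {
    #if UVM_API_LATEST_REVISION >= 13
        /* 570.144 form: platformParams stays NULL unless SMC is enabled. */
        return UvmRegisterGpu(uuid, NULL);
    #else
        /* 535.216.01 form: SMC partitions use UvmRegisterGpuSmc() instead. */
        return UvmRegisterGpu(uuid);
    #endif
    }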
@@ -422,8 +418,7 @@ NV_STATUS UvmRegisterGpu(const NvProcessorUuid *gpuUuid,
|
|||||||
//
|
//
|
||||||
// Arguments:
|
// Arguments:
|
||||||
// gpuUuid: (INPUT)
|
// gpuUuid: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of the GPU to unregister.
|
||||||
// enabled, or the GPU instance UUID of the partition to unregister.
|
|
||||||
//
|
//
|
||||||
// Error codes:
|
// Error codes:
|
||||||
// NV_ERR_INVALID_DEVICE:
|
// NV_ERR_INVALID_DEVICE:
|
||||||
@@ -481,8 +476,7 @@ NV_STATUS UvmUnregisterGpu(const NvProcessorUuid *gpuUuid);
|
|||||||
//
|
//
|
||||||
// Arguments:
|
// Arguments:
|
||||||
// gpuUuid: (INPUT)
|
// gpuUuid: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of the GPU to register.
|
||||||
// enabled, or the GPU instance UUID of the partition to register.
|
|
||||||
//
|
//
|
||||||
// platformParams: (INPUT)
|
// platformParams: (INPUT)
|
||||||
// On Linux: RM ctrl fd, hClient and hVaSpace.
|
// On Linux: RM ctrl fd, hClient and hVaSpace.
|
||||||
@@ -553,9 +547,7 @@ NV_STATUS UvmRegisterGpuVaSpace(const NvProcessorUuid *gpuUuid,
|
|||||||
//
|
//
|
||||||
// Arguments:
|
// Arguments:
|
||||||
// gpuUuid: (INPUT)
|
// gpuUuid: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of the GPU whose VA space should be unregistered.
|
||||||
// enabled, or the GPU instance UUID of the partition whose VA space
|
|
||||||
// should be unregistered.
|
|
||||||
//
|
//
|
||||||
// Error codes:
|
// Error codes:
|
||||||
// NV_ERR_INVALID_DEVICE:
|
// NV_ERR_INVALID_DEVICE:
|
||||||
@@ -585,7 +577,7 @@ NV_STATUS UvmUnregisterGpuVaSpace(const NvProcessorUuid *gpuUuid);
|
|||||||
//
|
//
|
||||||
// The two GPUs must be connected via PCIe. An error is returned if the GPUs are
|
// The two GPUs must be connected via PCIe. An error is returned if the GPUs are
|
||||||
// not connected or are connected over an interconnect different than PCIe
|
// not connected or are connected over an interconnect different than PCIe
|
||||||
// (NVLink or SMC partitions, for example).
|
// (NVLink, for example).
|
||||||
//
|
//
|
||||||
// If both GPUs have GPU VA spaces registered for them, the two GPU VA spaces
|
// If both GPUs have GPU VA spaces registered for them, the two GPU VA spaces
|
||||||
// must support the same set of page sizes for GPU mappings.
|
// must support the same set of page sizes for GPU mappings.
|
||||||
@@ -598,12 +590,10 @@ NV_STATUS UvmUnregisterGpuVaSpace(const NvProcessorUuid *gpuUuid);
|
|||||||
//
|
//
|
||||||
// Arguments:
|
// Arguments:
|
||||||
// gpuUuidA: (INPUT)
|
// gpuUuidA: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of GPU A.
|
||||||
// enabled, or the GPU instance UUID of the partition A.
|
|
||||||
//
|
//
|
||||||
// gpuUuidB: (INPUT)
|
// gpuUuidB: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of GPU B.
|
||||||
// enabled, or the GPU instance UUID of the partition B.
|
|
||||||
//
|
//
|
||||||
// Error codes:
|
// Error codes:
|
||||||
// NV_ERR_NO_MEMORY:
|
// NV_ERR_NO_MEMORY:
|
||||||
@@ -649,12 +639,10 @@ NV_STATUS UvmEnablePeerAccess(const NvProcessorUuid *gpuUuidA,
|
|||||||
//
|
//
|
||||||
// Arguments:
|
// Arguments:
|
||||||
// gpuUuidA: (INPUT)
|
// gpuUuidA: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of GPU A.
|
||||||
// enabled, or the GPU instance UUID of the partition A.
|
|
||||||
//
|
//
|
||||||
// gpuUuidB: (INPUT)
|
// gpuUuidB: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of GPU B.
|
||||||
// enabled, or the GPU instance UUID of the partition B.
|
|
||||||
//
|
//
|
||||||
// Error codes:
|
// Error codes:
|
||||||
// NV_ERR_INVALID_DEVICE:
|
// NV_ERR_INVALID_DEVICE:
|
||||||
@@ -699,9 +687,7 @@ NV_STATUS UvmDisablePeerAccess(const NvProcessorUuid *gpuUuidA,
|
|||||||
//
|
//
|
||||||
// Arguments:
|
// Arguments:
|
||||||
// gpuUuid: (INPUT)
|
// gpuUuid: (INPUT)
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
// UUID of the GPU that the channel is associated with.
|
||||||
// enabled, or the GPU instance UUID of the partition that the channel is
|
|
||||||
// associated with.
|
|
||||||
//
|
//
|
||||||
// platformParams: (INPUT)
|
// platformParams: (INPUT)
|
||||||
// On Linux: RM ctrl fd, hClient and hChannel.
|
// On Linux: RM ctrl fd, hClient and hChannel.
|
||||||
@@ -1140,14 +1126,11 @@ NV_STATUS UvmAllowMigrationRangeGroups(const NvU64 *rangeGroupIds,
|
|||||||
// Length, in bytes, of the range.
|
// Length, in bytes, of the range.
|
||||||
//
|
//
|
||||||
// preferredLocationUuid: (INPUT)
|
// preferredLocationUuid: (INPUT)
|
||||||
// UUID of the CPU, UUID of the physical GPU if the GPU is not SMC
|
// UUID of the preferred location for this VA range.
|
||||||
// capable or SMC enabled, or the GPU instance UUID of the partition of
|
|
||||||
// the preferred location for this VA range.
|
|
||||||
//
|
//
|
||||||
// accessedByUuids: (INPUT)
|
// accessedByUuids: (INPUT)
|
||||||
// UUID of the CPU, UUID of the physical GPUs if the GPUs are not SMC
|
// UUIDs of all processors that should have persistent mappings to this
|
||||||
// capable or SMC enabled, or the GPU instance UUID of the partitions
|
// VA range.
|
||||||
// that should have persistent mappings to this VA range.
|
|
||||||
//
|
//
|
||||||
// accessedByCount: (INPUT)
|
// accessedByCount: (INPUT)
|
||||||
// Number of elements in the accessedByUuids array.
|
// Number of elements in the accessedByUuids array.
|
||||||
@@ -1328,8 +1311,9 @@ NV_STATUS UvmCleanUpZombieResources(void);
|
|||||||
//
|
//
|
||||||
// NV_ERR_INVALID_ARGUMENT:
|
// NV_ERR_INVALID_ARGUMENT:
|
||||||
// perGpuAttribs is NULL but gpuAttribsCount is non-zero or vice-versa,
|
// perGpuAttribs is NULL but gpuAttribsCount is non-zero or vice-versa,
|
||||||
// or caching is requested on more than one GPU, or (in Confidential
|
// or caching is requested on more than one GPU.
|
||||||
// Computing only) the perGpuAttribs list is empty.
|
// The Confidential Computing feature is enabled and the perGpuAttribs
|
||||||
|
// list is empty.
|
||||||
//
|
//
|
||||||
// NV_ERR_NOT_SUPPORTED:
|
// NV_ERR_NOT_SUPPORTED:
|
||||||
// The current process is not the one which called UvmInitialize, and
|
// The current process is not the one which called UvmInitialize, and
|
||||||
@@ -1346,86 +1330,6 @@ NV_STATUS UvmAllocSemaphorePool(void *base,
|
|||||||
const UvmGpuMappingAttributes *perGpuAttribs,
|
const UvmGpuMappingAttributes *perGpuAttribs,
|
||||||
NvLength gpuAttribsCount);
|
NvLength gpuAttribsCount);
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
|
||||||
// UvmAllocDeviceP2P
|
|
||||||
//
|
|
||||||
// Create a VA range within the process's address space reserved for use by
|
|
||||||
// other devices to directly access GPU memory. The memory associated with the
|
|
||||||
// RM handle is mapped into the user address space associated with the range for
|
|
||||||
// direct access from the CPU.
|
|
||||||
//
|
|
||||||
// The VA range must not overlap with an existing VA range, irrespective of
|
|
||||||
// whether the existing range corresponds to a UVM allocation or an external
|
|
||||||
// allocation.
|
|
||||||
//
|
|
||||||
// Multiple VA ranges may be created mapping the same physical memory associated
|
|
||||||
// with the RM handle. The associated GPU memory will not be freed until all VA
|
|
||||||
// ranges have been destroyed either explicitly or implicitly and all non-UVM
|
|
||||||
// users (eg. third party device drivers) have stopped using the associated
|
|
||||||
// GPU memory.
|
|
||||||
//
|
|
||||||
// The VA range can be unmapped and freed by calling UvmFree.
|
|
||||||
//
|
|
||||||
// Destroying the final range mapping the RM handle may block until all third
|
|
||||||
// party device drivers and other kernel users have stopped using the memory.
|
|
||||||
//
|
|
||||||
// These VA ranges are only associated with a single GPU.
|
|
||||||
//
|
|
||||||
// Arguments:
|
|
||||||
// gpuUuid: (INPUT)
|
|
||||||
// UUID of the physical GPU if the GPU is not SMC capable or SMC
|
|
||||||
// enabled, or the GPU instance UUID of the partition containing the
|
|
||||||
// memory to be mapped on the CPU.
|
|
||||||
//
|
|
||||||
// base: (INPUT)
|
|
||||||
// Base address of the virtual address range.
|
|
||||||
//
|
|
||||||
// length: (INPUT)
|
|
||||||
// Length, in bytes, of the range.
|
|
||||||
//
|
|
||||||
// offset: (INPUT)
|
|
||||||
// Offset, in bytes, from the start of the externally allocated memory
|
|
||||||
// to map from.
|
|
||||||
//
|
|
||||||
// platformParams: (INPUT)
|
|
||||||
// Platform specific parameters that identify the allocation.
|
|
||||||
// On Linux: RM ctrl fd, hClient and the handle (hMemory) of the
|
|
||||||
// externally allocated memory to map.
|
|
||||||
//
|
|
||||||
// Errors:
|
|
||||||
//
|
|
||||||
// NV_ERR_INVALID_ADDRESS:
|
|
||||||
// base is NULL or length is zero or at least one of base and length is
|
|
||||||
// not aligned to 4K.
|
|
||||||
//
|
|
||||||
// NV_ERR_INVALID_DEVICE:
|
|
||||||
// The gpuUuid was either not registered or has no GPU VA space
|
|
||||||
// registered for it.
|
|
||||||
//
|
|
||||||
// NV_ERR_INVALID_ARGUMENT:
|
|
||||||
// base + offset + length exceeeds the end of the externally allocated
|
|
||||||
// memory handle or the externally allocated handle is not valid.
|
|
||||||
//
|
|
||||||
// NV_ERR_UVM_ADDRESS_IN_USE:
|
|
||||||
// The requested virtual address range overlaps with an existing
|
|
||||||
// allocation.
|
|
||||||
//
|
|
||||||
// NV_ERR_NO_MEMORY:
|
|
||||||
// Internal memory allocation failed.
|
|
||||||
//
|
|
||||||
// NV_ERR_NOT_SUPPORTED:
|
|
||||||
// The device peer-to-peer feature is not supported by the current
|
|
||||||
// system configuration. This may be because the GPU doesn't support
|
|
||||||
// the peer-to-peer feature or the kernel was not built with the correct
|
|
||||||
// configuration options.
|
|
||||||
//
|
|
||||||
//------------------------------------------------------------------------------
|
|
||||||
NV_STATUS UvmAllocDeviceP2P(NvProcessorUuid gpuUuid,
|
|
||||||
void *base,
|
|
||||||
NvLength length,
|
|
||||||
NvLength offset,
|
|
||||||
const UvmDeviceP2PPlatformParams *platformParams);
|
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
// UvmMigrate
|
// UvmMigrate
|
||||||
//
|
//
|
||||||
@@ -1479,9 +1383,7 @@ NV_STATUS UvmAllocDeviceP2P(NvProcessorUuid gpuUuid,
|
|||||||
// If read duplication is enabled on any pages in the VA range, then those pages
|
// If read duplication is enabled on any pages in the VA range, then those pages
|
||||||
// are read duplicated at the destination processor, leaving the source copy, if
|
// are read duplicated at the destination processor, leaving the source copy, if
|
||||||
// present, intact with only its mapping changed to read-only if it wasn't
|
// present, intact with only its mapping changed to read-only if it wasn't
|
||||||
// already mapped that way. The exception to this behavior is migrating pages
|
// already mapped that way.
|
||||||
// between different NUMA nodes, in which case the pages are migrated to the
|
|
||||||
// destination node and a read-only mapping is created to the migrated pages.
|
|
||||||
//
|
//
|
||||||
// Pages in the VA range are migrated even if their preferred location is set to
|
// Pages in the VA range are migrated even if their preferred location is set to
|
||||||
// a processor other than the destination processor.
|
// a processor other than the destination processor.
|
||||||
@@ -1506,15 +1408,12 @@ NV_STATUS UvmAllocDeviceP2P(NvProcessorUuid gpuUuid,
 //         Length, in bytes, of the range.
 //
 //     destinationUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, the GPU instance UUID of the partition, or the CPU UUID to
-//         migrate pages to.
+//         UUID of the destination processor to migrate pages to.
 //
 //     preferredCpuMemoryNode: (INPUT)
 //         Preferred CPU NUMA memory node used if the destination processor is
-//         the CPU. -1 indicates no preference, in which case the pages used
-//         can be on any of the available CPU NUMA nodes. If NUMA is disabled
-//         only 0 and -1 are allowed.
+//         the CPU. This argument is ignored if the given virtual address range
+//         corresponds to managed memory.
 //
 // Error codes:
 //     NV_ERR_INVALID_ADDRESS:
@@ -1528,11 +1427,6 @@ NV_STATUS UvmAllocDeviceP2P(NvProcessorUuid gpuUuid,
 //         The VA range exceeds the largest virtual address supported by the
 //         destination processor.
 //
-//     NV_ERR_INVALID_ARGUMENT:
-//         preferredCpuMemoryNode is not a valid CPU NUMA node or it corresponds
-//         to a NUMA node ID for a registered GPU. If NUMA is disabled, it
-//         indicates that preferredCpuMemoryNode was not either 0 or -1.
-//
 //     NV_ERR_INVALID_DEVICE:
 //         destinationUuid does not represent a valid processor such as a CPU or
 //         a GPU with a GPU VA space registered for it. Or destinationUuid is a
@@ -1558,10 +1452,16 @@ NV_STATUS UvmAllocDeviceP2P(NvProcessorUuid gpuUuid,
 //         pages were associated with a non-migratable range group.
 //
 //------------------------------------------------------------------------------
+#if UVM_API_REV_IS_AT_MOST(5)
+NV_STATUS UvmMigrate(void *base,
+                     NvLength length,
+                     const NvProcessorUuid *destinationUuid);
+#else
 NV_STATUS UvmMigrate(void *base,
                      NvLength length,
                      const NvProcessorUuid *destinationUuid,
                      NvS32 preferredCpuMemoryNode);
+#endif
 
 //------------------------------------------------------------------------------
 // UvmMigrateAsync
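Because the two revisions declare different UvmMigrate prototypes, a caller that must build against either header can key off the same UVM_API_REV_IS_AT_MOST macro used above. A minimal sketch, assuming uvm.h is included and the destination GPU has a registered VA space:

    // Hypothetical helper: migrate a range to 'dest', compiling against either
    // revision of the prototype shown above.
    static NV_STATUS migrate_to(void *base, NvLength length,
                                const NvProcessorUuid *dest)
    {
    #if UVM_API_REV_IS_AT_MOST(5)
        return UvmMigrate(base, length, dest);
    #else
        // -1: no CPU NUMA preference; only consulted when 'dest' is the CPU.
        return UvmMigrate(base, length, dest, -1);
    #endif
    }
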
@@ -1593,15 +1493,12 @@ NV_STATUS UvmMigrate(void *base,
 //         Length, in bytes, of the range.
 //
 //     destinationUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, the GPU instance UUID of the partition, or the CPU UUID to
-//         migrate pages to.
+//         UUID of the destination processor to migrate pages to.
 //
 //     preferredCpuMemoryNode: (INPUT)
 //         Preferred CPU NUMA memory node used if the destination processor is
-//         the CPU. -1 indicates no preference, in which case the pages used
-//         can be on any of the available CPU NUMA nodes. If NUMA is disabled
-//         only 0 and -1 are allowed.
+//         the CPU. This argument is ignored if the given virtual address range
+//         corresponds to managed memory.
 //
 //     semaphoreAddress: (INPUT)
 //         Base address of the semaphore.
@@ -1646,20 +1543,30 @@ NV_STATUS UvmMigrate(void *base,
 //         pages were associated with a non-migratable range group.
 //
 //------------------------------------------------------------------------------
+#if UVM_API_REV_IS_AT_MOST(5)
+NV_STATUS UvmMigrateAsync(void *base,
+                          NvLength length,
+                          const NvProcessorUuid *destinationUuid,
+                          void *semaphoreAddress,
+                          NvU32 semaphorePayload);
+#else
 NV_STATUS UvmMigrateAsync(void *base,
                           NvLength length,
                           const NvProcessorUuid *destinationUuid,
                           NvS32 preferredCpuMemoryNode,
                           void *semaphoreAddress,
                           NvU32 semaphorePayload);
+#endif
 
 //------------------------------------------------------------------------------
 // UvmMigrateRangeGroup
 //
 // Migrates the backing of all virtual address ranges associated with the given
 // range group to the specified destination processor. The behavior of this API
-// is equivalent to calling UvmMigrate with preferredCpuMemoryNode = -1 on each
-// VA range associated with this range group.
+// is equivalent to calling UvmMigrate on each VA range associated with this
+// range group. The value for preferredCpuMemoryNode is irrelevant in this
+// case as it only applies to migrations of pageable addresses, which cannot be
+// used to create range groups.
 //
 // Any errors encountered during migration are returned immediately. No attempt
 // is made to migrate the remaining unmigrated ranges and the ranges that are
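A hedged sketch of the asynchronous variant declared above, using the post-rev-5 signature kept by both versions. Per the semaphore parameters documented for this call, 'sem' must point at a UVM-accessible NvU32 that the caller zeroes beforehand.

    // Start an asynchronous migration and spin until the payload lands.
    static NV_STATUS migrate_async_and_wait(void *base, NvLength length,
                                            const NvProcessorUuid *dest,
                                            volatile NvU32 *sem)
    {
        NV_STATUS status = UvmMigrateAsync(base, length, dest,
                                           -1,           // no CPU NUMA preference
                                           (void *)sem,  // semaphoreAddress
                                           1);           // semaphorePayload
        if (status != NV_OK)
            return status;
        while (*sem != 1)
            ;  // poll; a real caller would bound this or sleep between reads
        return NV_OK;
    }
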
@@ -1673,9 +1580,7 @@ NV_STATUS UvmMigrateAsync(void *base,
 //         Id of the range group whose associated VA ranges have to be migrated.
 //
 //     destinationUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, the GPU instance UUID of the partition, or the CPU UUID to
-//         migrate pages to.
+//         UUID of the destination processor to migrate pages to.
 //
 // Error codes:
 //     NV_ERR_OBJECT_NOT_FOUND:
@@ -2037,9 +1942,7 @@ NV_STATUS UvmMapExternalAllocation(void *base,
 //
 //
 //     gpuUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, or the GPU instance UUID of the partition to map the sparse
-//         region on.
+//         UUID of the GPU to map the sparse region on.
 //
 // Errors:
 //     NV_ERR_INVALID_ADDRESS:
@@ -2096,9 +1999,7 @@ NV_STATUS UvmMapExternalSparse(void *base,
 //         The length of the virtual address range.
 //
 //     gpuUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, or the GPU instance UUID of the partition to unmap the VA
-//         range from.
+//         UUID of the GPU to unmap the VA range from.
 //
 // Errors:
 //     NV_ERR_INVALID_ADDRESS:
@@ -2165,9 +2066,7 @@ NV_STATUS UvmUnmapExternalAllocation(void *base,
 //         supported by the GPU.
 //
 //     gpuUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, or the GPU instance UUID of the partition to map the
-//         dynamic parallelism region on.
+//         UUID of the GPU to map the dynamic parallelism region on.
 //
 // Errors:
 //     NV_ERR_UVM_ADDRESS_IN_USE:
@@ -2224,9 +2123,7 @@ NV_STATUS UvmMapDynamicParallelismRegion(void *base,
 //
 // If UvmMigrate, UvmMigrateAsync or UvmMigrateRangeGroup is called on any pages
 // in this VA range, then those pages will also be read duplicated on the
-// destination processor for the migration unless the migration is between CPU
-// NUMA nodes, in which case the pages are migrated to the destination NUMA
-// node and a read-only mapping to the migrated pages is created.
+// destination processor for the migration.
 //
 // Enabling read duplication on a VA range requires the CPU and all GPUs with
 // registered VA spaces to be fault-capable. Otherwise, the migration and
@@ -2243,8 +2140,7 @@ NV_STATUS UvmMapDynamicParallelismRegion(void *base,
 //
 // If any page in the VA range has a preferred location, then the migration and
 // mapping policies associated with this API take precedence over those related
-// to the preferred location. If the preferred location is a specific CPU NUMA
-// node, that NUMA node will be used for a CPU-resident copy of the page.
+// to the preferred location.
 //
 // If any pages in this VA range have any processors present in their
 // accessed-by list, the migration and mapping policies associated with this
@@ -2375,7 +2271,7 @@ NV_STATUS UvmDisableReadDuplication(void *base,
 // UvmPreventMigrationRangeGroups has not been called on the range group that
 // those pages are associated with, then the migration and mapping policies
 // associated with UvmEnableReadDuplication override the policies outlined
-// above. Note that enabling read duplication on any pages in this VA range
+// above. Note that enabling read duplication on on any pages in this VA range
 // does not clear the state set by this API for those pages. It merely overrides
 // the policies associated with this state until read duplication is disabled
 // for those pages.
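A brief usage sketch for the read-duplication pair discussed above. The (base, length) shape is inferred from the UvmDisableReadDuplication prototype fragment visible in the hunk header, so treat it as an assumption.

    // Opt a range into read duplication for a read-mostly phase, then undo it.
    static void toggle_read_duplication(void *base, NvLength length)
    {
        if (UvmEnableReadDuplication(base, length) == NV_OK) {
            /* ... read-mostly phase across the CPU and fault-capable GPUs ... */
            UvmDisableReadDuplication(base, length);
        }
    }
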
@@ -2401,15 +2297,15 @@ NV_STATUS UvmDisableReadDuplication(void *base,
 //         Length, in bytes, of the range.
 //
 //     preferredLocationUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, the GPU instance UUID of the partition, or the CPU UUID of
-//         the preferred location.
+//         UUID of the preferred location.
 //
-//     preferredCpuMemoryNode: (INPUT)
+//     preferredCpuNumaNode: (INPUT)
 //         Preferred CPU NUMA memory node used if preferredLocationUuid is the
 //         UUID of the CPU. -1 is a special value which indicates all CPU nodes
-//         allowed by the global and thread memory policies. If NUMA is disabled
-//         only 0 and -1 are allowed.
+//         allowed by the global and thread memory policies. This argument is
+//         ignored if preferredLocationUuid refers to a GPU or the given virtual
+//         address range corresponds to managed memory. If NUMA is not enabled,
+//         only 0 or -1 is allowed.
 //
 // Errors:
 //     NV_ERR_INVALID_ADDRESS:
@@ -2439,11 +2335,10 @@ NV_STATUS UvmDisableReadDuplication(void *base,
 //
 //     NV_ERR_INVALID_ARGUMENT:
 //         One of the following occurred:
-//         - preferredLocationUuid is the UUID of the CPU and
-//           preferredCpuMemoryNode is either:
-//           - not a valid NUMA node,
-//           - not a possible NUMA node, or
-//           - a NUMA node ID corresponding to a registered GPU.
+//         - preferredLocationUuid is the UUID of a CPU and preferredCpuNumaNode
+//           refers to a registered GPU.
+//         - preferredCpuNumaNode is invalid and preferredLocationUuid is the
+//           UUID of the CPU.
 //
 //     NV_ERR_NOT_SUPPORTED:
 //         The UVM file descriptor is associated with another process and the
@@ -2454,10 +2349,16 @@ NV_STATUS UvmDisableReadDuplication(void *base,
 //         because it is not very informative.
 //
 //------------------------------------------------------------------------------
+#if UVM_API_REV_IS_AT_MOST(7)
+NV_STATUS UvmSetPreferredLocation(void *base,
+                                  NvLength length,
+                                  const NvProcessorUuid *preferredLocationUuid);
+#else
 NV_STATUS UvmSetPreferredLocation(void *base,
                                   NvLength length,
                                   const NvProcessorUuid *preferredLocationUuid,
-                                  NvS32 preferredCpuMemoryNode);
+                                  NvS32 preferredCpuNumaNode);
+#endif
 
 //------------------------------------------------------------------------------
 // UvmUnsetPreferredLocation
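As with UvmMigrate, a caller can bridge the two prototypes above with the revision macro. A minimal sketch; 'cpuUuid' is a placeholder for the CPU's processor UUID and node 0 is an arbitrary example NUMA node:

    // Prefer CPU residency for a range, compiling against either revision.
    static NV_STATUS prefer_cpu(void *base, NvLength length,
                                const NvProcessorUuid *cpuUuid)
    {
    #if UVM_API_REV_IS_AT_MOST(7)
        return UvmSetPreferredLocation(base, length, cpuUuid);
    #else
        return UvmSetPreferredLocation(base, length, cpuUuid, 0 /* NUMA node */);
    #endif
    }
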
@@ -2580,9 +2481,8 @@ NV_STATUS UvmUnsetPreferredLocation(void *base,
 //         Length, in bytes, of the range.
 //
 //     accessedByUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, the GPU instance UUID of the partition, or the CPU UUID
-//         that should have pages in the VA range mapped when possible.
+//         UUID of the processor that should have pages in the VA range
+//         mapped when possible.
 //
 // Errors:
 //     NV_ERR_INVALID_ADDRESS:
@@ -2650,10 +2550,8 @@ NV_STATUS UvmSetAccessedBy(void *base,
 //         Length, in bytes, of the range.
 //
 //     accessedByUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, the GPU instance UUID of the partition, or the CPU UUID
-//         from which any policies set by UvmSetAccessedBy should be revoked
-//         for the given VA range.
+//         UUID of the processor from which any policies set by
+//         UvmSetAccessedBy should be revoked for the given VA range.
 //
 // Errors:
 //     NV_ERR_INVALID_ADDRESS:
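A usage sketch for the accessed-by pair. The (base, length, uuid) shape is inferred from the prototype fragments in the surrounding hunk headers, so it is an assumption rather than a quoted declaration.

    // Hint that one processor should keep the range mapped, then revoke it.
    static void hint_accessed_by(void *base, NvLength length,
                                 const NvProcessorUuid *procUuid)
    {
        if (UvmSetAccessedBy(base, length, procUuid) == NV_OK) {
            /* ... phase where procUuid accesses the range remotely ... */
            UvmUnsetAccessedBy(base, length, procUuid);
        }
    }
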
@@ -2711,9 +2609,7 @@ NV_STATUS UvmUnsetAccessedBy(void *base,
 //
 // Arguments:
 //     gpuUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, or the GPU instance UUID of the partition to enable
-//         software-assisted system-wide atomics on.
+//         UUID of the GPU to enable software-assisted system-wide atomics on.
 //
 // Error codes:
 //     NV_ERR_NO_MEMORY:
@@ -2749,9 +2645,7 @@ NV_STATUS UvmEnableSystemWideAtomics(const NvProcessorUuid *gpuUuid);
 //
 // Arguments:
 //     gpuUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, or the GPU instance UUID of the partition to disable
-//         software-assisted system-wide atomics on.
+//         UUID of the GPU to disable software-assisted system-wide atomics on.
 //
 // Error codes:
 //     NV_ERR_INVALID_DEVICE:
@@ -2980,9 +2874,7 @@ NV_STATUS UvmDebugCountersEnable(UvmDebugSession session,
 //         Name of the counter in that scope.
 //
 //     gpu: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, or the GPU instance UUID of the partition of the scoped GPU.
-//         This parameter is ignored in AllGpu scopes.
+//         Gpuid of the scoped GPU. This parameter is ignored in AllGpu scopes.
 //
 //     pCounterHandle: (OUTPUT)
 //         Handle to the counter address.
@@ -3036,7 +2928,7 @@ NV_STATUS UvmDebugGetCounterVal(UvmDebugSession session,
 // UvmEventQueueCreate
 //
 // This call creates an event queue of the given size.
-// No events are added in the queue until they are enabled by the user.
+// No events are added in the queue till they are enabled by the user.
 // Event queue data is visible to the user even after the target process dies
 // if the session is active and queue is not freed.
 //
@@ -3087,7 +2979,7 @@ NV_STATUS UvmEventQueueCreate(UvmDebugSession sessionHandle,
 // UvmEventQueueDestroy
 //
 // This call frees all internal resources associated with the queue, including
-// unpinning of the memory associated with that queue. Freeing user buffer is
+// upinning of the memory associated with that queue. Freeing user buffer is
 // responsibility of a caller. Event queue might be also destroyed as a side
 // effect of destroying a session associated with this queue.
 //
@@ -3271,9 +3163,9 @@ NV_STATUS UvmEventGetNotificationHandles(UvmEventQueueHandle *queueHandleArray,
 // UvmEventGetGpuUuidTable
 //
 // Each migration event entry contains the gpu index to/from where data is
-// migrated. This index maps to a corresponding physical gpu UUID in the
-// gpuUuidTable. Using indices saves on the size of each event entry. This API
-// provides the gpuIndex to gpuUuid relation to the user.
+// migrated. This index maps to a corresponding gpu UUID in the gpuUuidTable.
+// Using indices saves on the size of each event entry. This API provides the
+// gpuIndex to gpuUuid relation to the user.
 //
 // This API does not access the queue state maintained in the user
 // library and so the user doesn't need to acquire a lock to protect the
@@ -3281,9 +3173,9 @@ NV_STATUS UvmEventGetNotificationHandles(UvmEventQueueHandle *queueHandleArray,
 //
 // Arguments:
 //     gpuUuidTable: (OUTPUT)
-//         The return value is an array of physical GPU UUIDs. The array index
-//         is the corresponding gpuIndex. There can be at max 32 GPUs
-//         associated with UVM, so array size is 32.
+//         The return value is an array of UUIDs. The array index is the
+//         corresponding gpuIndex. There can be at max 32 gpus associated with
+//         UVM, so array size is 32.
 //
 //     validCount: (OUTPUT)
 //         The system doesn't normally contain 32 GPUs. This field gives the
@@ -3538,16 +3430,19 @@ NV_STATUS UvmToolsDestroySession(UvmToolsSessionHandle session);
 // 4. Destroy event Queue using UvmToolsDestroyEventQueue
 //
 
+NvLength UvmToolsGetEventControlSize(void);
+
+NvLength UvmToolsGetEventEntrySize(void);
+
 NvLength UvmToolsGetNumberOfCounters(void);
 
 //------------------------------------------------------------------------------
 // UvmToolsCreateEventQueue
 //
-// This function is deprecated. See UvmToolsCreateEventQueue_V2.
-//
-// This call creates an event queue that can hold the given number of
-// UvmEventEntry events. All events are disabled by default. Event queue data
-// persists for the lifetime of the target process.
+// This call creates an event queue that can hold the given number of events.
+// All events are disabled by default. Event queue data persists for the
+// lifetime of the target process.
 //
 // Arguments:
 //     session: (INPUT)
@@ -3565,7 +3460,9 @@ NvLength UvmToolsGetNumberOfCounters(void);
 //     event_control (INPUT)
 //         User allocated buffer. Must be page-aligned. Must be large enough to
 //         hold UvmToolsEventControlData (although single page-size allocation
-//         should be more than enough). Gets pinned until queue is destroyed.
+//         should be more than enough). One could call the
+//         UvmToolsGetEventControlSize() function to find out the current size
+//         of UvmToolsEventControlData. Gets pinned until queue is destroyed.
 //
 //     queue: (OUTPUT)
 //         Handle to the created queue.
@@ -3579,72 +3476,18 @@ NvLength UvmToolsGetNumberOfCounters(void);
 //         is not valid
 //
 //     NV_ERR_INSUFFICIENT_RESOURCES:
-//         There could be multiple reasons for this error. One would be that
-//         it's not possible to allocate a queue of requested size. Another
-//         would be either event_buffer or event_control memory couldn't be
-//         pinned (e.g. because of OS limitation of pinnable memory). Also it
-//         could not have been possible to create UvmToolsEventQueueDescriptor.
+//         There could be multiple reasons for this error. One would be that it's
+//         not possible to allocate a queue of requested size. Another would be
+//         that either event_buffer or event_control memory couldn't be pinned
+//         (e.g. because of OS limitation of pinnable memory). Also it could not
+//         have been possible to create UvmToolsEventQueueDescriptor.
-//
-NV_STATUS UvmToolsCreateEventQueue(UvmToolsSessionHandle session,
-                                   void *event_buffer,
-                                   NvLength event_buffer_size,
-                                   void *event_control,
-                                   UvmToolsEventQueueHandle *queue);
-
-//------------------------------------------------------------------------------
-// UvmToolsCreateEventQueue_V2
-//
-// This call creates an event queue that can hold the given number of
-// UvmEventEntry_V2 events. All events are disabled by default. Event queue data
-// persists beyond the lifetime of the target process.
-//
-// Arguments:
-//     session: (INPUT)
-//         Handle to the tools session.
-//
-//     event_buffer: (INPUT)
-//         User allocated buffer. Must be page-aligned. Must be large enough to
-//         hold at least event_buffer_size events. Gets pinned until queue is
-//         destroyed.
-//
-//     event_buffer_size: (INPUT)
-//         Size of the event queue buffer in units of UvmEventEntry_V2's. Must
-//         be a power of two, and greater than 1.
-//
-//     event_control (INPUT)
-//         User allocated buffer. Must be page-aligned. Must be large enough to
-//         hold UvmToolsEventControlData (although single page-size allocation
-//         should be more than enough). Gets pinned until queue is destroyed.
-//
-//     queue: (OUTPUT)
-//         Handle to the created queue.
-//
-// Error codes:
-//     NV_ERR_INSUFFICIENT_PERMISSIONS:
-//         Session handle does not refer to a valid session
-//
-//     NV_ERR_INVALID_ARGUMENT:
-//         One of the parameters: event_buffer, event_buffer_size, event_control
-//         is not valid
-//
-//     NV_ERR_NOT_SUPPORTED:
-//         The requested version queue could not be created
-//         (i.e., the UVM kernel driver is older and doesn't support
-//         UvmToolsEventQueueVersion_V2).
-//
-//     NV_ERR_INSUFFICIENT_RESOURCES:
-//         There could be multiple reasons for this error. One would be that
-//         it's not possible to allocate a queue of requested size. Another
-//         would be either event_buffer or event_control memory couldn't be
-//         pinned (e.g. because of OS limitation of pinnable memory). Also it
-//         could not have been possible to create UvmToolsEventQueueDescriptor.
 //
 //------------------------------------------------------------------------------
-NV_STATUS UvmToolsCreateEventQueue_V2(UvmToolsSessionHandle session,
+NV_STATUS UvmToolsCreateEventQueue(UvmToolsSessionHandle session,
                                    void *event_buffer,
                                    NvLength event_buffer_size,
                                    void *event_control,
                                    UvmToolsEventQueueHandle *queue);
 
 UvmToolsEventQueueDescriptor UvmToolsGetEventQueueDescriptor(UvmToolsEventQueueHandle queue);
 
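A hedged sketch of creating a tools event queue against the 535-side API above, which sizes buffers through UvmToolsGetEventEntrySize() and UvmToolsGetEventControlSize(). The session handle is assumed to come from an earlier UvmToolsCreateSession call; error constants are those named in the surrounding comments.

    #include <stdlib.h>   // aligned_alloc

    static NV_STATUS make_queue(UvmToolsSessionHandle session,
                                UvmToolsEventQueueHandle *queue)
    {
        NvLength n_events = 256;  // must be a power of two, greater than 1
        size_t buf_bytes  = (size_t)(n_events * UvmToolsGetEventEntrySize());
        size_t ctl_bytes  = (size_t)UvmToolsGetEventControlSize();

        // Both buffers must be page-aligned; round sizes up to 4 KiB.
        void *events  = aligned_alloc(4096, (buf_bytes + 4095) & ~(size_t)4095);
        void *control = aligned_alloc(4096, (ctl_bytes + 4095) & ~(size_t)4095);
        if (!events || !control)
            return NV_ERR_NO_MEMORY;

        return UvmToolsCreateEventQueue(session, events, n_events, control, queue);
    }
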
@@ -3681,7 +3524,7 @@ NV_STATUS UvmToolsSetNotificationThreshold(UvmToolsEventQueueHandle queue,
 //------------------------------------------------------------------------------
 // UvmToolsDestroyEventQueue
 //
-// Destroys all internal resources associated with the queue. It unpins the
+// Destroys all internal resources associated with the queue. It unpinns the
 // buffers provided in UvmToolsCreateEventQueue. Event Queue is also auto
 // destroyed when corresponding session gets destroyed.
 //
@@ -3703,7 +3546,7 @@ NV_STATUS UvmToolsDestroyEventQueue(UvmToolsEventQueueHandle queue);
 // UvmEventQueueEnableEvents
 //
 // This call enables a particular event type in the event queue. All events are
-// disabled by default. Any event type is considered listed if and only if its
+// disabled by default. Any event type is considered listed if and only if it's
 // corresponding value is equal to 1 (in other words, bit is set). Disabled
 // events listed in eventTypeFlags are going to be enabled. Enabled events and
 // events not listed in eventTypeFlags are not affected by this call.
@@ -3736,7 +3579,7 @@ NV_STATUS UvmToolsEventQueueEnableEvents(UvmToolsEventQueueHandle queue,
 // UvmToolsEventQueueDisableEvents
 //
 // This call disables a particular event type in the event queue. Any event type
-// is considered listed if and only if its corresponding value is equal to 1
+// is considered listed if and only if it's corresponding value is equal to 1
 // (in other words, bit is set). Enabled events listed in eventTypeFlags are
 // going to be disabled. Disabled events and events not listed in eventTypeFlags
 // are not affected by this call.
@@ -3774,7 +3617,7 @@ NV_STATUS UvmToolsEventQueueDisableEvents(UvmToolsEventQueueHandle queue,
 //
 // Counters position follows the layout of the memory that UVM driver decides to
 // use. To obtain particular counter value, user should perform consecutive
-// atomic reads at a given buffer + offset address.
+// atomic reads at a a given buffer + offset address.
 //
 // It is not defined what is the initial value of a counter. User should rely on
 // a difference between each snapshot.
@@ -3797,9 +3640,9 @@ NV_STATUS UvmToolsEventQueueDisableEvents(UvmToolsEventQueueHandle queue,
 //         Provided session is not valid
 //
 //     NV_ERR_INSUFFICIENT_RESOURCES
-//         There could be multiple reasons for this error. One would be that
-//         it's not possible to allocate counters structure. Another would be
-//         that either event_buffer or event_control memory couldn't be pinned
+//         There could be multiple reasons for this error. One would be that it's
+//         not possible to allocate counters structure. Another would be that
+//         either event_buffer or event_control memory couldn't be pinned
 //         (e.g. because of OS limitation of pinnable memory)
 //
 //------------------------------------------------------------------------------
@@ -3810,12 +3653,12 @@ NV_STATUS UvmToolsCreateProcessAggregateCounters(UvmToolsSessionHandle session
 //------------------------------------------------------------------------------
 // UvmToolsCreateProcessorCounters
 //
-// Creates the counters structure for tracking per-processor counters.
+// Creates the counters structure for tracking per-process counters.
 // These counters are disabled by default.
 //
 // Counters position follows the layout of the memory that UVM driver decides to
 // use. To obtain particular counter value, user should perform consecutive
-// atomic reads at a given buffer + offset address.
+// atomic reads at a a given buffer + offset address.
 //
 // It is not defined what is the initial value of a counter. User should rely on
 // a difference between each snapshot.
@@ -3831,9 +3674,7 @@ NV_STATUS UvmToolsCreateProcessAggregateCounters(UvmToolsSessionHandle session
 //         counters are destroyed.
 //
 //     processorUuid: (INPUT)
-//         UUID of the physical GPU if the GPU is not SMC capable or SMC
-//         enabled, the GPU instance UUID of the partition, or the CPU UUID of
-//         the resource, for which counters will provide statistic data.
+//         UUID of the resource, for which counters will provide statistic data.
 //
 //     counters: (OUTPUT)
 //         Handle to the created counters.
@@ -3843,9 +3684,9 @@ NV_STATUS UvmToolsCreateProcessAggregateCounters(UvmToolsSessionHandle session
 //         session handle does not refer to a valid tools session
 //
 //     NV_ERR_INSUFFICIENT_RESOURCES
-//         There could be multiple reasons for this error. One would be that
-//         it's not possible to allocate counters structure. Another would be
-//         that either event_buffer or event_control memory couldn't be pinned
+//         There could be multiple reasons for this error. One would be that it's
+//         not possible to allocate counters structure. Another would be that
+//         either event_buffer or event_control memory couldn't be pinned
 //         (e.g. because of OS limitation of pinnable memory)
 //
 //     NV_ERR_INVALID_ARGUMENT
@@ -3861,7 +3702,7 @@ NV_STATUS UvmToolsCreateProcessorCounters(UvmToolsSessionHandle session,
 // UvmToolsDestroyCounters
 //
 // Destroys all internal resources associated with this counters structure.
-// It unpins the buffer provided in UvmToolsCreate*Counters. Counters structure
+// It unpinns the buffer provided in UvmToolsCreate*Counters. Counters structure
 // also gets destroyed when corresponding session is destroyed.
 //
 // Arguments:
@@ -3882,7 +3723,7 @@ NV_STATUS UvmToolsDestroyCounters(UvmToolsCountersHandle counters);
 // UvmToolsEnableCounters
 //
 // This call enables certain counter types in the counters structure. Any
-// counter type is considered listed if and only if its corresponding value is
+// counter type is considered listed if and only if it's corresponding value is
 // equal to 1 (in other words, bit is set). Disabled counter types listed in
 // counterTypeFlags are going to be enabled. Already enabled counter types and
 // counter types not listed in counterTypeFlags are not affected by this call.
@@ -3916,7 +3757,7 @@ NV_STATUS UvmToolsEnableCounters(UvmToolsCountersHandle counters,
 // UvmToolsDisableCounters
 //
 // This call disables certain counter types in the counters structure. Any
-// counter type is considered listed if and only if its corresponding value is
+// counter type is considered listed if and only if it's corresponding value is
 // equal to 1 (in other words, bit is set). Enabled counter types listed in
 // counterTypeFlags are going to be disabled. Already disabled counter types and
 // counter types not listed in counterTypeFlags are not affected by this call.
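Per the notes above, counter values are only meaningful as deltas between atomic snapshots at buffer + offset. A sketch using the GCC/Clang atomic builtins; 'counters_buf' and 'offset' are placeholders for the mapped counters buffer and the offset of one enabled counter:

    #include <stdio.h>

    static void report_delta(const void *counters_buf, size_t offset)
    {
        const NvU64 *ctr = (const NvU64 *)((const char *)counters_buf + offset);
        NvU64 before = __atomic_load_n(ctr, __ATOMIC_RELAXED);
        /* ... run the workload being measured ... */
        NvU64 after = __atomic_load_n(ctr, __ATOMIC_RELAXED);
        printf("counter delta: %llu\n", (unsigned long long)(after - before));
    }
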
@@ -3959,7 +3800,9 @@ NV_STATUS UvmToolsDisableCounters(UvmToolsCountersHandle counters,
 // In-process scenario when targetVa address + size overlaps with buffer + size.
 //
 // This is essentially a UVM version of RM ctrl call
-// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY.
+// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY. For implementation constraints (and more
+// information), please refer to the documentation:
+// //sw/docs/resman/components/compute/UVM/subsystems/UVM_8_Tools_API_Design.docx
 //
 // Arguments:
 //     session: (INPUT)
@@ -4013,7 +3856,9 @@ NV_STATUS UvmToolsReadProcessMemory(UvmToolsSessionHandle session,
 //     buffer + size.
 //
 // This is essentially a UVM version of RM ctrl call
-// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY.
+// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY. For implementation constraints (and more
+// information), please refer to the documentation:
+// //sw/docs/resman/components/compute/UVM/subsystems/UVM_8_Tools_API_Design.docx
 //
 // Arguments:
 //     session: (INPUT)
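A heavily hedged sketch of the read call described above. The full prototype falls outside this excerpt, so the parameter order below (buffer, size, target VA, bytes-read out-parameter) is an assumption inferred from the argument names in the surrounding comments.

    static NV_STATUS peek_target(UvmToolsSessionHandle session,
                                 void *local_buf, NvLength size, void *target_va)
    {
        NvLength bytes_read = 0;
        return UvmToolsReadProcessMemory(session, local_buf, size,
                                         target_va, &bytes_read);
    }
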
@@ -4056,15 +3901,11 @@ NV_STATUS UvmToolsWriteProcessMemory(UvmToolsSessionHandle session,
 //------------------------------------------------------------------------------
 // UvmToolsGetProcessorUuidTable
 //
-// This function is deprecated. See UvmToolsGetProcessorUuidTable_V2.
-//
 // Populate a table with the UUIDs of all the currently registered processors
 // in the target process. When a GPU is registered, it is added to the table.
-// When a GPU is unregistered, it is removed. As long as a GPU remains
-// registered, its index in the table does not change.
-// Note that the index in the table corresponds to the processor ID reported
-// in UvmEventEntry event records and that the table is not contiguously packed
-// with non-zero UUIDs even with no GPU unregistrations.
+// When a GPU is unregistered, it is removed. As long as a GPU remains registered,
+// its index in the table does not change. New registrations obtain the first
+// unused index.
 //
 // Arguments:
 //     session: (INPUT)
@@ -4072,61 +3913,21 @@ NV_STATUS UvmToolsWriteProcessMemory(UvmToolsSessionHandle session,
 //
 //     table: (OUTPUT)
 //         Array of processor UUIDs, including the CPU's UUID which is always
-//         at index zero. The number of elements in the array must be greater
-//         or equal to UVM_MAX_PROCESSORS_V1.
-//         The srcIndex and dstIndex fields of the UvmEventMigrationInfo struct
-//         index this array. Unused indices will have a UUID of zero.
-//         The reported UUID will be that of the corresponding physical GPU,
-//         even if multiple SMC partitions are registered under that physical
-//         GPU.
+//         at index zero. The srcIndex and dstIndex fields of the
+//         UvmEventMigrationInfo struct index this array. Unused indices will
+//         have a UUID of zero.
+//
+//     count: (OUTPUT)
+//         Set by UVM to the number of UUIDs written, including any gaps in
+//         the table due to unregistered GPUs.
 //
 // Error codes:
 //     NV_ERR_INVALID_ADDRESS:
 //         writing to table failed.
-//
-//     NV_ERR_NO_MEMORY:
-//         Internal memory allocation failed.
 //------------------------------------------------------------------------------
 NV_STATUS UvmToolsGetProcessorUuidTable(UvmToolsSessionHandle session,
-                                        NvProcessorUuid *table);
+                                        NvProcessorUuid *table,
+                                        NvLength *count);
-
-//------------------------------------------------------------------------------
-// UvmToolsGetProcessorUuidTable_V2
-//
-// Populate a table with the UUIDs of all the currently registered processors
-// in the target process. When a GPU is registered, it is added to the table.
-// When a GPU is unregistered, it is removed. As long as a GPU remains
-// registered, its index in the table does not change.
-// Note that the index in the table corresponds to the processor ID reported
-// in UvmEventEntry event records and that the table is not contiguously packed
-// with non-zero UUIDs even with no GPU unregistrations.
-//
-// Arguments:
-//     session: (INPUT)
-//         Handle to the tools session.
-//
-//     table: (OUTPUT)
-//         Array of processor UUIDs, including the CPU's UUID which is always
-//         at index zero. The number of elements in the array must be greater
-//         or equal to UVM_MAX_PROCESSORS.
-//         The srcIndex and dstIndex fields of the UvmEventMigrationInfo struct
-//         index this array. Unused indices will have a UUID of zero.
-//         The reported UUID will be the GPU instance UUID if SMC is enabled,
-//         otherwise it will be the UUID of the physical GPU.
-//
-// Error codes:
-//     NV_ERR_INVALID_ADDRESS:
-//         writing to table failed.
-//
-//     NV_ERR_NOT_SUPPORTED:
-//         The UVM kernel driver is older and doesn't support
-//         UvmToolsGetProcessorUuidTable_V2.
-//
-//     NV_ERR_NO_MEMORY:
-//         Internal memory allocation failed.
-//------------------------------------------------------------------------------
-NV_STATUS UvmToolsGetProcessorUuidTable_V2(UvmToolsSessionHandle session,
-                                           NvProcessorUuid *table);
 
 //------------------------------------------------------------------------------
 // UvmToolsFlushEvents
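A sketch against the 535-side signature above, whose count out-parameter includes gaps left by unregistered GPUs. The table capacity is a placeholder, since this excerpt names no sizing constant for the tools table:

    static NV_STATUS fetch_uuid_table(UvmToolsSessionHandle session)
    {
        NvProcessorUuid table[64];  // placeholder capacity; CPU UUID is index 0
        NvLength written = 0;
        return UvmToolsGetProcessorUuidTable(session, table, &written);
    }
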
@@ -1,5 +1,5 @@
 /*******************************************************************************
-    Copyright (c) 2021-2025 NVIDIA Corporation
+    Copyright (c) 2021-2023 NVIDIA Corporation
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to
@@ -38,10 +38,12 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
 
     parent_gpu->utlb_per_gpc_count = uvm_ada_get_utlbs_per_gpc(parent_gpu);
 
-    parent_gpu->fault_buffer.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count;
+    parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount *
+                                                          parent_gpu->utlb_per_gpc_count;
     {
         uvm_fault_buffer_entry_t *dummy;
-        UVM_ASSERT(parent_gpu->fault_buffer.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8)));
+        UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) *
+                                                                                 8)));
     }
 
     // A single top level PDE on Ada covers 128 TB and that's the minimum size
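A standalone restatement of the invariant asserted above: the computed uTLB count (GPC count times uTLBs per GPC) must be representable in the fault entry's utlb_id field. The field width and counts below are hypothetical example values, not taken from the sources.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        typedef uint16_t utlb_id_t;      // assumed field type for the example
        unsigned max_gpc_count = 12;     // example value
        unsigned utlbs_per_gpc = 2;      // example value
        unsigned utlb_count = max_gpc_count * utlbs_per_gpc;    // 24
        assert(utlb_count <= (1u << (sizeof(utlb_id_t) * 8)));  // 24 <= 65536
        return 0;
    }
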
@@ -49,9 +51,6 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
     parent_gpu->rm_va_base = 0;
     parent_gpu->rm_va_size = 128 * UVM_SIZE_1TB;
 
-    parent_gpu->peer_va_base = parent_gpu->rm_va_base + parent_gpu->rm_va_size;
-    parent_gpu->peer_va_size = NV_MAX_DEVICES * UVM_PEER_IDENTITY_VA_SIZE;
-
     parent_gpu->uvm_mem_va_base = 384 * UVM_SIZE_1TB;
     parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE;
 
@@ -97,6 +96,4 @@ void uvm_hal_ada_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
     parent_gpu->plc_supported = true;
 
     parent_gpu->no_ats_range_required = false;
-
-    parent_gpu->conf_computing.per_channel_key_rotation = false;
 }
@@ -1,5 +1,5 @@
 /*******************************************************************************
-    Copyright (c) 2018-2025 NVIDIA Corporation
+    Copyright (c) 2018-2022 NVIDIA Corporation
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to
@@ -38,10 +38,10 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
 
     parent_gpu->utlb_per_gpc_count = uvm_ampere_get_utlbs_per_gpc(parent_gpu);
 
-    parent_gpu->fault_buffer.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count;
+    parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count;
     {
         uvm_fault_buffer_entry_t *dummy;
-        UVM_ASSERT(parent_gpu->fault_buffer.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8)));
+        UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8)));
     }
 
     // A single top level PDE on Ampere covers 128 TB and that's the minimum
@@ -49,14 +49,11 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
     parent_gpu->rm_va_base = 0;
     parent_gpu->rm_va_size = 128 * UVM_SIZE_1TB;
 
-    parent_gpu->peer_va_base = parent_gpu->rm_va_base + parent_gpu->rm_va_size;
-    parent_gpu->peer_va_size = NV_MAX_DEVICES * UVM_PEER_IDENTITY_VA_SIZE;
-
     parent_gpu->uvm_mem_va_base = 384 * UVM_SIZE_1TB;
     parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE;
 
     // See uvm_mmu.h for mapping placement
-    parent_gpu->flat_vidmem_va_base = 160 * UVM_SIZE_1TB;
+    parent_gpu->flat_vidmem_va_base = 136 * UVM_SIZE_1TB;
     parent_gpu->flat_sysmem_va_base = 256 * UVM_SIZE_1TB;
 
     parent_gpu->ce_phys_vidmem_write_supported = true;
@@ -106,6 +103,4 @@ void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
     parent_gpu->plc_supported = true;
 
     parent_gpu->no_ats_range_required = false;
-
-    parent_gpu->conf_computing.per_channel_key_rotation = false;
 }
@@ -1,5 +1,5 @@
 /*******************************************************************************
-    Copyright (c) 2018-2023 NVIDIA Corporation
+    Copyright (c) 2018-2022 NVIDIA Corporation
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to
@@ -29,8 +29,6 @@
 
 bool uvm_hal_ampere_ce_method_is_valid_c6b5(uvm_push_t *push, NvU32 method_address, NvU32 method_data)
 {
-    UVM_ASSERT(push->channel);
-
     if (!uvm_channel_is_proxy(push->channel))
         return true;
 
@@ -118,18 +116,8 @@ bool uvm_hal_ampere_ce_memcopy_is_valid_c6b5(uvm_push_t *push, uvm_gpu_address_t
 {
     NvU64 push_begin_gpu_va;
     uvm_gpu_t *gpu = uvm_push_get_gpu(push);
-    const bool peer_copy = uvm_gpu_address_is_peer(gpu, dst) || uvm_gpu_address_is_peer(gpu, src);
-
-    UVM_ASSERT(push->channel);
-
-    if (peer_copy && !uvm_channel_is_p2p(push->channel)) {
-        UVM_ERR_PRINT("Peer copy from address (0x%llx) to address (0x%llx) should use designated p2p channels!",
-                      src.address,
-                      dst.address);
-        return false;
-    }
-
-    if (!uvm_parent_gpu_is_virt_mode_sriov_heavy(gpu->parent))
+
+    if (!uvm_gpu_is_virt_mode_sriov_heavy(gpu))
         return true;
 
     if (uvm_channel_is_proxy(push->channel)) {
@@ -194,8 +182,6 @@ void uvm_hal_ampere_ce_memcopy_patch_src_c6b5(uvm_push_t *push, uvm_gpu_address_
 {
     uvm_pushbuffer_t *pushbuffer;
 
-    UVM_ASSERT(push->channel);
-
     if (!uvm_channel_is_proxy(push->channel))
         return;
 
@@ -210,7 +196,7 @@ bool uvm_hal_ampere_ce_memset_is_valid_c6b5(uvm_push_t *push,
 {
     uvm_gpu_t *gpu = uvm_push_get_gpu(push);
 
-    if (!uvm_parent_gpu_is_virt_mode_sriov_heavy(gpu->parent))
+    if (!uvm_gpu_is_virt_mode_sriov_heavy(gpu))
         return true;
 
     if (uvm_channel_is_proxy(push->channel)) {
@@ -1,75 +0,0 @@
-/*******************************************************************************
-    Copyright (c) 2024 NVIDIA Corporation
-
-    Permission is hereby granted, free of charge, to any person obtaining a copy
-    of this software and associated documentation files (the "Software"), to
-    deal in the Software without restriction, including without limitation the
-    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-    sell copies of the Software, and to permit persons to whom the Software is
-    furnished to do so, subject to the following conditions:
-
-        The above copyright notice and this permission notice shall be
-        included in all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-    DEALINGS IN THE SOFTWARE.
-
-*******************************************************************************/
-
-#include "uvm_linux.h"
-#include "uvm_global.h"
-#include "uvm_gpu.h"
-#include "uvm_hal.h"
-#include "hwref/ampere/ga100/dev_fault.h"
-
-static bool client_id_ce(NvU16 client_id)
-{
-    if (client_id >= NV_PFAULT_CLIENT_HUB_HSCE0 && client_id <= NV_PFAULT_CLIENT_HUB_HSCE9)
-        return true;
-
-    if (client_id >= NV_PFAULT_CLIENT_HUB_HSCE10 && client_id <= NV_PFAULT_CLIENT_HUB_HSCE15)
-        return true;
-
-    switch (client_id) {
-        case NV_PFAULT_CLIENT_HUB_CE0:
-        case NV_PFAULT_CLIENT_HUB_CE1:
-        case NV_PFAULT_CLIENT_HUB_CE2:
-            return true;
-    }
-
-    return false;
-}
-
-uvm_mmu_engine_type_t uvm_hal_ampere_fault_buffer_get_mmu_engine_type(NvU16 mmu_engine_id,
-                                                                      uvm_fault_client_type_t client_type,
-                                                                      NvU16 client_id)
-{
-    // Servicing CE and Host (HUB clients) faults.
-    if (client_type == UVM_FAULT_CLIENT_TYPE_HUB) {
-        if (client_id_ce(client_id)) {
-            UVM_ASSERT(mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_CE0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_CE9);
-
-            return UVM_MMU_ENGINE_TYPE_CE;
-        }
-
-        if (client_id == NV_PFAULT_CLIENT_HUB_HOST || client_id == NV_PFAULT_CLIENT_HUB_ESC) {
-            UVM_ASSERT(mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_HOST0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_HOST31);
-
-            return UVM_MMU_ENGINE_TYPE_HOST;
-        }
-    }
-
-    // We shouldn't be servicing faults from any engines other than GR.
-    UVM_ASSERT_MSG(client_id <= NV_PFAULT_CLIENT_GPC_ROP_3, "Unexpected client ID: 0x%x\n", client_id);
-    UVM_ASSERT_MSG(mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_GRAPHICS && mmu_engine_id < NV_PFAULT_MMU_ENG_ID_BAR1,
-                   "Unexpected engine ID: 0x%x\n",
-                   mmu_engine_id);
-    UVM_ASSERT(client_type == UVM_FAULT_CLIENT_TYPE_GPC);
-
-    return UVM_MMU_ENGINE_TYPE_GRAPHICS;
-}
Some files were not shown because too many files have changed in this diff.