Compare commits

...

2 Commits

Author          SHA1          Message       Date
Andy Ritger     b5bf85a8e3    545.23.06     2023-10-17 09:25:29 -07:00
Maneet Singh    f59818b751    535.113.01    2023-09-21 10:43:43 -07:00
930 changed files with 133834 additions and 109755 deletions


@@ -1,7 +1,25 @@
# Changelog
## Release 545 Entries
### [545.23.06] 2023-10-17
#### Fixed
- Fix always-false conditional, [#493](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/493) by @meme8383
#### Added
- Added beta-quality support for GeForce and Workstation GPUs. Please see the "Open Linux Kernel Modules" chapter in the NVIDIA GPU driver end user README for details.
## Release 535 Entries
### [535.113.01] 2023-09-21
#### Fixed
- Fixed a build failure of main against current CentOS Stream 8, [#550](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/550) by @airlied
### [535.104.05] 2023-08-22
### [535.98] 2023-08-08


@@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 535.104.05.
version 545.23.06.
## How to Build
@@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
535.104.05 driver release. This can be achieved by installing
545.23.06 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@@ -179,16 +179,16 @@ software applications.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 535.104.05 release,
GeForce and Workstation support is still considered alpha-quality.
The NVIDIA open kernel modules can be used on any Turing or later GPU
(see the table below). However, in the 545.23.06 release, GeForce and
Workstation support is considered to be Beta quality. The open kernel modules
are suitable for broad usage, and NVIDIA requests feedback on any issues
encountered specific to them.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
For details on feature support and limitations, see the NVIDIA GPU driver
end user README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/535.104.05/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/545.23.06/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@@ -856,6 +856,10 @@ Subsystem Device ID.
| NVIDIA RTX 4000 SFF Ada Generation | 27B0 103C 16FA |
| NVIDIA RTX 4000 SFF Ada Generation | 27B0 10DE 16FA |
| NVIDIA RTX 4000 SFF Ada Generation | 27B0 17AA 16FA |
| NVIDIA RTX 4500 Ada Generation | 27B1 1028 180C |
| NVIDIA RTX 4500 Ada Generation | 27B1 103C 180C |
| NVIDIA RTX 4500 Ada Generation | 27B1 10DE 180C |
| NVIDIA RTX 4500 Ada Generation | 27B1 17AA 180C |
| NVIDIA RTX 4000 Ada Generation | 27B2 1028 181B |
| NVIDIA RTX 4000 Ada Generation | 27B2 103C 181B |
| NVIDIA RTX 4000 Ada Generation | 27B2 10DE 181B |


@@ -72,12 +72,24 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.104.05\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"545.23.06\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
endif
# Some Android kernels prohibit driver use of filesystem functions like
# filp_open() and kernel_read(). Disable the NV_FILESYSTEM_ACCESS_AVAILABLE
# functionality that uses those functions when building for Android.
PLATFORM_IS_ANDROID ?= 0
ifeq ($(PLATFORM_IS_ANDROID),1)
EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=0
else
EXTRA_CFLAGS += -DNV_FILESYSTEM_ACCESS_AVAILABLE=1
endif
EXTRA_CFLAGS += -Wno-unused-function
ifneq ($(NV_BUILD_TYPE),debug)
@@ -92,7 +104,6 @@ endif
ifeq ($(NV_BUILD_TYPE),debug)
EXTRA_CFLAGS += -g
EXTRA_CFLAGS += $(call cc-option,-gsplit-dwarf,)
endif
EXTRA_CFLAGS += -ffreestanding
@@ -214,6 +225,7 @@ $(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT)
NV_HEADER_PRESENCE_TESTS = \
asm/system.h \
drm/drmP.h \
drm/drm_aperture.h \
drm/drm_auth.h \
drm/drm_gem.h \
drm/drm_crtc.h \
@@ -224,6 +236,7 @@ NV_HEADER_PRESENCE_TESTS = \
drm/drm_encoder.h \
drm/drm_atomic_uapi.h \
drm/drm_drv.h \
drm/drm_fbdev_generic.h \
drm/drm_framebuffer.h \
drm/drm_connector.h \
drm/drm_probe_helper.h \
@@ -257,6 +270,7 @@ NV_HEADER_PRESENCE_TESTS = \
linux/sched/task_stack.h \
xen/ioemu.h \
linux/fence.h \
linux/dma-fence.h \
linux/dma-resv.h \
soc/tegra/chip-id.h \
soc/tegra/fuse.h \
@@ -302,6 +316,7 @@ NV_HEADER_PRESENCE_TESTS = \
linux/mdev.h \
soc/tegra/bpmp-abi.h \
soc/tegra/bpmp.h \
linux/sync_file.h \
linux/cc_platform.h \
asm/cpufeature.h


@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CHARDEV_NUMBERS_H_
#define _NV_CHARDEV_NUMBERS_H_
// NVIDIA's reserved major character device number (Linux).
#define NV_MAJOR_DEVICE_NUMBER 195
// Minor numbers 0 to 247 reserved for regular devices
#define NV_MINOR_DEVICE_NUMBER_REGULAR_MAX 247
// Minor numbers 248 to 253 currently unused
// Minor number 254 reserved for the modeset device (provided by NVKMS)
#define NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE 254
// Minor number 255 reserved for the control device
#define NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE 255
#endif // _NV_CHARDEV_NUMBERS_H_
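For reference, a minimal sketch of how these reserved numbers combine into Linux device numbers. It is illustrative only and not part of the commit: the header name is inferred from its include guard, and the `/dev` node names mentioned are the conventional ones.

```c
#include <linux/types.h>           /* dev_t */
#include <linux/kdev_t.h>          /* MKDEV() */
#include "nv-chardev-numbers.h"    /* header above, name inferred from its guard */

/* Illustrative only: minor 0 is the first regular device (/dev/nvidia0),
 * minor 254 the modeset device (/dev/nvidia-modeset), and minor 255 the
 * control device (/dev/nvidiactl). */
static void example_build_dev_numbers(void)
{
    dev_t first_gpu = MKDEV(NV_MAJOR_DEVICE_NUMBER, 0);
    dev_t modeset   = MKDEV(NV_MAJOR_DEVICE_NUMBER,
                            NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE);
    dev_t control   = MKDEV(NV_MAJOR_DEVICE_NUMBER,
                            NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);

    (void)first_gpu;
    (void)modeset;
    (void)control;
}
```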


@@ -25,14 +25,12 @@
#ifndef NV_IOCTL_NUMA_H
#define NV_IOCTL_NUMA_H
#if defined(NV_LINUX)
#include <nv-ioctl-numbers.h>
#if defined(NV_KERNEL_INTERFACE_LAYER)
#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
#include <linux/types.h>
#elif defined (NV_KERNEL_INTERFACE_LAYER) && defined(NV_BSD)
#include <sys/stdint.h>
#else
#include <stdint.h>
@@ -81,5 +79,3 @@ typedef struct nv_ioctl_set_numa_status
#define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6
#endif
#endif


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -21,27 +21,42 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_FRONTEND_H_
#define _NV_FRONTEND_H_
#ifndef __NV_KTHREAD_QUEUE_OS_H__
#define __NV_KTHREAD_QUEUE_OS_H__
#include "nvtypes.h"
#include "nv-linux.h"
#include "nv-register-module.h"
#include <linux/types.h> // atomic_t
#include <linux/list.h> // list
#include <linux/sched.h> // task_struct
#include <linux/numa.h> // NUMA_NO_NODE
#include <linux/semaphore.h>
#define NV_MAX_MODULE_INSTANCES 8
#include "conftest.h"
#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev)
struct nv_kthread_q
{
struct list_head q_list_head;
spinlock_t q_lock;
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \
NV_MAX_MODULE_INSTANCES)
// This is a counting semaphore. It gets incremented and decremented
// exactly once for each item that is added to the queue.
struct semaphore q_sem;
atomic_t main_loop_should_exit;
#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \
(x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN))
struct task_struct *q_kthread;
};
int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *);
int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *);
struct nv_kthread_q_item
{
struct list_head q_list_node;
nv_q_func_t function_to_run;
void *function_args;
};
extern nvidia_module_t *nv_minor_num_table[];
#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif
#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
#endif


@@ -24,13 +24,14 @@
#ifndef __NV_KTHREAD_QUEUE_H__
#define __NV_KTHREAD_QUEUE_H__
#include <linux/types.h> // atomic_t
#include <linux/list.h> // list
#include <linux/sched.h> // task_struct
#include <linux/numa.h> // NUMA_NO_NODE
#include <linux/semaphore.h>
struct nv_kthread_q;
struct nv_kthread_q_item;
typedef struct nv_kthread_q nv_kthread_q_t;
typedef struct nv_kthread_q_item nv_kthread_q_item_t;
#include "conftest.h"
typedef void (*nv_q_func_t)(void *args);
#include "nv-kthread-q-os.h"
////////////////////////////////////////////////////////////////////////////////
// nv_kthread_q:
@@ -85,38 +86,6 @@
//
////////////////////////////////////////////////////////////////////////////////
typedef struct nv_kthread_q nv_kthread_q_t;
typedef struct nv_kthread_q_item nv_kthread_q_item_t;
typedef void (*nv_q_func_t)(void *args);
struct nv_kthread_q
{
struct list_head q_list_head;
spinlock_t q_lock;
// This is a counting semaphore. It gets incremented and decremented
// exactly once for each item that is added to the queue.
struct semaphore q_sem;
atomic_t main_loop_should_exit;
struct task_struct *q_kthread;
};
struct nv_kthread_q_item
{
struct list_head q_list_node;
nv_q_func_t function_to_run;
void *function_args;
};
#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif
#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
//
// The queue must not be used before calling this routine.
//
@@ -155,10 +124,7 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q,
// This routine is the same as nv_kthread_q_init_on_node() with the exception
// that the queue stack will be allocated on the NUMA node of the caller.
//
static inline int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname);
//
// The caller is responsible for stopping all queues, by calling this routine
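For orientation, a minimal usage sketch of the queue API declared above, assuming process context; the `example_*` names are hypothetical and not part of the commit.

```c
#include "nv-kthread-q.h"

static nv_kthread_q_t example_q;
static nv_kthread_q_item_t example_item;

/* Runs in the queue's kthread context. */
static void example_work(void *args)
{
    (void)args;
}

static int example_setup(void)
{
    int ret = nv_kthread_q_init(&example_q, "example_q");
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&example_item, example_work, NULL);

    /* Non-zero return: the item was scheduled; 0: it was already pending. */
    nv_kthread_q_schedule_q_item(&example_q, &example_item);

    /* Wait until everything queued so far has run. */
    nv_kthread_q_flush(&example_q);
    return 0;
}

static void example_teardown(void)
{
    /* Flushes internally, then stops the underlying kthread. */
    nv_kthread_q_stop(&example_q);
}
```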


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2001-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -248,7 +248,7 @@ NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32,
#undef NV_SET_PAGES_UC_PRESENT
#endif
#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE)
#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE) && !defined(NVCPU_RISCV64)
#if !defined(NV_SET_MEMORY_UC_PRESENT) && !defined(NV_SET_PAGES_UC_PRESENT)
#error "This driver requires the ability to change memory types!"
#endif
@@ -430,6 +430,11 @@ extern NvBool nvos_is_chipset_io_coherent(void);
#define CACHE_FLUSH() asm volatile("sync; \n" \
"isync; \n" ::: "memory")
#define WRITE_COMBINE_FLUSH() CACHE_FLUSH()
#elif defined(NVCPU_RISCV64)
#define CACHE_FLUSH() mb()
#define WRITE_COMBINE_FLUSH() CACHE_FLUSH()
#else
#error "CACHE_FLUSH() and WRITE_COMBINE_FLUSH() need to be defined for this architecture."
#endif
typedef enum
@@ -440,7 +445,7 @@ typedef enum
NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */
} nv_memory_type_t;
#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE)
#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64)
#define NV_ALLOW_WRITE_COMBINING(mt) 1
#elif defined(NVCPU_X86_64)
#if defined(NV_ENABLE_PAT_SUPPORT)
@@ -753,7 +758,6 @@ static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa)
#define NV_VMA_FILE(vma) ((vma)->vm_file)
#define NV_DEVICE_MINOR_NUMBER(x) minor((x)->i_rdev)
#define NV_CONTROL_DEVICE_MINOR 255
#define NV_PCI_DISABLE_DEVICE(pci_dev) \
{ \
@@ -1646,20 +1650,11 @@ typedef struct nvidia_event
nv_event_t event;
} nvidia_event_t;
typedef enum
{
NV_FOPS_STACK_INDEX_MMAP,
NV_FOPS_STACK_INDEX_IOCTL,
NV_FOPS_STACK_INDEX_COUNT
} nvidia_entry_point_index_t;
typedef struct
{
nv_file_private_t nvfp;
nvidia_stack_t *sp;
nvidia_stack_t *fops_sp[NV_FOPS_STACK_INDEX_COUNT];
struct semaphore fops_sp_lock[NV_FOPS_STACK_INDEX_COUNT];
nv_alloc_t *free_list;
void *nvptr;
nvidia_event_t *event_data_head, *event_data_tail;
@@ -1689,28 +1684,6 @@ static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t
#define NV_STATE_PTR(nvl) &(((nv_linux_state_t *)(nvl))->nv_state)
static inline nvidia_stack_t *nv_nvlfp_get_sp(nv_linux_file_private_t *nvlfp, nvidia_entry_point_index_t which)
{
#if defined(NVCPU_X86_64)
if (rm_is_altstack_in_use())
{
down(&nvlfp->fops_sp_lock[which]);
return nvlfp->fops_sp[which];
}
#endif
return NULL;
}
static inline void nv_nvlfp_put_sp(nv_linux_file_private_t *nvlfp, nvidia_entry_point_index_t which)
{
#if defined(NVCPU_X86_64)
if (rm_is_altstack_in_use())
{
up(&nvlfp->fops_sp_lock[which]);
}
#endif
}
#define NV_ATOMIC_READ(data) atomic_read(&(data))
#define NV_ATOMIC_SET(data,val) atomic_set(&(data), (val))
#define NV_ATOMIC_INC(data) atomic_inc(&(data))


@@ -119,6 +119,13 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot(pgprot_val((old_prot)) & ~NV_PAGE_RW)
#elif defined(NVCPU_RISCV64)
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
pgprot_writecombine(old_prot)
/* Don't attempt to mark sysmem pages as write combined on riscv */
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot(pgprot_val((old_prot)) & ~_PAGE_WRITE)
#else
/* Writecombine is not supported */
#undef NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot)


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,10 +25,8 @@
#define _NV_PROTO_H_
#include "nv-pci.h"
#include "nv-register-module.h"
extern const char *nv_device_name;
extern nvidia_module_t nv_fops;
void nv_acpi_register_notifier (nv_linux_state_t *);
void nv_acpi_unregister_notifier (nv_linux_state_t *);
@@ -86,7 +84,7 @@ void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state
void nv_dev_free_stacks(nv_linux_state_t *);
NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *);
void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *);
void nv_linux_add_device_locked(nv_linux_state_t *);
int nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_remove_device_locked(nv_linux_state_t *);
NvBool nv_acpi_power_resource_method_present(struct pci_dev *);


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -42,6 +42,7 @@
#include <nv-caps.h>
#include <nv-firmware.h>
#include <nv-ioctl.h>
#include <nv-ioctl-numa.h>
#include <nvmisc.h>
extern nv_cap_t *nvidia_caps_root;
@@ -50,9 +51,6 @@ extern const NvBool nv_is_rm_firmware_supported_os;
#include <nv-kernel-interface-api.h>
/* NVIDIA's reserved major character device number (Linux). */
#define NV_MAJOR_DEVICE_NUMBER 195
#define GPU_UUID_LEN (16)
/*
@@ -478,8 +476,6 @@ typedef struct nv_state_t
/* Bool to check if dma-buf is supported */
NvBool dma_buf_supported;
NvBool printed_openrm_enable_unsupported_gpus_error;
/* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
NvBool nvpcf_dsm_in_gpu_scope;
@@ -505,6 +501,7 @@ struct nv_file_private_t
NvHandle *handles;
NvU16 maxHandles;
NvU32 deviceInstance;
NvU32 gpuInstanceId;
NvU8 metadata[64];
nv_file_private_t *ctl_nvfp;
@@ -765,7 +762,7 @@ nv_state_t* NV_API_CALL nv_get_ctl_state (void);
void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 );
NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *);
NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **);
@@ -981,7 +978,7 @@ NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t
void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, void *, nv_phys_addr_range_t **, NvU32 *);
void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, nv_phys_addr_range_t **, NvU32);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **, NvBool *);
void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
@@ -993,7 +990,7 @@ NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *);
NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *);
NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
@@ -1008,7 +1005,7 @@ void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_
void NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool);
NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *);
const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
@@ -1023,7 +1020,8 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, c
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *, NvBool *);
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);


@@ -86,7 +86,7 @@
/* Not currently implemented for MSVC/ARM64. See bug 3366890. */
# define nv_speculation_barrier()
# define speculation_barrier() nv_speculation_barrier()
#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS
#elif defined(NVCPU_IS_RISCV64)
# define nv_speculation_barrier()
#else
#error "Unknown compiler/chip family"


@@ -104,6 +104,10 @@ typedef struct UvmGpuMemoryInfo_tag
// Out: Set to TRUE, if the allocation is in sysmem.
NvBool sysmem;
// Out: Set to TRUE, if this allocation is treated as EGM.
// sysmem is also TRUE when egm is TRUE.
NvBool egm;
// Out: Set to TRUE, if the allocation is constructed
// under a Device or Subdevice.
// All permutations of sysmem and deviceDescendant are valid.
@@ -125,6 +129,8 @@ typedef struct UvmGpuMemoryInfo_tag
// Out: Uuid of the GPU to which the allocation belongs.
// This is only valid if deviceDescendant is NV_TRUE.
// When egm is NV_TRUE, this is also the UUID of the GPU
// for which EGM is local.
// Note: If the allocation is owned by a device in
// an SLI group and the allocation is broadcast
// across the SLI group, this UUID will be any one
@@ -332,7 +338,7 @@ typedef struct UvmGpuPagingChannelAllocParams_tag
// The max number of Copy Engines supported by a GPU.
// The gpu ops build has a static assert that this is the correct number.
#define UVM_COPY_ENGINE_COUNT_MAX 10
#define UVM_COPY_ENGINE_COUNT_MAX 64
typedef struct
{
@@ -566,11 +572,8 @@ typedef struct UvmPlatformInfo_tag
// Out: ATS (Address Translation Services) is supported
NvBool atsSupported;
// Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
// is enabled in the VM, indicating that Confidential Computing must be
// also enabled in the GPU(s); these two security features are either both
// enabled, or both disabled.
NvBool confComputingEnabled;
// Out: AMD SEV (Secure Encrypted Virtualization) is enabled
NvBool sevEnabled;
} UvmPlatformInfo;
typedef struct UvmGpuClientInfo_tag
@@ -683,6 +686,10 @@ typedef struct UvmGpuInfo_tag
// to NVSwitch peers.
NvBool connectedToSwitch;
NvU64 nvswitchMemoryWindowStart;
// local EGM properties
NvBool egmEnabled;
NvU8 egmPeerId;
} UvmGpuInfo;
typedef struct UvmGpuFbInfo_tag


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -45,6 +45,11 @@
#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff
#define NVKMS_MAX_SUPERFRAME_VIEWS 4
#define NVKMS_LOG2_LUT_ARRAY_SIZE 10
#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE)
typedef NvU32 NvKmsDeviceHandle;
typedef NvU32 NvKmsDispHandle;
typedef NvU32 NvKmsConnectorHandle;
@@ -179,6 +184,14 @@ enum NvKmsEventType {
NVKMS_EVENT_TYPE_FLIP_OCCURRED,
};
enum NvKmsFlipResult {
NV_KMS_FLIP_RESULT_SUCCESS = 0, /* Success */
NV_KMS_FLIP_RESULT_INVALID_PARAMS, /* Parameter validation failed */
NV_KMS_FLIP_RESULT_IN_PROGRESS, /* Flip would fail because an outstanding
flip containing changes that cannot be
queued is in progress */
};
typedef enum {
NV_EVO_SCALER_1TAP = 0,
NV_EVO_SCALER_2TAPS = 1,
@@ -221,6 +234,16 @@ struct NvKmsUsageBounds {
} layer[NVKMS_MAX_LAYERS_PER_HEAD];
};
/*!
* Per-component arrays of NvU16s describing the LUT; used for both the input
* LUT and output LUT.
*/
struct NvKmsLutRamps {
NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */
};
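As an illustration (not part of the commit), a short sketch that fills an NvKmsLutRamps with a linear identity ramp; the helper name is hypothetical.

```c
/* NVKMS_LUT_ARRAY_SIZE == 1 << NVKMS_LOG2_LUT_ARRAY_SIZE == 1024 entries. */
static void example_fill_identity_ramp(struct NvKmsLutRamps *ramps)
{
    NvU32 i;

    for (i = 0; i < NVKMS_LUT_ARRAY_SIZE; i++) {
        /* Spread the entries linearly across the full 16-bit range. */
        const NvU16 v = (NvU16)((i * 0xFFFF) / (NVKMS_LUT_ARRAY_SIZE - 1));

        ramps->red[i]   = v;
        ramps->green[i] = v;
        ramps->blue[i]  = v;
    }
}
```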
/*
* A 3x4 row-major colorspace conversion matrix.
*
@@ -531,6 +554,18 @@ typedef struct {
NvBool noncoherent;
} NvKmsDispIOCoherencyModes;
enum NvKmsInputColorRange {
/*
* If DEFAULT is provided, driver will assume full range for RGB formats
* and limited range for YUV formats.
*/
NVKMS_INPUT_COLORRANGE_DEFAULT = 0,
NVKMS_INPUT_COLORRANGE_LIMITED = 1,
NVKMS_INPUT_COLORRANGE_FULL = 2,
};
enum NvKmsInputColorSpace {
/* Unknown colorspace; no de-gamma will be applied */
NVKMS_INPUT_COLORSPACE_NONE = 0,
@@ -542,6 +577,12 @@ enum NvKmsInputColorSpace {
NVKMS_INPUT_COLORSPACE_BT2100_PQ = 2,
};
enum NvKmsOutputColorimetry {
NVKMS_OUTPUT_COLORIMETRY_DEFAULT = 0,
NVKMS_OUTPUT_COLORIMETRY_BT2100 = 1,
};
enum NvKmsOutputTf {
/*
* NVKMS itself won't apply any OETF (clients are still
@@ -552,6 +593,17 @@ enum NvKmsOutputTf {
NVKMS_OUTPUT_TF_PQ = 2,
};
/*!
* EOTF Data Byte 1 as per CTA-861-G spec.
* This is expected to match exactly with the spec.
*/
enum NvKmsInfoFrameEOTF {
NVKMS_INFOFRAME_EOTF_SDR_GAMMA = 0,
NVKMS_INFOFRAME_EOTF_HDR_GAMMA = 1,
NVKMS_INFOFRAME_EOTF_ST2084 = 2,
NVKMS_INFOFRAME_EOTF_HLG = 3,
};
/*!
* HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec.
* This is expected to match exactly with the spec.
@@ -605,4 +657,29 @@ struct NvKmsHDRStaticMetadata {
NvU16 maxFALL;
};
/*!
* A superframe is made of two or more video streams that are combined in
* a specific way. A DP serializer (an external device connected to a Tegra
* ARM SOC over DP or HDMI) can receive a video stream comprising multiple
* videos combined into a single frame and then split it into multiple
* video streams. The following structure describes the number of views
* and dimensions of each view inside a superframe.
*/
struct NvKmsSuperframeInfo {
NvU8 numViews;
struct {
/* x offset inside superframe at which this view starts */
NvU16 x;
/* y offset inside superframe at which this view starts */
NvU16 y;
/* Horizontal active width in pixels for this view */
NvU16 width;
/* Vertical active height in lines for this view */
NvU16 height;
} view[NVKMS_MAX_SUPERFRAME_VIEWS];
};
#endif /* NVKMS_API_TYPES_H */


@@ -49,6 +49,8 @@ struct NvKmsKapiDevice;
struct NvKmsKapiMemory;
struct NvKmsKapiSurface;
struct NvKmsKapiChannelEvent;
struct NvKmsKapiSemaphoreSurface;
struct NvKmsKapiSemaphoreSurfaceCallback;
typedef NvU32 NvKmsKapiConnector;
typedef NvU32 NvKmsKapiDisplay;
@@ -67,6 +69,14 @@ typedef NvU32 NvKmsKapiDisplay;
*/
typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32);
/*
* Note: Same as above, this function must not call back into NVKMS-KAPI, nor
* directly into RM. Doing so could cause deadlocks given the notification
* function will most likely be called from within RM's interrupt handler
* callchain.
*/
typedef void NvKmsSemaphoreSurfaceCallbackProc(void *pData);
/** @} */
/**
@@ -126,6 +136,11 @@ struct NvKmsKapiDeviceResourcesInfo {
NvU32 validCursorCompositionModes;
NvU64 supportedCursorSurfaceMemoryFormats;
struct {
NvU64 maxSubmittedOffset;
NvU64 stride;
} semsurf;
struct {
NvU16 validRRTransforms;
NvU32 validCompositionModes;
@@ -218,8 +233,10 @@ struct NvKmsKapiLayerConfig {
struct NvKmsRRParams rrParams;
struct NvKmsKapiSyncpt syncptParams;
struct NvKmsHDRStaticMetadata hdrMetadata;
NvBool hdrMetadataSpecified;
struct {
struct NvKmsHDRStaticMetadata val;
NvBool enabled;
} hdrMetadata;
enum NvKmsOutputTf tf;
@@ -233,16 +250,21 @@ struct NvKmsKapiLayerConfig {
NvU16 dstWidth, dstHeight;
enum NvKmsInputColorSpace inputColorSpace;
struct NvKmsCscMatrix csc;
NvBool cscUseMain;
};
struct NvKmsKapiLayerRequestedConfig {
struct NvKmsKapiLayerConfig config;
struct {
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool cscChanged : 1;
NvBool tfChanged : 1;
NvBool hdrMetadataChanged : 1;
} flags;
};
@@ -286,14 +308,41 @@ struct NvKmsKapiHeadModeSetConfig {
struct NvKmsKapiDisplayMode mode;
NvBool vrrEnabled;
struct {
NvBool enabled;
enum NvKmsInfoFrameEOTF eotf;
struct NvKmsHDRStaticMetadata staticMetadata;
} hdrInfoFrame;
enum NvKmsOutputColorimetry colorimetry;
struct {
struct {
NvBool specified;
NvU32 depth;
NvU32 start;
NvU32 end;
struct NvKmsLutRamps *pRamps;
} input;
struct {
NvBool specified;
NvBool enabled;
struct NvKmsLutRamps *pRamps;
} output;
} lut;
};
struct NvKmsKapiHeadRequestedConfig {
struct NvKmsKapiHeadModeSetConfig modeSetConfig;
struct {
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool hdrInfoFrameChanged : 1;
NvBool colorimetryChanged : 1;
NvBool lutChanged : 1;
} flags;
struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig;
@@ -318,6 +367,7 @@ struct NvKmsKapiHeadReplyConfig {
};
struct NvKmsKapiModeSetReplyConfig {
enum NvKmsFlipResult flipResult;
struct NvKmsKapiHeadReplyConfig
headReplyConfig[NVKMS_KAPI_MAX_HEADS];
};
@@ -434,6 +484,12 @@ enum NvKmsKapiAllocationType {
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
};
typedef enum NvKmsKapiRegisterWaiterResultRec {
NVKMS_KAPI_REG_WAITER_FAILED,
NVKMS_KAPI_REG_WAITER_SUCCESS,
NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED,
} NvKmsKapiRegisterWaiterResult;
struct NvKmsKapiFunctionsTable {
/*!
@@ -519,8 +575,8 @@ struct NvKmsKapiFunctionsTable {
);
/*!
* Revoke permissions previously granted. Only one (dispIndex, head,
* display) is currently supported.
* Revoke modeset permissions previously granted. Only one (dispIndex,
* head, display) is currently supported.
*
* \param [in] device A device returned by allocateDevice().
*
@@ -537,6 +593,34 @@ struct NvKmsKapiFunctionsTable {
NvKmsKapiDisplay display
);
/*!
* Grant modeset sub-owner permissions to fd. This is used by clients to
* convert drm 'master' permissions into nvkms sub-owner permission.
*
* \param [in] fd fd from opening /dev/nvidia-modeset.
*
* \param [in] device A device returned by allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*grantSubOwnership)
(
NvS32 fd,
struct NvKmsKapiDevice *device
);
/*!
* Revoke sub-owner permissions previously granted.
*
* \param [in] device A device returned by allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*revokeSubOwnership)
(
struct NvKmsKapiDevice *device
);
/*!
* Registers for notification, via
* NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified
@@ -1122,6 +1206,199 @@ struct NvKmsKapiFunctionsTable {
NvP64 dmaBuf,
NvU32 limit);
/*!
* Import a semaphore surface allocated elsewhere to NVKMS and return a
* handle to the new object.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] nvKmsParamsUser Userspace pointer to driver-specific
* parameters describing the semaphore
* surface being imported.
*
* \param [in] nvKmsParamsSize Size of the driver-specific parameter
* struct.
*
* \param [out] pSemaphoreMap Returns a CPU mapping of the semaphore
* surface's semaphore memory to the client.
*
* \param [out] pMaxSubmittedMap Returns a CPU mapping of the semaphore
* surface's max-submitted-value memory to the client.
*
* \return struct NvKmsKapiSemaphoreSurface* on success, NULL on failure.
*/
struct NvKmsKapiSemaphoreSurface* (*importSemaphoreSurface)
(
struct NvKmsKapiDevice *device,
NvU64 nvKmsParamsUser,
NvU64 nvKmsParamsSize,
void **pSemaphoreMap,
void **pMaxSubmittedMap
);
/*!
* Free an imported semaphore surface.
*
* \param [in] device The device passed to
* importSemaphoreSurface() when creating
* semaphoreSurface.
*
* \param [in] semaphoreSurface A semaphore surface returned by
* importSemaphoreSurface().
*/
void (*freeSemaphoreSurface)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface
);
/*!
* Register a callback to be called when a semaphore reaches a value.
*
* The callback will be called when the semaphore at index in
* semaphoreSurface reaches the value wait_value. The callback will
* be called at most once and is automatically unregistered when called.
* It may also be unregistered (i.e., cancelled) explicitly using the
* unregisterSemaphoreSurfaceCallback() function. To avoid leaking the
* memory used to track the registered callback, callers must ensure one
* of these methods of unregistration is used for every successful
* callback registration that returns a non-NULL pCallbackHandle.
*
* \param [in] device The device passed to
* importSemaphoreSurface() when creating
* semaphoreSurface.
*
* \param [in] semaphoreSurface A semaphore surface returned by
* importSemaphoreSurface().
*
* \param [in] pCallback A pointer to the function to call when
* the specified value is reached. NULL
* means no callback.
*
* \param [in] pData Arbitrary data to be passed back to the
* callback as its sole parameter.
*
* \param [in] index The index of the semaphore within
* semaphoreSurface.
*
* \param [in] wait_value The value the semaphore must reach or
* exceed before the callback is called.
*
* \param [in] new_value The value the semaphore will be set to
* when it reaches or exceeds <wait_value>.
* 0 means do not update the value.
*
* \param [out] pCallbackHandle On success, the value pointed to will
* contain an opaque handle to the
* registered callback that may be used to
* cancel it if needed. Unused if pCallback
* is NULL.
*
* \return NVKMS_KAPI_REG_WAITER_SUCCESS if the waiter was registered or if
* no callback was requested and the semaphore at <index> has
* already reached or exceeded <wait_value>
*
* NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED if a callback was
* requested and the semaphore at <index> has already reached or
* exceeded <wait_value>
*
* NVKMS_KAPI_REG_WAITER_FAILED if waiter registration failed.
*/
NvKmsKapiRegisterWaiterResult
(*registerSemaphoreSurfaceCallback)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
NvKmsSemaphoreSurfaceCallbackProc *pCallback,
void *pData,
NvU64 index,
NvU64 wait_value,
NvU64 new_value,
struct NvKmsKapiSemaphoreSurfaceCallback **pCallbackHandle
);
/*!
* Unregister a callback registered via registerSemaphoreSurfaceCallback()
*
* If the callback has not yet been called, this function will cancel the
* callback and free its associated resources.
*
* Note this function treats the callback handle as a pointer. While this
* function does not dereference that pointer itself, the underlying call
* to RM does within a properly guarded critical section that first ensures
* it is not in the process of being used within a callback. This means
* the callstack must take into consideration that pointers are not in
* general unique handles if they may have been freed, since a subsequent
* malloc could return the same pointer value at that point. This callchain
* avoids that by leveraging the behavior of the underlying RM APIs:
*
* 1) A callback handle is referenced relative to its corresponding
* (semaphore surface, index, wait_value) tuple here and within RM. It
* is not a valid handle outside of that scope.
*
* 2) A callback can not be registered against an already-reached value
* for a given semaphore surface index.
*
* 3) A given callback handle can not be registered twice against the same
* (semaphore surface, index, wait_value) tuple, so unregistration will
* never race with registration at the RM level, and would only race at
* a higher level if used incorrectly. Since this is kernel code, we
* can safely assume there won't be malicious clients purposely misusing
* the API, but the burden is placed on the caller to ensure its usage
* does not lead to races at higher levels.
*
* These factors considered together ensure any valid registered handle is
* either still in the relevant waiter list and refers to the same event/
* callback as when it was registered, or has been removed from the list
* as part of a critical section that also destroys the list itself and
* makes future lookups in that list impossible, and hence eliminates the
* chance of comparing a stale handle with a new handle of the same value
* as part of a lookup.
*
* \param [in] device The device passed to
* importSemaphoreSurface() when creating
* semaphoreSurface.
*
* \param [in] semaphoreSurface The semaphore surface passed to
* registerSemaphoreSurfaceCallback() when
* registering the callback.
*
* \param [in] index The index passed to
* registerSemaphoreSurfaceCallback() when
* registering the callback.
*
* \param [in] wait_value The wait_value passed to
* registerSemaphoreSurfaceCallback() when
* registering the callback.
*
* \param [in] callbackHandle The callback handle returned by
* registerSemaphoreSurfaceCallback().
*/
NvBool
(*unregisterSemaphoreSurfaceCallback)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
NvU64 index,
NvU64 wait_value,
struct NvKmsKapiSemaphoreSurfaceCallback *callbackHandle
);
/*!
* Update the value of a semaphore surface from the CPU.
*
* Update the semaphore value at the specified index from the CPU, then
* wake up any pending CPU waiters associated with that index that are
* waiting on it reaching a value <= the new value.
*/
NvBool
(*setSemaphoreSurfaceValue)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiSemaphoreSurface *semaphoreSurface,
NvU64 index,
NvU64 new_value
);
};
/** @} */
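To show how the new semaphore-surface entry points fit together, a hedged sketch of registering and cancelling a waiter through the function table. It is not part of the commit; how the table pointer and the imported surface are obtained is assumed, and the `example_*` names are hypothetical.

```c
/* Hypothetical callback: per the comments above, it runs from RM's interrupt
 * callchain, so it must not call back into NVKMS-KAPI or RM. */
static void example_semsurf_done(void *pData)
{
    (void)pData;
}

static void example_semsurf_waiter(const struct NvKmsKapiFunctionsTable *nvKms,
                                   struct NvKmsKapiDevice *device,
                                   struct NvKmsKapiSemaphoreSurface *semSurface,
                                   NvU64 wait_value)
{
    struct NvKmsKapiSemaphoreSurfaceCallback *handle = NULL;
    NvKmsKapiRegisterWaiterResult res;

    res = nvKms->registerSemaphoreSurfaceCallback(device, semSurface,
                                                  example_semsurf_done,
                                                  NULL /* pData */,
                                                  0    /* index */,
                                                  wait_value,
                                                  0    /* new_value: no update */,
                                                  &handle);

    if (res == NVKMS_KAPI_REG_WAITER_SUCCESS && handle != NULL) {
        /* ... later, if the callback has not fired yet, the registration can
         * be cancelled using the same (surface, index, wait_value) tuple. */
        nvKms->unregisterSemaphoreSurfaceCallback(device, semSurface,
                                                  0 /* index */, wait_value,
                                                  handle);
    }
}
```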


@@ -162,7 +162,7 @@ NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
@@ -207,9 +207,13 @@ enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
extern NvU32 os_page_size;
extern NvU64 os_page_mask;
@@ -226,12 +230,14 @@ extern NvBool os_dma_buf_enabled;
* ---------------------------------------------------------------------------
*/
#define NV_DBG_INFO 0x0
#define NV_DBG_SETUP 0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_INFO 0x1
#define NV_DBG_SETUP 0x2
#define NV_DBG_WARNINGS 0x3
#define NV_DBG_ERRORS 0x4
#define NV_DBG_HW_ERRORS 0x5
#define NV_DBG_FATAL 0x6
#define NV_DBG_FORCE_LEVEL(level) ((level) | (1 << 8))
void NV_API_CALL out_string(const char *str);
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
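For illustration (not part of the commit), how these levels are passed to nv_printf(). Whether a message is emitted still depends on the driver's runtime verbosity threshold; judging from the macro alone, NV_DBG_FORCE_LEVEL() tags a message (bit 8) so it is printed regardless of that threshold, but that behavior is an assumption here.

```c
static void example_log_messages(void)
{
    /* Filtered by the configured verbosity threshold. */
    nv_printf(NV_DBG_ERRORS, "NVRM: example: initialization failed\n");

    /* NV_DBG_INFO is now 0x1, the lowest of the renumbered levels. */
    nv_printf(NV_DBG_INFO, "NVRM: example: probing device\n");

    /* Assumed semantics: force the message out irrespective of the threshold. */
    nv_printf(NV_DBG_FORCE_LEVEL(NV_DBG_INFO), "NVRM: example: forced message\n");
}
```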

File diff suppressed because it is too large.


@@ -0,0 +1,334 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-kthread-q.h"
#include "nv-list-helpers.h"
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
printk(KERN_ERR format); \
unlikely(__ret_warn_on); \
})
#endif
#define NVQ_WARN(fmt, ...) \
do { \
if (in_interrupt()) { \
WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
##__VA_ARGS__); \
} \
else { \
WARN(1, "nv_kthread_q: task: %s: " fmt, \
current->comm, \
##__VA_ARGS__); \
} \
} while (0)
static int _main_loop(void *args)
{
nv_kthread_q_t *q = (nv_kthread_q_t *)args;
nv_kthread_q_item_t *q_item = NULL;
unsigned long flags;
while (1) {
// Normally this thread is never interrupted. However,
// down_interruptible (instead of down) is called here,
// in order to avoid being classified as a potentially
// hung task, by the kernel watchdog.
while (down_interruptible(&q->q_sem))
NVQ_WARN("Interrupted during semaphore wait\n");
if (atomic_read(&q->main_loop_should_exit))
break;
spin_lock_irqsave(&q->q_lock, flags);
// The q_sem semaphore prevents us from getting here unless there is
// at least one item in the list, so an empty list indicates a bug.
if (unlikely(list_empty(&q->q_list_head))) {
spin_unlock_irqrestore(&q->q_lock, flags);
NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
continue;
}
// Consume one item from the queue
q_item = list_first_entry(&q->q_list_head,
nv_kthread_q_item_t,
q_list_node);
list_del_init(&q_item->q_list_node);
spin_unlock_irqrestore(&q->q_lock, flags);
// Run the item
q_item->function_to_run(q_item->function_args);
// Make debugging a little simpler by clearing this between runs:
q_item = NULL;
}
while (!kthread_should_stop())
schedule();
return 0;
}
void nv_kthread_q_stop(nv_kthread_q_t *q)
{
// check if queue has been properly initialized
if (unlikely(!q->q_kthread))
return;
nv_kthread_q_flush(q);
// If this assertion fires, then a caller likely either broke the API rules,
// by adding items after calling nv_kthread_q_stop, or possibly messed up
// with inadequate flushing of self-rescheduling q_items.
if (unlikely(!list_empty(&q->q_list_head)))
NVQ_WARN("list not empty after flushing\n");
if (likely(!atomic_read(&q->main_loop_should_exit))) {
atomic_set(&q->main_loop_should_exit, 1);
// Wake up the kthread so that it can see that it needs to stop:
up(&q->q_sem);
kthread_stop(q->q_kthread);
q->q_kthread = NULL;
}
}
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads
// -If the first thread's stack is resident on the preferred node, return this
// thread.
// -Otherwise, create a second thread. If its stack is resident on the
// preferred node, stop the first thread and return this one.
// -Otherwise, create a third thread. The stack allocator does not find a
// cached stack, and so falls back to vmalloc, which takes the NUMA hint into
// consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
nv_kthread_q_t *q,
int preferred_node,
const char *q_name)
{
unsigned i, j;
const static unsigned attempts = 3;
struct task_struct *thread[3];
for (i = 0;; i++) {
struct page *stack;
thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);
if (unlikely(IS_ERR(thread[i]))) {
// Instead of failing, pick the previous thread, even if its
// stack is not allocated on the preferred node.
if (i > 0)
i--;
break;
}
// vmalloc is not used to allocate the stack, so simply return the
// thread, even if its stack may not be allocated on the preferred node
if (!is_vmalloc_addr(thread[i]->stack))
break;
// Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node
if ((i == (attempts - 1)))
break;
// Get the NUMA node where the first page of the stack is resident. If
// it is the preferred node, select this thread.
stack = vmalloc_to_page(thread[i]->stack);
if (page_to_nid(stack) == preferred_node)
break;
}
for (j = i; j > 0; j--)
kthread_stop(thread[j - 1]);
return thread[i];
}
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
memset(q, 0, sizeof(*q));
INIT_LIST_HEAD(&q->q_list_head);
spin_lock_init(&q->q_lock);
sema_init(&q->q_sem, 0);
if (preferred_node == NV_KTHREAD_NO_NODE) {
q->q_kthread = kthread_create(_main_loop, q, q_name);
}
else {
q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
}
if (IS_ERR(q->q_kthread)) {
int err = PTR_ERR(q->q_kthread);
// Clear q_kthread before returning so that nv_kthread_q_stop() can be
// safely called on it making error handling easier.
q->q_kthread = NULL;
return err;
}
wake_up_process(q->q_kthread);
return 0;
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&q->q_lock, flags);
if (likely(list_empty(&q_item->q_list_node)))
list_add_tail(&q_item->q_list_node, &q->q_list_head);
else
ret = 0;
spin_unlock_irqrestore(&q->q_lock, flags);
if (likely(ret))
up(&q->q_sem);
return ret;
}
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
nv_q_func_t function_to_run,
void *function_args)
{
INIT_LIST_HEAD(&q_item->q_list_node);
q_item->function_to_run = function_to_run;
q_item->function_args = function_args;
}
// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
nv_kthread_q_item_t *q_item)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
"called with a non-alive q: 0x%p\n", q);
return 0;
}
return _raw_q_schedule(q, q_item);
}
static void _q_flush_function(void *args)
{
struct completion *completion = (struct completion *)args;
complete(completion);
}
static void _raw_q_flush(nv_kthread_q_t *q)
{
nv_kthread_q_item_t q_item;
DECLARE_COMPLETION_ONSTACK(completion);
nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);
_raw_q_schedule(q, &q_item);
// Wait for the flush item to run. Once it has run, then all of the
// previously queued items in front of it will have run, so that means
// the flush is complete.
wait_for_completion(&completion);
}
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
if (unlikely(atomic_read(&q->main_loop_should_exit))) {
NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
"nv_kthread_q_stop. q: 0x%p\n", q);
return;
}
// This 2x flush is not a typing mistake. The queue really does have to be
// flushed twice, in order to take care of the case of a q_item that
// reschedules itself.
_raw_q_flush(q);
_raw_q_flush(q);
}
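To make the double-flush rationale above concrete, a hypothetical self-rescheduling item (not part of the commit): after the first _raw_q_flush() returns, an item like this may have re-queued itself behind the flush marker, and only the second flush is guaranteed to drain behind that re-run.

```c
/* Hypothetical: a q_item that reschedules itself exactly once.
 * Elsewhere: nv_kthread_q_item_init(&example_self_item,
 *                                   example_reschedule_once, &example_self_q); */
static nv_kthread_q_t example_self_q;
static nv_kthread_q_item_t example_self_item;
static int example_remaining = 1;

static void example_reschedule_once(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;

    if (example_remaining-- > 0) {
        /* This re-queue can land after a flush marker that was queued while
         * we were running, which is exactly why nv_kthread_q_flush() calls
         * _raw_q_flush() twice. */
        nv_kthread_q_schedule_q_item(q, &example_self_item);
    }
}
```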


@@ -43,9 +43,13 @@
#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
typedef struct fence_cb nv_dma_fence_cb_t;
typedef fence_func_t nv_dma_fence_func_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
typedef struct dma_fence_cb nv_dma_fence_cb_t;
typedef dma_fence_func_t nv_dma_fence_func_t;
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
@@ -97,6 +101,14 @@ static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#endif
}
static inline int nv_dma_fence_signal_locked(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal_locked(fence);
#else
return dma_fence_signal_locked(fence);
#endif
}
static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_context_alloc(num);
@@ -108,7 +120,7 @@ static inline u64 nv_dma_fence_context_alloc(unsigned num) {
static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
const nv_dma_fence_ops_t *ops,
spinlock_t *lock, u64 context, unsigned seqno) {
spinlock_t *lock, u64 context, uint64_t seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_init(fence, ops, lock, context, seqno);
#else
@@ -116,6 +128,29 @@ nv_dma_fence_init(nv_dma_fence_t *fence,
#endif
}
static inline void
nv_dma_fence_set_error(nv_dma_fence_t *fence,
int error) {
#if defined(NV_DMA_FENCE_SET_ERROR_PRESENT)
return dma_fence_set_error(fence, error);
#elif defined(NV_FENCE_SET_ERROR_PRESENT)
return fence_set_error(fence, error);
#else
fence->status = error;
#endif
}
static inline int
nv_dma_fence_add_callback(nv_dma_fence_t *fence,
nv_dma_fence_cb_t *cb,
nv_dma_fence_func_t func) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_add_callback(fence, cb, func);
#else
return dma_fence_add_callback(fence, cb, func);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */


@@ -121,6 +121,20 @@ static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
#endif
}
static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
#else
dma_resv_add_shared_fence(obj, fence);
#endif
#else
reservation_object_add_shared_fence(obj, fence);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */


@@ -61,4 +61,15 @@
#undef NV_DRM_FENCE_AVAILABLE
#endif
/*
* We can support color management if either drm_helper_crtc_enable_color_mgmt()
* or drm_crtc_enable_color_mgmt() exist.
*/
#if defined(NV_DRM_HELPER_CRTC_ENABLE_COLOR_MGMT_PRESENT) || \
defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
#define NV_DRM_COLOR_MGMT_AVAILABLE
#else
#undef NV_DRM_COLOR_MGMT_AVAILABLE
#endif
#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */


@@ -349,10 +349,125 @@ nv_drm_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
static const NvU32 __nv_drm_connector_supported_colorspaces =
BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
#endif
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
static int
__nv_drm_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *new_connector_state =
drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_connector_state =
drm_atomic_get_old_connector_state(state, connector);
struct nv_drm_device *nv_dev = to_nv_device(connector->dev);
struct drm_crtc *crtc = new_connector_state->crtc;
struct drm_crtc_state *crtc_state;
struct nv_drm_crtc_state *nv_crtc_state;
struct NvKmsKapiHeadRequestedConfig *req_config;
if (!crtc) {
return 0;
}
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
nv_crtc_state = to_nv_crtc_state(crtc_state);
req_config = &nv_crtc_state->req_config;
/*
* Override metadata for the entire head instead of allowing NVKMS to derive
* it from the layers' metadata.
*
* This is the metadata that will be sent to the display, and if applicable,
* layers will be tone mapped to this metadata rather than that of the
* display.
*/
req_config->flags.hdrInfoFrameChanged =
!drm_connector_atomic_hdr_metadata_equal(old_connector_state,
new_connector_state);
if (new_connector_state->hdr_output_metadata &&
new_connector_state->hdr_output_metadata->data) {
/*
* Note that HDMI definitions are used here even though we might not
* be using HDMI. While that seems odd, it is consistent with
* upstream behavior.
*/
struct hdr_output_metadata *hdr_metadata =
new_connector_state->hdr_output_metadata->data;
struct hdr_metadata_infoframe *info_frame =
&hdr_metadata->hdmi_metadata_type1;
unsigned int i;
if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) {
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i++) {
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].x =
info_frame->display_primaries[i].x;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].y =
info_frame->display_primaries[i].y;
}
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.x =
info_frame->white_point.x;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.y =
info_frame->white_point.y;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxDisplayMasteringLuminance =
info_frame->max_display_mastering_luminance;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.minDisplayMasteringLuminance =
info_frame->min_display_mastering_luminance;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxCLL =
info_frame->max_cll;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxFALL =
info_frame->max_fall;
req_config->modeSetConfig.hdrInfoFrame.eotf = info_frame->eotf;
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_TRUE;
} else {
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_FALSE;
}
req_config->flags.colorimetryChanged =
(old_connector_state->colorspace != new_connector_state->colorspace);
// When adding a case here, also add to __nv_drm_connector_supported_colorspaces
switch (new_connector_state->colorspace) {
case DRM_MODE_COLORIMETRY_DEFAULT:
req_config->modeSetConfig.colorimetry =
NVKMS_OUTPUT_COLORIMETRY_DEFAULT;
break;
case DRM_MODE_COLORIMETRY_BT2020_RGB:
case DRM_MODE_COLORIMETRY_BT2020_YCC:
// Ignore RGB/YCC
// See https://patchwork.freedesktop.org/patch/525496/?series=111865&rev=4
req_config->modeSetConfig.colorimetry =
NVKMS_OUTPUT_COLORIMETRY_BT2100;
break;
default:
// XXX HDR TODO: Add support for more color spaces
NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported color space");
return -EINVAL;
}
return 0;
}
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
.get_modes = nv_drm_connector_get_modes,
.mode_valid = nv_drm_connector_mode_valid,
.best_encoder = nv_drm_connector_best_encoder,
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
.atomic_check = __nv_drm_connector_atomic_check,
#endif
};
static struct drm_connector*
@@ -405,6 +520,32 @@ nv_drm_connector_new(struct drm_device *dev,
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_HDMI) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
if (drm_mode_create_hdmi_colorspace_property(
&nv_connector->base,
__nv_drm_connector_supported_colorspaces) == 0) {
#else
if (drm_mode_create_hdmi_colorspace_property(&nv_connector->base) == 0) {
#endif
drm_connector_attach_colorspace_property(&nv_connector->base);
}
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
} else if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DP) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
if (drm_mode_create_dp_colorspace_property(
&nv_connector->base,
__nv_drm_connector_supported_colorspaces) == 0) {
#else
if (drm_mode_create_dp_colorspace_property(&nv_connector->base) == 0) {
#endif
drm_connector_attach_colorspace_property(&nv_connector->base);
}
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
}
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
/* Register connector with DRM subsystem */
ret = drm_connector_register(&nv_connector->base);



@@ -48,6 +48,11 @@
#include <linux/host1x-next.h>
#endif
#if defined(NV_DRM_DRM_COLOR_MGMT_H_PRESENT)
#include <drm/drm_color_mgmt.h>
#endif
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
static int
nv_drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
@@ -399,27 +404,25 @@ plane_req_config_update(struct drm_plane *plane,
}
for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i ++) {
req_config->config.hdrMetadata.displayPrimaries[i].x =
req_config->config.hdrMetadata.val.displayPrimaries[i].x =
info_frame->display_primaries[i].x;
req_config->config.hdrMetadata.displayPrimaries[i].y =
req_config->config.hdrMetadata.val.displayPrimaries[i].y =
info_frame->display_primaries[i].y;
}
req_config->config.hdrMetadata.whitePoint.x =
req_config->config.hdrMetadata.val.whitePoint.x =
info_frame->white_point.x;
req_config->config.hdrMetadata.whitePoint.y =
req_config->config.hdrMetadata.val.whitePoint.y =
info_frame->white_point.y;
req_config->config.hdrMetadata.maxDisplayMasteringLuminance =
req_config->config.hdrMetadata.val.maxDisplayMasteringLuminance =
info_frame->max_display_mastering_luminance;
req_config->config.hdrMetadata.minDisplayMasteringLuminance =
req_config->config.hdrMetadata.val.minDisplayMasteringLuminance =
info_frame->min_display_mastering_luminance;
req_config->config.hdrMetadata.maxCLL =
req_config->config.hdrMetadata.val.maxCLL =
info_frame->max_cll;
req_config->config.hdrMetadata.maxFALL =
req_config->config.hdrMetadata.val.maxFALL =
info_frame->max_fall;
req_config->config.hdrMetadataSpecified = true;
switch (info_frame->eotf) {
case HDMI_EOTF_SMPTE_ST2084:
req_config->config.tf = NVKMS_OUTPUT_TF_PQ;
@@ -432,10 +435,21 @@ plane_req_config_update(struct drm_plane *plane,
NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported EOTF");
return -1;
}
req_config->config.hdrMetadata.enabled = true;
} else {
req_config->config.hdrMetadataSpecified = false;
req_config->config.hdrMetadata.enabled = false;
req_config->config.tf = NVKMS_OUTPUT_TF_NONE;
}
req_config->flags.hdrMetadataChanged =
((old_config.hdrMetadata.enabled !=
req_config->config.hdrMetadata.enabled) ||
memcmp(&old_config.hdrMetadata.val,
&req_config->config.hdrMetadata.val,
sizeof(struct NvKmsHDRStaticMetadata)));
req_config->flags.tfChanged = (old_config.tf != req_config->config.tf);
#endif
/*
@@ -692,9 +706,11 @@ static inline void __nv_drm_plane_atomic_destroy_state(
#endif
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(state);
drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
{
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(state);
drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
}
#endif
}
@@ -800,6 +816,9 @@ nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
&(to_nv_crtc_state(crtc->state)->req_config),
&nv_state->req_config);
nv_state->ilut_ramps = NULL;
nv_state->olut_ramps = NULL;
return &nv_state->base;
}
@@ -823,6 +842,9 @@ static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
__nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base);
nv_drm_free(nv_state->ilut_ramps);
nv_drm_free(nv_state->olut_ramps);
nv_drm_free(nv_state);
}
@@ -833,6 +855,9 @@ static struct drm_crtc_funcs nv_crtc_funcs = {
.destroy = nv_drm_crtc_destroy,
.atomic_duplicate_state = nv_drm_atomic_crtc_duplicate_state,
.atomic_destroy_state = nv_drm_atomic_crtc_destroy_state,
#if defined(NV_DRM_ATOMIC_HELPER_LEGACY_GAMMA_SET_PRESENT)
.gamma_set = drm_atomic_helper_legacy_gamma_set,
#endif
};
/*
@@ -866,6 +891,198 @@ static int head_modeset_config_attach_connector(
return 0;
}
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
static int color_mgmt_config_copy_lut(struct NvKmsLutRamps *nvkms_lut,
struct drm_color_lut *drm_lut,
uint64_t lut_len)
{
uint64_t i = 0;
if (lut_len != NVKMS_LUT_ARRAY_SIZE) {
return -EINVAL;
}
/*
* Both NvKms and drm LUT values are 16-bit linear values. NvKms LUT ramps
* are in arrays in a single struct while drm LUT ramps are an array of
* structs.
*/
for (i = 0; i < lut_len; i++) {
nvkms_lut->red[i] = drm_lut[i].red;
nvkms_lut->green[i] = drm_lut[i].green;
nvkms_lut->blue[i] = drm_lut[i].blue;
}
return 0;
}
static void color_mgmt_config_ctm_to_csc(struct NvKmsCscMatrix *nvkms_csc,
struct drm_color_ctm *drm_ctm)
{
int y;
/* CTM is a 3x3 matrix while ours is 3x4. Zero out the last column. */
nvkms_csc->m[0][3] = nvkms_csc->m[1][3] = nvkms_csc->m[2][3] = 0;
for (y = 0; y < 3; y++) {
int x;
for (x = 0; x < 3; x++) {
/*
* Values in the CTM are encoded in S31.32 sign-magnitude fixed-
* point format, while NvKms CSC values are signed 2's-complement
* S15.16 (Ssign-extend12-3.16?) fixed-point format.
*/
NvU64 ctmVal = drm_ctm->matrix[y*3 + x];
NvU64 signBit = ctmVal & (1ULL << 63);
NvU64 magnitude = ctmVal & ~signBit;
/*
* Drop the low 16 bits of the fractional part and the high 17 bits
* of the integral part. Drop 17 bits to avoid corner cases where
* the highest resulting bit is a 1, causing the `cscVal = -cscVal`
* line to result in a positive number.
*/
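/*
 * Worked example (added for illustration): a CTM entry of 1.0 is the
 * S31.32 value 0x0000000100000000; dropping the low 16 fractional bits
 * and the high 17 integral bits leaves 0x00010000, i.e. 1.0 in S15.16.
 * An entry of -0.5 has the sign bit set and magnitude 0x0000000080000000,
 * which becomes 0x00008000 and is then negated to -0.5 in S15.16.
 */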
NvS32 cscVal = (magnitude >> 16) & ((1ULL << 31) - 1);
if (signBit) {
cscVal = -cscVal;
}
nvkms_csc->m[y][x] = cscVal;
}
}
}
static int color_mgmt_config_set(struct nv_drm_crtc_state *nv_crtc_state,
struct NvKmsKapiHeadRequestedConfig *req_config)
{
struct NvKmsKapiHeadModeSetConfig *modeset_config =
&req_config->modeSetConfig;
struct drm_crtc_state *crtc_state = &nv_crtc_state->base;
int ret = 0;
struct drm_color_lut *degamma_lut = NULL;
struct drm_color_ctm *ctm = NULL;
struct drm_color_lut *gamma_lut = NULL;
uint64_t degamma_len = 0;
uint64_t gamma_len = 0;
int i;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
/*
* According to the comment in the Linux kernel's
* drivers/gpu/drm/drm_color_mgmt.c, if any of these properties are NULL,
* that LUT or CTM needs to be changed to a linear LUT or identity matrix
* respectively.
*/
req_config->flags.lutChanged = NV_TRUE;
if (crtc_state->degamma_lut) {
nv_crtc_state->ilut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->ilut_ramps));
if (!nv_crtc_state->ilut_ramps) {
ret = -ENOMEM;
goto fail;
}
degamma_lut = (struct drm_color_lut *)crtc_state->degamma_lut->data;
degamma_len = crtc_state->degamma_lut->length /
sizeof(struct drm_color_lut);
if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->ilut_ramps,
degamma_lut,
degamma_len)) != 0) {
goto fail;
}
modeset_config->lut.input.specified = NV_TRUE;
modeset_config->lut.input.depth = 30; /* specify the full LUT */
modeset_config->lut.input.start = 0;
modeset_config->lut.input.end = degamma_len - 1;
modeset_config->lut.input.pRamps = nv_crtc_state->ilut_ramps;
} else {
/* setting input.end to 0 is equivalent to disabling the LUT, which
* should be equivalent to a linear LUT */
modeset_config->lut.input.specified = NV_TRUE;
modeset_config->lut.input.depth = 30; /* specify the full LUT */
modeset_config->lut.input.start = 0;
modeset_config->lut.input.end = 0;
modeset_config->lut.input.pRamps = NULL;
}
nv_drm_for_each_new_plane_in_state(crtc_state->state, plane,
plane_state, i) {
struct nv_drm_plane *nv_plane = to_nv_plane(plane);
uint32_t layer = nv_plane->layer_idx;
struct NvKmsKapiLayerRequestedConfig *layer_config;
if (layer == NVKMS_KAPI_LAYER_INVALID_IDX || plane_state->crtc != crtc_state->crtc) {
continue;
}
layer_config = &req_config->layerRequestedConfig[layer];
if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX && crtc_state->ctm) {
ctm = (struct drm_color_ctm *)crtc_state->ctm->data;
color_mgmt_config_ctm_to_csc(&layer_config->config.csc, ctm);
layer_config->config.cscUseMain = NV_FALSE;
} else {
/* When crtc_state->ctm is unset, this also sets the main layer to
* the identity matrix.
*/
layer_config->config.csc = NVKMS_IDENTITY_CSC_MATRIX;
}
layer_config->flags.cscChanged = NV_TRUE;
}
if (crtc_state->gamma_lut) {
nv_crtc_state->olut_ramps = nv_drm_calloc(1, sizeof(*nv_crtc_state->olut_ramps));
if (!nv_crtc_state->olut_ramps) {
ret = -ENOMEM;
goto fail;
}
gamma_lut = (struct drm_color_lut *)crtc_state->gamma_lut->data;
gamma_len = crtc_state->gamma_lut->length /
sizeof(struct drm_color_lut);
if ((ret = color_mgmt_config_copy_lut(nv_crtc_state->olut_ramps,
gamma_lut,
gamma_len)) != 0) {
goto fail;
}
modeset_config->lut.output.specified = NV_TRUE;
modeset_config->lut.output.enabled = NV_TRUE;
modeset_config->lut.output.pRamps = nv_crtc_state->olut_ramps;
} else {
/* disabling the output LUT should be equivalent to setting a linear
* LUT */
modeset_config->lut.output.specified = NV_TRUE;
modeset_config->lut.output.enabled = NV_FALSE;
modeset_config->lut.output.pRamps = NULL;
}
return 0;
fail:
/* free allocated state */
nv_drm_free(nv_crtc_state->ilut_ramps);
nv_drm_free(nv_crtc_state->olut_ramps);
/* remove dangling pointers */
nv_crtc_state->ilut_ramps = NULL;
nv_crtc_state->olut_ramps = NULL;
modeset_config->lut.input.pRamps = NULL;
modeset_config->lut.output.pRamps = NULL;
/* prevent attempts at reading NULLs */
modeset_config->lut.input.specified = NV_FALSE;
modeset_config->lut.output.specified = NV_FALSE;
return ret;
}
#endif /* NV_DRM_COLOR_MGMT_AVAILABLE */
/**
* nv_drm_crtc_atomic_check() can fail after it has modified
* the 'nv_drm_crtc_state::req_config'; that is fine because 'nv_drm_crtc_state'
@@ -887,6 +1104,9 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
struct NvKmsKapiHeadRequestedConfig *req_config =
&nv_crtc_state->req_config;
int ret = 0;
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
struct nv_drm_device *nv_dev = to_nv_device(crtc_state->crtc->dev);
#endif
if (crtc_state->mode_changed) {
drm_mode_to_nvkms_display_mode(&crtc_state->mode,
@@ -925,6 +1145,25 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
req_config->flags.activeChanged = NV_TRUE;
}
#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
req_config->modeSetConfig.vrrEnabled = crtc_state->vrr_enabled;
#endif
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
if (nv_dev->drmMasterChangedSinceLastAtomicCommit &&
(crtc_state->degamma_lut ||
crtc_state->ctm ||
crtc_state->gamma_lut)) {
crtc_state->color_mgmt_changed = NV_TRUE;
}
if (crtc_state->color_mgmt_changed) {
if ((ret = color_mgmt_config_set(nv_crtc_state, req_config)) != 0) {
return ret;
}
}
#endif
return ret;
}
@@ -1156,6 +1395,8 @@ nv_drm_plane_create(struct drm_device *dev,
plane,
validLayerRRTransforms);
nv_drm_free(formats);
return plane;
failed_plane_init:
@@ -1220,6 +1461,22 @@ static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev,
drm_crtc_helper_add(&nv_crtc->base, &nv_crtc_helper_funcs);
#if defined(NV_DRM_COLOR_MGMT_AVAILABLE)
#if defined(NV_DRM_CRTC_ENABLE_COLOR_MGMT_PRESENT)
drm_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE, true,
NVKMS_LUT_ARRAY_SIZE);
#else
drm_helper_crtc_enable_color_mgmt(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE,
NVKMS_LUT_ARRAY_SIZE);
#endif
ret = drm_mode_crtc_set_gamma_size(&nv_crtc->base, NVKMS_LUT_ARRAY_SIZE);
if (ret != 0) {
NV_DRM_DEV_LOG_WARN(
nv_dev,
"Failed to initialize legacy gamma support for head %u", head);
}
#endif
return &nv_crtc->base;
failed_init_crtc:
@@ -1328,10 +1585,16 @@ static void NvKmsKapiCrcsToDrm(const struct NvKmsKapiCrcs *crcs,
{
drmCrcs->outputCrc32.value = crcs->outputCrc32.value;
drmCrcs->outputCrc32.supported = crcs->outputCrc32.supported;
drmCrcs->outputCrc32.__pad0 = 0;
drmCrcs->outputCrc32.__pad1 = 0;
drmCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value;
drmCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported;
drmCrcs->rasterGeneratorCrc32.__pad0 = 0;
drmCrcs->rasterGeneratorCrc32.__pad1 = 0;
drmCrcs->compositorCrc32.value = crcs->compositorCrc32.value;
drmCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported;
drmCrcs->compositorCrc32.__pad0 = 0;
drmCrcs->compositorCrc32.__pad1 = 0;
}
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,


@@ -129,6 +129,9 @@ struct nv_drm_crtc_state {
*/
struct NvKmsKapiHeadRequestedConfig req_config;
struct NvKmsLutRamps *ilut_ramps;
struct NvKmsLutRamps *olut_ramps;
/**
* @nv_flip:
*

View File

@@ -44,6 +44,10 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
#include <drm/drm_atomic_uapi.h>
#endif
#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif
@@ -60,6 +64,15 @@
#include <drm/drm_ioctl.h>
#endif
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
#include <drm/drm_aperture.h>
#include <drm/drm_fb_helper.h>
#endif
#if defined(NV_DRM_DRM_FBDEV_GENERIC_H_PRESENT)
#include <drm/drm_fbdev_generic.h>
#endif
#include <linux/pci.h>
/*
@@ -84,6 +97,11 @@
#include <drm/drm_atomic_helper.h>
#endif
static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
struct drm_file *filep,
NvU32 dpyId);
static int nv_drm_revoke_sub_ownership(struct drm_device *dev);
static struct nv_drm_device *dev_list = NULL;
static const char* nv_get_input_colorspace_name(
@@ -460,6 +478,11 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_dev->supportsSyncpts = resInfo.caps.supportsSyncpts;
nv_dev->semsurf_stride = resInfo.caps.semsurf.stride;
nv_dev->semsurf_max_submitted_offset =
resInfo.caps.semsurf.maxSubmittedOffset;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
gen = nv_dev->pageKindGeneration;
kind = nv_dev->genericPageKind;
@@ -546,6 +569,8 @@ static void __nv_drm_unload(struct drm_device *dev)
mutex_lock(&nv_dev->lock);
WARN_ON(nv_dev->subOwnershipGranted);
/* Disable event handling */
atomic_set(&nv_dev->enable_event_handling, false);
@@ -595,9 +620,15 @@ static int __nv_drm_master_set(struct drm_device *dev,
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
if (!nvKms->grabOwnership(nv_dev->pDevice)) {
/*
* If this device is driving a framebuffer, then nvidia-drm already has
* modeset ownership. Otherwise, grab ownership now.
*/
if (!nv_dev->hasFramebufferConsole &&
!nvKms->grabOwnership(nv_dev->pDevice)) {
return -EINVAL;
}
nv_dev->drmMasterChangedSinceLastAtomicCommit = NV_TRUE;
return 0;
}
@@ -631,6 +662,9 @@ void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
struct nv_drm_device *nv_dev = to_nv_device(dev);
int err;
nv_drm_revoke_modeset_permission(dev, file_priv, 0);
nv_drm_revoke_sub_ownership(dev);
/*
* After dropping nvkms modeset ownership, it is not guaranteed that
* drm and nvkms modeset state will remain in sync. Therefore, disable
@@ -655,7 +689,9 @@ void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv)
drm_modeset_unlock_all(dev);
nvKms->releaseOwnership(nv_dev->pDevice);
if (!nv_dev->hasFramebufferConsole) {
nvKms->releaseOwnership(nv_dev->pDevice);
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
@@ -693,15 +729,24 @@ static int nv_drm_get_dev_info_ioctl(struct drm_device *dev,
params->gpu_id = nv_dev->gpu_info.gpu_id;
params->primary_index = dev->primary->index;
params->generic_page_kind = 0;
params->page_kind_generation = 0;
params->sector_layout = 0;
params->supports_sync_fd = false;
params->supports_semsurf = false;
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
params->generic_page_kind = nv_dev->genericPageKind;
params->page_kind_generation = nv_dev->pageKindGeneration;
params->sector_layout = nv_dev->sectorLayout;
#else
params->generic_page_kind = 0;
params->page_kind_generation = 0;
params->sector_layout = 0;
#endif
/* Semaphore surfaces are only supported if the modeset = 1 parameter is set */
if ((nv_dev->pDevice) != NULL && (nv_dev->semsurf_stride != 0)) {
params->supports_semsurf = true;
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
params->supports_sync_fd = true;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
return 0;
}
@@ -833,10 +878,10 @@ static NvU32 nv_drm_get_head_bit_from_connector(struct drm_connector *connector)
return 0;
}
static int nv_drm_grant_permission_ioctl(struct drm_device *dev, void *data,
struct drm_file *filep)
static int nv_drm_grant_modeset_permission(struct drm_device *dev,
struct drm_nvidia_grant_permissions_params *params,
struct drm_file *filep)
{
struct drm_nvidia_grant_permissions_params *params = data;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_connector *target_nv_connector = NULL;
struct nv_drm_crtc *target_nv_crtc = NULL;
@@ -958,26 +1003,102 @@ done:
return ret;
}
static bool nv_drm_revoke_connector(struct nv_drm_device *nv_dev,
struct nv_drm_connector *nv_connector)
static int nv_drm_grant_sub_ownership(struct drm_device *dev,
struct drm_nvidia_grant_permissions_params *params)
{
bool ret = true;
if (nv_connector->modeset_permission_crtc) {
if (nv_connector->nv_detected_encoder) {
ret = nvKms->revokePermissions(
nv_dev->pDevice, nv_connector->modeset_permission_crtc->head,
nv_connector->nv_detected_encoder->hDisplay);
}
nv_connector->modeset_permission_crtc->modeset_permission_filep = NULL;
nv_connector->modeset_permission_crtc = NULL;
int ret = -EINVAL;
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_modeset_acquire_ctx *pctx;
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
ret);
pctx = &ctx;
#else
mutex_lock(&dev->mode_config.mutex);
pctx = dev->mode_config.acquire_ctx;
#endif
if (nv_dev->subOwnershipGranted ||
!nvKms->grantSubOwnership(params->fd, nv_dev->pDevice)) {
goto done;
}
nv_connector->modeset_permission_filep = NULL;
return ret;
/*
* When creating an ownership grant, shut down all heads and disable flip
* notifications.
*/
ret = nv_drm_atomic_helper_disable_all(dev, pctx);
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"nv_drm_atomic_helper_disable_all failed with error code %d!",
ret);
}
atomic_set(&nv_dev->enable_event_handling, false);
nv_dev->subOwnershipGranted = NV_TRUE;
ret = 0;
done:
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
mutex_unlock(&dev->mode_config.mutex);
#endif
return ret;
}
static int nv_drm_revoke_permission(struct drm_device *dev,
struct drm_file *filep, NvU32 dpyId)
static int nv_drm_grant_permission_ioctl(struct drm_device *dev, void *data,
struct drm_file *filep)
{
struct drm_nvidia_grant_permissions_params *params = data;
if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
return nv_drm_grant_modeset_permission(dev, params, filep);
} else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
return nv_drm_grant_sub_ownership(dev, params);
}
return -EINVAL;
}
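/*
 * Helper for nv_drm_revoke_modeset_permission(): extend @state so that the
 * CRTC granted to this connector (if any) is deactivated and the connector
 * is detached from its CRTC. The caller commits the resulting atomic state.
 */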
static int
nv_drm_atomic_disable_connector(struct drm_atomic_state *state,
struct nv_drm_connector *nv_connector)
{
struct drm_crtc_state *crtc_state;
struct drm_connector_state *connector_state;
int ret = 0;
if (nv_connector->modeset_permission_crtc) {
crtc_state = drm_atomic_get_crtc_state(
state, &nv_connector->modeset_permission_crtc->base);
if (!crtc_state) {
return -EINVAL;
}
crtc_state->active = false;
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
if (ret < 0) {
return ret;
}
}
connector_state = drm_atomic_get_connector_state(state, &nv_connector->base);
if (!connector_state) {
return -EINVAL;
}
return drm_atomic_set_crtc_for_connector(connector_state, NULL);
}
static int nv_drm_revoke_modeset_permission(struct drm_device *dev,
struct drm_file *filep, NvU32 dpyId)
{
struct drm_modeset_acquire_ctx *pctx;
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_crtc *crtc;
int ret = 0;
@@ -988,10 +1109,19 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
ret);
pctx = &ctx;
#else
mutex_lock(&dev->mode_config.mutex);
pctx = dev->mode_config.acquire_ctx;
#endif
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto done;
}
state->acquire_ctx = pctx;
/*
* If dpyId is set, only revoke those specific resources. Otherwise,
* it is from closing the file so revoke all resources for that filep.
@@ -1003,10 +1133,13 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->modeset_permission_filep == filep &&
(!dpyId || nv_drm_connector_is_dpy_id(connector, dpyId))) {
if (!nv_drm_connector_revoke_permissions(dev, nv_connector)) {
ret = -EINVAL;
// Continue trying to revoke as much as possible.
ret = nv_drm_atomic_disable_connector(state, nv_connector);
if (ret < 0) {
goto done;
}
// Continue trying to revoke as much as possible.
nv_drm_connector_revoke_permissions(dev, nv_connector);
}
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
@@ -1020,6 +1153,25 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
}
}
ret = drm_atomic_commit(state);
done:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
if (ret != 0) {
drm_atomic_state_free(state);
} else {
/*
* In case of success, drm_atomic_commit() takes care to cleanup and
* free @state.
*
* Comment placed above drm_atomic_commit() says: The caller must not
* free or in any other way access @state. If the function fails then
* the caller must clean up @state itself.
*/
}
#endif
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
@@ -1029,14 +1181,55 @@ static int nv_drm_revoke_permission(struct drm_device *dev,
return ret;
}
static int nv_drm_revoke_sub_ownership(struct drm_device *dev)
{
int ret = -EINVAL;
struct nv_drm_device *nv_dev = to_nv_device(dev);
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
struct drm_modeset_acquire_ctx ctx;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE,
ret);
#else
mutex_lock(&dev->mode_config.mutex);
#endif
if (!nv_dev->subOwnershipGranted) {
goto done;
}
if (!nvKms->revokeSubOwnership(nv_dev->pDevice)) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to revoke sub-ownership from NVKMS");
goto done;
}
nv_dev->subOwnershipGranted = NV_FALSE;
atomic_set(&nv_dev->enable_event_handling, true);
ret = 0;
done:
#if NV_DRM_MODESET_LOCK_ALL_END_ARGUMENT_COUNT == 3
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
#else
mutex_unlock(&dev->mode_config.mutex);
#endif
return ret;
}
static int nv_drm_revoke_permission_ioctl(struct drm_device *dev, void *data,
struct drm_file *filep)
{
struct drm_nvidia_revoke_permissions_params *params = data;
if (!params->dpyId) {
return -EINVAL;
if (params->type == NV_DRM_PERMISSIONS_TYPE_MODESET) {
if (!params->dpyId) {
return -EINVAL;
}
return nv_drm_revoke_modeset_permission(dev, filep, params->dpyId);
} else if (params->type == NV_DRM_PERMISSIONS_TYPE_SUB_OWNER) {
return nv_drm_revoke_sub_ownership(dev);
}
return nv_drm_revoke_permission(dev, filep, params->dpyId);
return -EINVAL;
}
static void nv_drm_postclose(struct drm_device *dev, struct drm_file *filep)
@@ -1051,7 +1244,7 @@ static void nv_drm_postclose(struct drm_device *dev, struct drm_file *filep)
dev->mode_config.num_connector > 0 &&
dev->mode_config.connector_list.next != NULL &&
dev->mode_config.connector_list.prev != NULL) {
nv_drm_revoke_permission(dev, filep, 0);
nv_drm_revoke_modeset_permission(dev, filep, 0);
}
}
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
@@ -1310,6 +1503,18 @@ static const struct drm_ioctl_desc nv_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(NVIDIA_GEM_PRIME_FENCE_ATTACH,
nv_drm_gem_prime_fence_attach_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CTX_CREATE,
nv_drm_semsurf_fence_ctx_create_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_CREATE,
nv_drm_semsurf_fence_create_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_WAIT,
nv_drm_semsurf_fence_wait_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(NVIDIA_SEMSURF_FENCE_ATTACH,
nv_drm_semsurf_fence_attach_ioctl,
DRM_RENDER_ALLOW|DRM_UNLOCKED),
#endif
DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY,
@@ -1513,6 +1718,30 @@ static void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
goto failed_drm_register;
}
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
if (nv_drm_fbdev_module_param &&
drm_core_check_feature(dev, DRIVER_MODESET)) {
if (!nvKms->grabOwnership(nv_dev->pDevice)) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to grab NVKMS modeset ownership");
goto failed_grab_ownership;
}
if (device->bus == &pci_bus_type) {
struct pci_dev *pdev = to_pci_dev(device);
#if defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_HAS_DRIVER_ARG)
drm_aperture_remove_conflicting_pci_framebuffers(pdev, &nv_drm_driver);
#else
drm_aperture_remove_conflicting_pci_framebuffers(pdev, nv_drm_driver.name);
#endif
}
drm_fbdev_generic_setup(dev, 32);
nv_dev->hasFramebufferConsole = NV_TRUE;
}
#endif /* defined(NV_DRM_FBDEV_GENERIC_AVAILABLE) */
/* Add NVIDIA-DRM device into list */
nv_dev->next = dev_list;
@@ -1520,6 +1749,12 @@ static void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
return; /* Success */
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
failed_grab_ownership:
drm_dev_unregister(dev);
#endif
failed_drm_register:
nv_drm_dev_free(dev);
@@ -1582,9 +1817,16 @@ void nv_drm_remove_devices(void)
{
while (dev_list != NULL) {
struct nv_drm_device *next = dev_list->next;
struct drm_device *dev = dev_list->dev;
drm_dev_unregister(dev_list->dev);
nv_drm_dev_free(dev_list->dev);
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
if (dev_list->hasFramebufferConsole) {
drm_atomic_helper_shutdown(dev);
nvKms->releaseOwnership(dev_list->pDevice);
}
#endif
drm_dev_unregister(dev);
nv_drm_dev_free(dev);
nv_drm_free(dev_list);

File diff suppressed because it is too large


@@ -41,6 +41,22 @@ int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
#endif /* NV_DRM_FENCE_AVAILABLE */
#endif /* NV_DRM_AVAILABLE */


@@ -465,7 +465,7 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
goto failed;
}
if (p->__pad != 0) {
if ((p->__pad0 != 0) || (p->__pad1 != 0)) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
goto failed;


@@ -95,6 +95,16 @@ static inline struct nv_drm_gem_object *to_nv_gem_object(
* 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
*/
static inline void
nv_drm_gem_object_reference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
drm_gem_object_get(&nv_gem->base);
#else
drm_gem_object_reference(&nv_gem->base);
#endif
}
static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{


@@ -306,6 +306,36 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
for_each_plane_in_state(__state, plane, plane_state, __i)
#endif
/*
* for_each_new_plane_in_state() was added by kernel commit
* 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by:
* Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* This commit also added the old_state and new_state pointers to
* __drm_planes_state. Because of this, the best that can be done on kernel
* versions without this macro is for_each_plane_in_state.
*/
/**
* nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
* @new_plane_state: &struct drm_plane_state iteration cursor for the new state
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all planes in an atomic update, tracking only the new
* state. This is useful in enable functions, where we need the new state the
* hardware should be in when the atomic commit operation has completed.
*/
#if !defined(for_each_new_plane_in_state)
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
#else
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
#endif
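/*
 * Illustrative usage sketch (not taken from the driver; the variable names
 * are hypothetical): iterate the planes touched by an atomic update and
 * inspect only their new state.
 *
 *     struct drm_plane *plane;
 *     struct drm_plane_state *new_plane_state;
 *     int i;
 *
 *     nv_drm_for_each_new_plane_in_state(state, plane, new_plane_state, i) {
 *         if (new_plane_state->crtc == NULL) {
 *             continue;
 *         }
 *         // handle planes that remain attached to a CRTC
 *     }
 */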
static inline struct drm_connector *
nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
uint32_t id)


@@ -48,6 +48,10 @@
#define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID 0x11
#define DRM_NVIDIA_GRANT_PERMISSIONS 0x12
#define DRM_NVIDIA_REVOKE_PERMISSIONS 0x13
#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14
#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15
#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16
#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH 0x17
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \
@@ -133,6 +137,26 @@
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS), \
struct drm_nvidia_revoke_permissions_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE \
DRM_IOWR((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE), \
struct drm_nvidia_semsurf_fence_ctx_create_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE \
DRM_IOWR((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_CREATE), \
struct drm_nvidia_semsurf_fence_create_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT \
DRM_IOW((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_WAIT), \
struct drm_nvidia_semsurf_fence_wait_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH \
DRM_IOW((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_ATTACH), \
struct drm_nvidia_semsurf_fence_attach_params)
struct drm_nvidia_gem_import_nvkms_memory_params {
uint64_t mem_size; /* IN */
@@ -158,6 +182,8 @@ struct drm_nvidia_get_dev_info_params {
uint32_t generic_page_kind; /* OUT */
uint32_t page_kind_generation; /* OUT */
uint32_t sector_layout; /* OUT */
uint32_t supports_sync_fd; /* OUT */
uint32_t supports_semsurf; /* OUT */
};
struct drm_nvidia_prime_fence_context_create_params {
@@ -179,6 +205,7 @@ struct drm_nvidia_gem_prime_fence_attach_params {
uint32_t handle; /* IN GEM handle to attach fence to */
uint32_t fence_context_handle; /* IN GEM handle to fence context on which fence is run on */
uint32_t sem_thresh; /* IN Semaphore value to reach before signal */
uint32_t __pad;
};
struct drm_nvidia_get_client_capability_params {
@@ -190,6 +217,8 @@ struct drm_nvidia_get_client_capability_params {
struct drm_nvidia_crtc_crc32 {
uint32_t value; /* Read value, undefined if supported is false */
uint8_t supported; /* Supported boolean, true if readable by hardware */
uint8_t __pad0;
uint16_t __pad1;
};
struct drm_nvidia_crtc_crc32_v2_out {
@@ -229,10 +258,11 @@ struct drm_nvidia_gem_alloc_nvkms_memory_params {
uint32_t handle; /* OUT */
uint8_t block_linear; /* IN */
uint8_t compressible; /* IN/OUT */
uint16_t __pad;
uint16_t __pad0;
uint64_t memory_size; /* IN */
uint32_t flags; /* IN */
uint32_t __pad1;
};
struct drm_nvidia_gem_export_dmabuf_memory_params {
@@ -266,13 +296,90 @@ struct drm_nvidia_get_connector_id_for_dpy_id_params {
uint32_t connectorId; /* OUT */
};
enum drm_nvidia_permissions_type {
NV_DRM_PERMISSIONS_TYPE_MODESET = 2,
NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
};
struct drm_nvidia_grant_permissions_params {
int32_t fd; /* IN */
uint32_t dpyId; /* IN */
uint32_t type; /* IN */
};
struct drm_nvidia_revoke_permissions_params {
uint32_t dpyId; /* IN */
uint32_t type; /* IN */
};
struct drm_nvidia_semsurf_fence_ctx_create_params {
uint64_t index; /* IN Index of the desired semaphore in the
* fence context's semaphore surface */
/* Params for importing userspace semaphore surface */
uint64_t nvkms_params_ptr; /* IN */
uint64_t nvkms_params_size; /* IN */
uint32_t handle; /* OUT GEM handle to fence context */
uint32_t __pad;
};
struct drm_nvidia_semsurf_fence_create_params {
uint32_t fence_context_handle; /* IN GEM handle to fence context on which
* fence is run on */
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
* after which the fence will be signaled
* with its error status set to -ETIMEDOUT.
* Default timeout value is 5000ms */
uint64_t wait_value; /* IN Semaphore value to reach before signal */
int32_t fd; /* OUT sync FD object representing the
* semaphore at the specified index reaching
* a value >= wait_value */
uint32_t __pad;
};
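/*
 * Illustrative userspace sketch (an assumption added for clarity, not part of
 * this header): request a sync FD that signals once the context's semaphore
 * reaches a value of at least 42. "drm_fd" and "ctx_handle" are hypothetical.
 *
 *     struct drm_nvidia_semsurf_fence_create_params create = {
 *         .fence_context_handle = ctx_handle,
 *         .timeout_value_ms = 5000,
 *         .wait_value = 42,
 *     };
 *     if (ioctl(drm_fd, DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE, &create) == 0) {
 *         // create.fd now holds the sync FD described above
 *     }
 */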
/*
* Note there is no provision for timeouts in this ioctl. The kernel
* documentation asserts timeouts should be handled by fence producers, and
* that waiters should not second-guess their logic, as it is producers rather
* than consumers that have better information when it comes to determining a
* reasonable timeout for a given workload.
*/
struct drm_nvidia_semsurf_fence_wait_params {
uint32_t fence_context_handle; /* IN GEM handle to fence context which will
* be used to wait on the sync FD. Need not
* be the fence context used to create the
* sync FD. */
int32_t fd; /* IN sync FD object to wait on */
uint64_t pre_wait_value; /* IN Wait for the semaphore represented by
* fence_context to reach this value before
* waiting for the sync file. */
uint64_t post_wait_value; /* IN Signal the semaphore represented by
* fence_context to this value after waiting
* for the sync file */
};
struct drm_nvidia_semsurf_fence_attach_params {
uint32_t handle; /* IN GEM handle of buffer */
uint32_t fence_context_handle; /* IN GEM handle of fence context */
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
* after which the fence will be signaled
* with its error status set to -ETIMEDOUT.
* Default timeout value is 5000ms */
uint32_t shared; /* IN If true, fence will reserve shared
* access to the buffer, otherwise it will
* reserve exclusive access */
uint64_t wait_value; /* IN Semaphore value to reach before signal */
};
#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */


@@ -35,7 +35,13 @@
#include <drm/drmP.h>
#endif
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
#include <linux/file.h>
#include <linux/sync_file.h>
#endif
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include "nv-mm.h"
@@ -45,6 +51,14 @@ MODULE_PARM_DESC(
bool nv_drm_modeset_module_param = false;
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
MODULE_PARM_DESC(
fbdev,
"Create a framebuffer device (1 = enable, 0 = disable (default)) (EXPERIMENTAL)");
bool nv_drm_fbdev_module_param = false;
module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
#endif
void *nv_drm_calloc(size_t nmemb, size_t size)
{
size_t total_size = nmemb * size;
@@ -81,14 +95,10 @@ char *nv_drm_asprintf(const char *fmt, ...)
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_FAMILY_ARM)
#if defined(NVCPU_ARM)
#define WRITE_COMBINE_FLUSH() { dsb(); outer_sync(); }
#elif defined(NVCPU_AARCH64)
#define WRITE_COMBINE_FLUSH() mb()
#endif
#elif defined(NVCPU_PPC64LE)
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
#else
#define WRITE_COMBINE_FLUSH() mb()
#endif
void nv_drm_write_combine_flush(void)
@@ -160,6 +170,122 @@ void nv_drm_vunmap(void *address)
vunmap(address);
}
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
{
worker->shutting_down = false;
if (nv_kthread_q_init(&worker->q, name)) {
return false;
}
spin_lock_init(&worker->lock);
return true;
}
void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
{
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
worker->shutting_down = true;
spin_unlock_irqrestore(&worker->lock, flags);
nv_kthread_q_stop(&worker->q);
}
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg)
{
nv_kthread_q_item_init(work, callback, arg);
}
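/*
 * Returns non-zero if the work item was scheduled on the worker's queue, and
 * 0 if the worker is shutting down or the item was already pending in a queue
 * (see nv_kthread_q_schedule_q_item()).
 */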
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&worker->lock, flags);
if (!worker->shutting_down) {
ret = nv_kthread_q_schedule_q_item(&worker->q, work);
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
{
nv_timer_setup(timer, callback);
}
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
{
mod_timer(&timer->kernel_timer, timeout_native);
}
unsigned long nv_drm_timer_now(void)
{
return jiffies;
}
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
{
return jiffies + msecs_to_jiffies(relative_timeout_ms);
}
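/*
 * Example (illustrative only): nv_drm_mod_timer(timer,
 * nv_drm_timeout_from_ms(5000)) arms the underlying kernel timer to expire
 * roughly five seconds from now, since the returned value is an absolute
 * jiffies-based expiry.
 */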
bool nv_drm_del_timer_sync(nv_drm_timer *timer)
{
if (del_timer_sync(&timer->kernel_timer)) {
return true;
} else {
return false;
}
}
#if defined(NV_DRM_FENCE_AVAILABLE)
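/*
 * Wraps @fence in a new sync file and returns its fd (>= 0) on success, or a
 * negative errno on failure; returns -EINVAL when linux/sync_file.h is not
 * available.
 */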
int nv_drm_create_sync_file(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
struct sync_file *sync;
int fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
return fd;
}
/* sync_file_create() generates its own reference to the fence */
sync = sync_file_create(fence);
if (IS_ERR(sync)) {
put_unused_fd(fd);
return PTR_ERR(sync);
}
fd_install(fd, sync->file);
return fd;
#else /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
return -EINVAL;
#endif /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
}
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd)
{
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
return sync_file_get_fence(fd);
#else /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
return NULL;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void)
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
#endif /* NV_DRM_AVAILABLE */
/*************************************************************************


@@ -237,6 +237,14 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
int i;
int ret;
/*
* If sub-owner permission was granted to another NVKMS client, disallow
* modesets through the DRM interface.
*/
if (nv_dev->subOwnershipGranted) {
return -EINVAL;
}
memset(requested_config, 0, sizeof(*requested_config));
/* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
@@ -274,9 +282,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
nv_new_crtc_state->nv_flip = NULL;
}
#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
requested_config->headRequestedConfig[nv_crtc->head].modeSetConfig.vrrEnabled = new_crtc_state->vrr_enabled;
#endif
}
}
@@ -292,7 +297,9 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
requested_config,
&reply_config,
commit)) {
return -EINVAL;
if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
return -EINVAL;
}
}
if (commit && nv_dev->supportsSyncpts) {
@@ -388,42 +395,56 @@ int nv_drm_atomic_commit(struct drm_device *dev,
struct nv_drm_device *nv_dev = to_nv_device(dev);
/*
* drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
* for nonblocking commit if previous updates (commit tasks/flip event) are
* pending. In case of blocking commits it mandates to wait for previous
* updates to complete.
* XXX: drm_mode_config_funcs::atomic_commit() mandates returning -EBUSY
* for a nonblocking commit if the commit would need to wait for previous
* updates (commit tasks/flip event) to complete. For blocking
* commits it mandates waiting for previous updates to complete. However,
* the kernel DRM-KMS documentation does explicitly allow maintaining a
* queue of outstanding commits.
*
* Our system already implements such a queue, but due to
* bug 4054608, it is currently not used.
*/
if (nonblock) {
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
/*
* Here you aren't required to hold nv_drm_crtc::flip_list_lock
* because:
*
* The core DRM driver acquires lock for all affected crtcs before
* calling into ->commit() hook, therefore it is not possible for
* other threads to call into ->commit() hook affecting same crtcs
* and enqueue flip objects into flip_list -
*
* nv_drm_atomic_commit_internal()
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
* |-> nv_drm_crtc_enqueue_flip()
*
* Only possibility is list_empty check races with code path
* dequeuing flip object -
*
* __nv_drm_handle_flip_event()
* |-> nv_drm_crtc_dequeue_flip()
*
* But this race condition can't lead list_empty() to return
* incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
* updating the list could not trick us into thinking the list is
* empty when it isn't.
*/
/*
* Here you aren't required to hold nv_drm_crtc::flip_list_lock
* because:
*
* The core DRM driver acquires lock for all affected crtcs before
* calling into ->commit() hook, therefore it is not possible for
* other threads to call into ->commit() hook affecting same crtcs
* and enqueue flip objects into flip_list -
*
* nv_drm_atomic_commit_internal()
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
* |-> nv_drm_crtc_enqueue_flip()
*
* Only possibility is list_empty check races with code path
* dequeuing flip object -
*
* __nv_drm_handle_flip_event()
* |-> nv_drm_crtc_dequeue_flip()
*
* But this race condition can't lead list_empty() to return
* incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
* updating the list could not trick us into thinking the list is
* empty when it isn't.
*/
if (nonblock) {
if (!list_empty(&nv_crtc->flip_list)) {
return -EBUSY;
}
} else {
if (wait_event_timeout(
nv_dev->flip_event_wq,
list_empty(&nv_crtc->flip_list),
3 * HZ /* 3 second */) == 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Flip event timeout on head %u", nv_crtc->head);
}
}
}
@@ -467,6 +488,7 @@ int nv_drm_atomic_commit(struct drm_device *dev,
goto done;
}
nv_dev->drmMasterChangedSinceLastAtomicCommit = NV_FALSE;
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);


@@ -29,10 +29,47 @@
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#endif
#if defined(NV_LINUX)
#include "nv-kthread-q.h"
#include "linux/spinlock.h"
typedef struct nv_drm_workthread {
spinlock_t lock;
struct nv_kthread_q q;
bool shutting_down;
} nv_drm_workthread;
typedef nv_kthread_q_item_t nv_drm_work;
#else /* defined(NV_LINUX) */
#error "Need to define deferred work primitives for this OS"
#endif /* else defined(NV_LINUX) */
#if defined(NV_LINUX)
#include "nv-timer.h"
typedef struct nv_timer nv_drm_timer;
#else /* defined(NV_LINUX) */
#error "Need to define kernel timer callback primitives for this OS"
#endif /* else defined(NV_LINUX) */
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
struct page;
/* Set to true when the atomic modeset feature is enabled. */
extern bool nv_drm_modeset_module_param;
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
/* Set to true when the nvidia-drm driver should install a framebuffer device */
extern bool nv_drm_fbdev_module_param;
#endif
void *nv_drm_calloc(size_t nmemb, size_t size);
@@ -51,6 +88,37 @@ void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
void nv_drm_vunmap(void *address);
#endif
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name);
/* Can be called concurrently with nv_drm_workthread_add_work() */
void nv_drm_workthread_shutdown(nv_drm_workthread *worker);
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg);
/* Can be called concurrently with nv_drm_workthread_shutdown() */
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work);
void nv_drm_timer_setup(nv_drm_timer *timer,
void (*callback)(nv_drm_timer *nv_drm_timer));
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long relative_timeout_ms);
bool nv_drm_del_timer_sync(nv_drm_timer *timer);
unsigned long nv_drm_timer_now(void);
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence);
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd);
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void);
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */


@@ -46,12 +46,33 @@
#define NV_DRM_LOG_ERR(__fmt, ...) \
DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
/*
* DRM_WARN() was added in v4.9 by kernel commit
* 30b0da8d556e65ff935a56cd82c05ba0516d3e4a
*
* Before this commit, only DRM_INFO and DRM_ERROR were defined and
* DRM_INFO(fmt, ...) was defined as
* printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__). So, if
* DRM_WARN is undefined this defines NV_DRM_LOG_WARN following the
* same pattern as DRM_INFO.
*/
#ifdef DRM_WARN
#define NV_DRM_LOG_WARN(__fmt, ...) \
DRM_WARN("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#else
#define NV_DRM_LOG_WARN(__fmt, ...) \
printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#endif
#define NV_DRM_LOG_INFO(__fmt, ...) \
DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_WARN(__dev, __fmt, ...) \
NV_DRM_LOG_WARN("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)
@@ -117,9 +138,26 @@ struct nv_drm_device {
#endif
#if defined(NV_DRM_FENCE_AVAILABLE)
NvU64 semsurf_stride;
NvU64 semsurf_max_submitted_offset;
#endif
NvBool hasVideoMemory;
NvBool supportsSyncpts;
NvBool subOwnershipGranted;
NvBool hasFramebufferConsole;
/**
* @drmMasterChangedSinceLastAtomicCommit:
*
* This flag is set in nv_drm_master_set and reset after a completed atomic
* commit. It is used to restore or recommit state that is lost by the
* NvKms modeset owner change, such as the CRTC color management
* properties.
*/
NvBool drmMasterChangedSinceLastAtomicCommit;
struct drm_property *nv_out_fence_property;
struct drm_property *nv_input_colorspace_property;


@@ -19,6 +19,7 @@ NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
@@ -79,6 +80,17 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_helper_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_crtc_enable_color_mgmt
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
@@ -133,3 +145,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg


@@ -247,6 +247,11 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
return 0;
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-21 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -65,9 +65,15 @@
static bool output_rounding_fix = true;
module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);
static bool disable_hdmi_frl = false;
module_param_named(disable_hdmi_frl, disable_hdmi_frl, bool, 0400);
static bool disable_vrr_memclk_switch = false;
module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);
static bool hdmi_deepcolor = false;
module_param_named(hdmi_deepcolor, hdmi_deepcolor, bool, 0400);
/* These parameters are used for fault injection tests. Normally the defaults
* should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
@@ -78,6 +84,7 @@ MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on modul
static bool malloc_verbose = false;
module_param_named(malloc_verbose, malloc_verbose, bool, 0400);
#if NVKMS_CONFIG_FILE_SUPPORTED
/* This parameter is used to find the dpy override conf file */
#define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL)
@@ -86,6 +93,7 @@ MODULE_PARM_DESC(config_file,
"(default: disabled)");
static char *nvkms_conf = NULL;
module_param_named(config_file, nvkms_conf, charp, 0400);
#endif
static atomic_t nvkms_alloc_called_count;
@@ -94,11 +102,21 @@ NvBool nvkms_output_rounding_fix(void)
return output_rounding_fix;
}
NvBool nvkms_disable_hdmi_frl(void)
{
return disable_hdmi_frl;
}
NvBool nvkms_disable_vrr_memclk_switch(void)
{
return disable_vrr_memclk_switch;
}
NvBool nvkms_hdmi_deepcolor(void)
{
return hdmi_deepcolor;
}
#define NVKMS_SYNCPT_STUBS_NEEDED
/*************************************************************************
@@ -335,7 +353,7 @@ NvU64 nvkms_get_usec(void)
struct timespec64 ts;
NvU64 ns;
ktime_get_real_ts64(&ts);
ktime_get_raw_ts64(&ts);
ns = timespec64_to_ns(&ts);
return ns / 1000;
@@ -1382,6 +1400,7 @@ static void nvkms_proc_exit(void)
/*************************************************************************
* NVKMS Config File Read
************************************************************************/
#if NVKMS_CONFIG_FILE_SUPPORTED
static NvBool nvkms_fs_mounted(void)
{
return current->fs != NULL;
@@ -1489,6 +1508,11 @@ static void nvkms_read_config_file_locked(void)
nvkms_free(buffer, buf_size);
}
#else
static void nvkms_read_config_file_locked(void)
{
}
#endif
/*************************************************************************
* NVKMS KAPI functions


@@ -97,8 +97,9 @@ typedef struct {
} NvKmsSyncPtOpParams;
NvBool nvkms_output_rounding_fix(void);
NvBool nvkms_disable_hdmi_frl(void);
NvBool nvkms_disable_vrr_memclk_switch(void);
NvBool nvkms_hdmi_deepcolor(void);
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,


@@ -58,6 +58,18 @@ nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O)
NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset
NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
# Some Android kernels prohibit driver use of filesystem functions like
# filp_open() and kernel_read(). Disable the NVKMS_CONFIG_FILE_SUPPORTED
# functionality that uses those functions when building for Android.
PLATFORM_IS_ANDROID ?= 0
ifeq ($(PLATFORM_IS_ANDROID),1)
NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=0
else
NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=1
endif
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS))


@@ -66,6 +66,8 @@ enum NvKmsClientType {
NVKMS_CLIENT_KERNEL_SPACE,
};
struct NvKmsPerOpenDev;
NvBool nvKmsIoctl(
void *pOpenVoid,
NvU32 cmd,
@@ -104,4 +106,6 @@ NvBool nvKmsKapiGetFunctionsTableInternal
NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev);
#endif /* __NV_KMS_H__ */


@@ -249,8 +249,8 @@ static int nv_dma_map(struct sg_table *sg_head, void *context,
nv_mem_context->sg_allocated = 1;
for_each_sg(sg_head->sgl, sg, nv_mem_context->npages, i) {
sg_set_page(sg, NULL, nv_mem_context->page_size, 0);
sg->dma_address = dma_mapping->dma_addresses[i];
sg->dma_length = nv_mem_context->page_size;
sg_dma_address(sg) = dma_mapping->dma_addresses[i];
sg_dma_len(sg) = nv_mem_context->page_size;
}
nv_mem_context->sg_head = *sg_head;
*nmap = nv_mem_context->npages;
@@ -304,8 +304,13 @@ static void nv_mem_put_pages_common(int nc,
return;
if (nc) {
#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
ret = nvidia_p2p_put_pages_persistent(nv_mem_context->page_virt_start,
nv_mem_context->page_table, 0);
#else
ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
nv_mem_context->page_table);
#endif
} else {
ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
nv_mem_context->page_table);
@@ -412,9 +417,15 @@ static int nv_mem_get_pages_nc(unsigned long addr,
nv_mem_context->core_context = core_context;
nv_mem_context->page_size = GPU_PAGE_SIZE;
#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
ret = nvidia_p2p_get_pages_persistent(nv_mem_context->page_virt_start,
nv_mem_context->mapped_size,
&nv_mem_context->page_table, 0);
#else
ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size,
&nv_mem_context->page_table, NULL, NULL);
#endif
if (ret < 0) {
peer_err("error %d while calling nvidia_p2p_get_pages() with NULL callback\n", ret);
return ret;
@@ -459,8 +470,6 @@ static int __init nv_mem_client_init(void)
}
#if defined (NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT)
int status = 0;
// off by one, to leave space for the trailing '1' which is flagging
// the new client type
BUG_ON(strlen(DRV_NAME) > IB_PEER_MEMORY_NAME_MAX-1);
@@ -489,7 +498,7 @@ static int __init nv_mem_client_init(void)
&mem_invalidate_callback);
if (!reg_handle) {
peer_err("nv_mem_client_init -- error while registering traditional client\n");
status = -EINVAL;
rc = -EINVAL;
goto out;
}
@@ -499,12 +508,12 @@ static int __init nv_mem_client_init(void)
reg_handle_nc = ib_register_peer_memory_client(&nv_mem_client_nc, NULL);
if (!reg_handle_nc) {
peer_err("nv_mem_client_init -- error while registering nc client\n");
status = -EINVAL;
rc = -EINVAL;
goto out;
}
out:
if (status) {
if (rc) {
if (reg_handle) {
ib_unregister_peer_memory_client(reg_handle);
reg_handle = NULL;
@@ -516,7 +525,7 @@ out:
}
}
return status;
return rc;
#else
return -EINVAL;
#endif
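
The hunk above routes every registration failure through the function's single error variable (`rc`) before the `goto out` cleanup, instead of the stray `status` that the cleanup never saw. Below is a minimal, self-contained sketch of that single-error-variable goto-cleanup pattern; it is not the nvidia-peermem code, and `fake_register`/`fake_unregister` are invented stand-ins for the peer-memory client registration calls.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the peer-memory register/unregister calls;
 * they exist only for this sketch. */
static void *fake_register(const char *name) { return (void *)name; }
static void fake_unregister(void *handle) { (void)handle; }

static int client_init(void)
{
    int rc = 0;                 /* the one error variable checked at "out" */
    void *reg_handle = NULL;
    void *reg_handle_nc = NULL;

    reg_handle = fake_register("traditional");
    if (!reg_handle) {
        rc = -22;               /* mirrors -EINVAL */
        goto out;
    }

    reg_handle_nc = fake_register("nc");
    if (!reg_handle_nc) {
        rc = -22;
        goto out;
    }

out:
    /* Cleanup keys off the same variable that was set on failure, so a
     * failed second registration also unwinds the first one. */
    if (rc) {
        if (reg_handle)
            fake_unregister(reg_handle);
        if (reg_handle_nc)
            fake_unregister(reg_handle_nc);
    }
    return rc;
}

int main(void)
{
    printf("client_init -> %d\n", client_init());
    return 0;
}
```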

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2022 NVIDIA Corporation
Copyright (c) 2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2022 NVIDIA Corporation
Copyright (c) 2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to

View File

@@ -247,6 +247,11 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
return 0;
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)

View File

@@ -27,6 +27,7 @@ NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rm_mem.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_channel.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_lock.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hal.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_processors.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree.c
NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_allocator.c

View File

@@ -81,12 +81,13 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_pasid_set
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mm_pasid_drop
NV_CONFTEST_FUNCTION_COMPILE_TESTS += migrate_vma_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mmget_not_zero
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mmgrab
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_fault_to_errno
NV_CONFTEST_FUNCTION_COMPILE_TESTS += find_next_bit_wrap
NV_CONFTEST_TYPE_COMPILE_TESTS += backing_dev_info
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_context_t
@@ -100,6 +101,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_invalidate_range
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_arch_invalidate_secondary_tlbs
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
@@ -110,6 +112,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_mm_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
NV_CONFTEST_TYPE_COMPILE_TESTS += mpol_preferred_many_present
NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_interval_notifier
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg

View File

@@ -24,11 +24,11 @@
#include "nvstatus.h"
#if !defined(NV_PRINTF_STRING_SECTION)
#if defined(NVRM) && NVCPU_IS_RISCV64
#if defined(NVRM) && NVOS_IS_LIBOS
#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging")))
#else // defined(NVRM) && NVCPU_IS_RISCV64
#else // defined(NVRM) && NVOS_IS_LIBOS
#define NV_PRINTF_STRING_SECTION
#endif // defined(NVRM) && NVCPU_IS_RISCV64
#endif // defined(NVRM) && NVOS_IS_LIBOS
#endif // !defined(NV_PRINTF_STRING_SECTION)
/*

View File

@@ -571,7 +571,6 @@ static void uvm_vm_open_managed_entry(struct vm_area_struct *vma)
static void uvm_vm_close_managed(struct vm_area_struct *vma)
{
uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
uvm_processor_id_t gpu_id;
bool make_zombie = false;
if (current->mm != NULL)
@@ -606,12 +605,6 @@ static void uvm_vm_close_managed(struct vm_area_struct *vma)
uvm_destroy_vma_managed(vma, make_zombie);
// Notify GPU address spaces that the fault buffer needs to be flushed to
// avoid finding stale entries that can be attributed to new VA ranges
// reallocated at the same address.
for_each_gpu_id_in_mask(gpu_id, &va_space->registered_gpu_va_spaces) {
uvm_processor_mask_set_atomic(&va_space->needs_fault_buffer_flush, gpu_id);
}
uvm_va_space_up_write(va_space);
if (current->mm != NULL)

View File

@@ -216,6 +216,10 @@ NV_STATUS UvmDeinitialize(void);
// Note that it is not required to release VA ranges that were reserved with
// UvmReserveVa().
//
// This is useful for per-process checkpoint and restore, where kernel-mode
// state needs to be reconfigured to match the expectations of a pre-existing
// user-mode process.
//
// UvmReopen() closes the open file returned by UvmGetFileDescriptor() and
// replaces it with a new open file with the same name.
//

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2018 NVIDIA Corporation
Copyright (c) 2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -114,6 +114,8 @@ static void flush_tlb_write_faults(uvm_gpu_va_space_t *gpu_va_space,
{
uvm_ats_fault_invalidate_t *ats_invalidate;
uvm_ats_smmu_invalidate_tlbs(gpu_va_space, addr, size);
if (client_type == UVM_FAULT_CLIENT_TYPE_GPC)
ats_invalidate = &gpu_va_space->gpu->parent->fault_buffer_info.replayable.ats_invalidate;
else
@@ -149,7 +151,11 @@ static void ats_batch_select_residency(uvm_gpu_va_space_t *gpu_va_space,
mode = vma_policy->mode;
if ((mode == MPOL_BIND) || (mode == MPOL_PREFERRED_MANY) || (mode == MPOL_PREFERRED)) {
if ((mode == MPOL_BIND)
#if defined(NV_MPOL_PREFERRED_MANY_PRESENT)
|| (mode == MPOL_PREFERRED_MANY)
#endif
|| (mode == MPOL_PREFERRED)) {
int home_node = NUMA_NO_NODE;
#if defined(NV_MEMPOLICY_HAS_HOME_NODE)
@@ -467,6 +473,10 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
uvm_page_mask_and(write_fault_mask, write_fault_mask, read_fault_mask);
else
uvm_page_mask_zero(write_fault_mask);
// There are no pending faults beyond write faults to RO region.
if (uvm_page_mask_empty(read_fault_mask))
return status;
}
ats_batch_select_residency(gpu_va_space, vma, ats_context);
@@ -580,4 +590,3 @@ NV_STATUS uvm_ats_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space,
return status;
}

View File

@@ -29,8 +29,12 @@
#include "uvm_va_space.h"
#include "uvm_va_space_mm.h"
#include <asm/io.h>
#include <linux/iommu.h>
#include <linux/mm_types.h>
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/mmu_context.h>
// linux/sched/mm.h is needed for mmget_not_zero and mmput to get the mm
// reference required for the iommu_sva_bind_device() call. This header is not
@@ -46,17 +50,271 @@
#define UVM_IOMMU_SVA_BIND_DEVICE(dev, mm) iommu_sva_bind_device(dev, mm)
#endif
// Base address of SMMU CMDQ-V for GSMMU0.
#define SMMU_CMDQV_BASE_ADDR(smmu_base) (smmu_base + 0x200000)
#define SMMU_CMDQV_BASE_LEN 0x00830000
// CMDQV configuration is done by firmware but we check status here.
#define SMMU_CMDQV_CONFIG 0x0
#define SMMU_CMDQV_CONFIG_CMDQV_EN BIT(0)
// Used to map a particular VCMDQ to a VINTF.
#define SMMU_CMDQV_CMDQ_ALLOC_MAP(vcmdq_id) (0x200 + 0x4 * (vcmdq_id))
#define SMMU_CMDQV_CMDQ_ALLOC_MAP_ALLOC BIT(0)
// Shift for the field containing the index of the virtual interface
// owning the VCMDQ.
#define SMMU_CMDQV_CMDQ_ALLOC_MAP_VIRT_INTF_INDX_SHIFT 15
// Base address for the VINTF registers.
#define SMMU_VINTF_BASE_ADDR(cmdqv_base_addr, vintf_id) (cmdqv_base_addr + 0x1000 + 0x100 * (vintf_id))
// Virtual interface (VINTF) configuration registers. The WAR only
// works on baremetal so we need to configure ourselves as the
// hypervisor owner.
#define SMMU_VINTF_CONFIG 0x0
#define SMMU_VINTF_CONFIG_ENABLE BIT(0)
#define SMMU_VINTF_CONFIG_HYP_OWN BIT(17)
#define SMMU_VINTF_STATUS 0x0
#define SMMU_VINTF_STATUS_ENABLED BIT(0)
// Calculates the base address for a particular VCMDQ instance.
#define SMMU_VCMDQ_BASE_ADDR(cmdqv_base_addr, vcmdq_id) (cmdqv_base_addr + 0x10000 + 0x80 * (vcmdq_id))
// SMMU command queue consumer index register. Updated by SMMU
// when commands are consumed.
#define SMMU_VCMDQ_CONS 0x0
// SMMU command queue producer index register. Updated by UVM when
// commands are added to the queue.
#define SMMU_VCMDQ_PROD 0x4
// Configuration register used to enable a VCMDQ.
#define SMMU_VCMDQ_CONFIG 0x8
#define SMMU_VCMDQ_CONFIG_ENABLE BIT(0)
// Status register used to check the VCMDQ is enabled.
#define SMMU_VCMDQ_STATUS 0xc
#define SMMU_VCMDQ_STATUS_ENABLED BIT(0)
// Base address offset for the VCMDQ registers.
#define SMMU_VCMDQ_CMDQ_BASE 0x10000
// Size of the command queue. Each command is 8 bytes and we can't
// have a command queue greater than one page.
#define SMMU_VCMDQ_CMDQ_BASE_LOG2SIZE 9
#define SMMU_VCMDQ_CMDQ_ENTRIES (1UL << SMMU_VCMDQ_CMDQ_BASE_LOG2SIZE)
// We always use VINTF63 for the WAR
#define VINTF 63
static void smmu_vintf_write32(void __iomem *smmu_cmdqv_base, int reg, NvU32 val)
{
iowrite32(val, SMMU_VINTF_BASE_ADDR(smmu_cmdqv_base, VINTF) + reg);
}
static NvU32 smmu_vintf_read32(void __iomem *smmu_cmdqv_base, int reg)
{
return ioread32(SMMU_VINTF_BASE_ADDR(smmu_cmdqv_base, VINTF) + reg);
}
// We always use VCMDQ127 for the WAR
#define VCMDQ 127
void smmu_vcmdq_write32(void __iomem *smmu_cmdqv_base, int reg, NvU32 val)
{
iowrite32(val, SMMU_VCMDQ_BASE_ADDR(smmu_cmdqv_base, VCMDQ) + reg);
}
NvU32 smmu_vcmdq_read32(void __iomem *smmu_cmdqv_base, int reg)
{
return ioread32(SMMU_VCMDQ_BASE_ADDR(smmu_cmdqv_base, VCMDQ) + reg);
}
static void smmu_vcmdq_write64(void __iomem *smmu_cmdqv_base, int reg, NvU64 val)
{
iowrite64(val, SMMU_VCMDQ_BASE_ADDR(smmu_cmdqv_base, VCMDQ) + reg);
}
// Fix for Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU
// TLB invalidates on read-only to read-write upgrades
static NV_STATUS uvm_ats_smmu_war_init(uvm_parent_gpu_t *parent_gpu)
{
uvm_spin_loop_t spin;
NV_STATUS status;
unsigned long cmdqv_config;
void __iomem *smmu_cmdqv_base;
struct acpi_iort_node *node;
struct acpi_iort_smmu_v3 *iort_smmu;
node = *(struct acpi_iort_node **) dev_get_platdata(parent_gpu->pci_dev->dev.iommu->iommu_dev->dev->parent);
iort_smmu = (struct acpi_iort_smmu_v3 *) node->node_data;
smmu_cmdqv_base = ioremap(SMMU_CMDQV_BASE_ADDR(iort_smmu->base_address), SMMU_CMDQV_BASE_LEN);
if (!smmu_cmdqv_base)
return NV_ERR_NO_MEMORY;
parent_gpu->smmu_war.smmu_cmdqv_base = smmu_cmdqv_base;
cmdqv_config = ioread32(smmu_cmdqv_base + SMMU_CMDQV_CONFIG);
if (!(cmdqv_config & SMMU_CMDQV_CONFIG_CMDQV_EN)) {
status = NV_ERR_OBJECT_NOT_FOUND;
goto out;
}
// Allocate SMMU CMDQ pages for WAR
parent_gpu->smmu_war.smmu_cmdq = alloc_page(NV_UVM_GFP_FLAGS | __GFP_ZERO);
if (!parent_gpu->smmu_war.smmu_cmdq) {
status = NV_ERR_NO_MEMORY;
goto out;
}
// Initialise VINTF for the WAR
smmu_vintf_write32(smmu_cmdqv_base, SMMU_VINTF_CONFIG, SMMU_VINTF_CONFIG_ENABLE | SMMU_VINTF_CONFIG_HYP_OWN);
UVM_SPIN_WHILE(!(smmu_vintf_read32(smmu_cmdqv_base, SMMU_VINTF_STATUS) & SMMU_VINTF_STATUS_ENABLED), &spin);
// Allocate VCMDQ to VINTF
iowrite32((VINTF << SMMU_CMDQV_CMDQ_ALLOC_MAP_VIRT_INTF_INDX_SHIFT) | SMMU_CMDQV_CMDQ_ALLOC_MAP_ALLOC,
smmu_cmdqv_base + SMMU_CMDQV_CMDQ_ALLOC_MAP(VCMDQ));
BUILD_BUG_ON((SMMU_VCMDQ_CMDQ_BASE_LOG2SIZE + 3) > PAGE_SHIFT);
smmu_vcmdq_write64(smmu_cmdqv_base, SMMU_VCMDQ_CMDQ_BASE,
page_to_phys(parent_gpu->smmu_war.smmu_cmdq) | SMMU_VCMDQ_CMDQ_BASE_LOG2SIZE);
smmu_vcmdq_write32(smmu_cmdqv_base, SMMU_VCMDQ_CONS, 0);
smmu_vcmdq_write32(smmu_cmdqv_base, SMMU_VCMDQ_PROD, 0);
smmu_vcmdq_write32(smmu_cmdqv_base, SMMU_VCMDQ_CONFIG, SMMU_VCMDQ_CONFIG_ENABLE);
UVM_SPIN_WHILE(!(smmu_vcmdq_read32(smmu_cmdqv_base, SMMU_VCMDQ_STATUS) & SMMU_VCMDQ_STATUS_ENABLED), &spin);
uvm_mutex_init(&parent_gpu->smmu_war.smmu_lock, UVM_LOCK_ORDER_LEAF);
parent_gpu->smmu_war.smmu_prod = 0;
parent_gpu->smmu_war.smmu_cons = 0;
return NV_OK;
out:
iounmap(parent_gpu->smmu_war.smmu_cmdqv_base);
parent_gpu->smmu_war.smmu_cmdqv_base = NULL;
return status;
}
static void uvm_ats_smmu_war_deinit(uvm_parent_gpu_t *parent_gpu)
{
void __iomem *smmu_cmdqv_base = parent_gpu->smmu_war.smmu_cmdqv_base;
NvU32 cmdq_alloc_map;
if (parent_gpu->smmu_war.smmu_cmdqv_base) {
smmu_vcmdq_write32(smmu_cmdqv_base, SMMU_VCMDQ_CONFIG, 0);
cmdq_alloc_map = ioread32(smmu_cmdqv_base + SMMU_CMDQV_CMDQ_ALLOC_MAP(VCMDQ));
iowrite32(cmdq_alloc_map & SMMU_CMDQV_CMDQ_ALLOC_MAP_ALLOC, smmu_cmdqv_base + SMMU_CMDQV_CMDQ_ALLOC_MAP(VCMDQ));
smmu_vintf_write32(smmu_cmdqv_base, SMMU_VINTF_CONFIG, 0);
}
if (parent_gpu->smmu_war.smmu_cmdq)
__free_page(parent_gpu->smmu_war.smmu_cmdq);
if (parent_gpu->smmu_war.smmu_cmdqv_base)
iounmap(parent_gpu->smmu_war.smmu_cmdqv_base);
}
// The SMMU on ARM64 can run under different translation regimes depending on
// what features the OS and CPU variant support. The CPU for GH180 supports
// virtualisation extensions and starts the kernel at EL2 meaning SMMU operates
// under the NS-EL2-E2H translation regime. Therefore we need to use the
// TLBI_EL2_* commands which invalidate TLB entries created under this
// translation regime.
#define CMDQ_OP_TLBI_EL2_ASID 0x21;
#define CMDQ_OP_TLBI_EL2_VA 0x22;
#define CMDQ_OP_CMD_SYNC 0x46
// Use the same maximum as used for MAX_TLBI_OPS in the upstream
// kernel.
#define UVM_MAX_TLBI_OPS (1UL << (PAGE_SHIFT - 3))
#if UVM_ATS_SMMU_WAR_REQUIRED()
void uvm_ats_smmu_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space, NvU64 addr, size_t size)
{
struct mm_struct *mm = gpu_va_space->va_space->va_space_mm.mm;
uvm_parent_gpu_t *parent_gpu = gpu_va_space->gpu->parent;
struct {
NvU64 low;
NvU64 high;
} *vcmdq;
unsigned long vcmdq_prod;
NvU64 end;
uvm_spin_loop_t spin;
NvU16 asid;
if (!parent_gpu->smmu_war.smmu_cmdqv_base)
return;
asid = arm64_mm_context_get(mm);
vcmdq = kmap(parent_gpu->smmu_war.smmu_cmdq);
uvm_mutex_lock(&parent_gpu->smmu_war.smmu_lock);
vcmdq_prod = parent_gpu->smmu_war.smmu_prod;
// Our queue management is very simple. The mutex prevents multiple
// producers writing to the queue and all our commands require waiting for
// the queue to drain so we know it's empty. If we can't fit enough commands
// in the queue we just invalidate the whole ASID.
//
// The command queue is a circular buffer with the MSB representing a wrap
// bit that must toggle on each wrap. See the SMMU architecture
// specification for more details.
//
// SMMU_VCMDQ_CMDQ_ENTRIES - 1 because we need to leave space for the
// CMD_SYNC.
if ((size >> PAGE_SHIFT) > min(UVM_MAX_TLBI_OPS, SMMU_VCMDQ_CMDQ_ENTRIES - 1)) {
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].low = CMDQ_OP_TLBI_EL2_ASID;
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].low |= (NvU64) asid << 48;
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].high = 0;
vcmdq_prod++;
}
else {
for (end = addr + size; addr < end; addr += PAGE_SIZE) {
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].low = CMDQ_OP_TLBI_EL2_VA;
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].low |= (NvU64) asid << 48;
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].high = addr & ~((1UL << 12) - 1);
vcmdq_prod++;
}
}
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].low = CMDQ_OP_CMD_SYNC;
vcmdq[vcmdq_prod % SMMU_VCMDQ_CMDQ_ENTRIES].high = 0x0;
vcmdq_prod++;
// MSB is the wrap bit
vcmdq_prod &= (1UL << (SMMU_VCMDQ_CMDQ_BASE_LOG2SIZE + 1)) - 1;
parent_gpu->smmu_war.smmu_prod = vcmdq_prod;
smmu_vcmdq_write32(parent_gpu->smmu_war.smmu_cmdqv_base, SMMU_VCMDQ_PROD, parent_gpu->smmu_war.smmu_prod);
UVM_SPIN_WHILE(
(smmu_vcmdq_read32(parent_gpu->smmu_war.smmu_cmdqv_base, SMMU_VCMDQ_CONS) & GENMASK(19, 0)) != vcmdq_prod,
&spin);
uvm_mutex_unlock(&parent_gpu->smmu_war.smmu_lock);
kunmap(parent_gpu->smmu_war.smmu_cmdq);
arm64_mm_context_put(mm);
}
#endif
NV_STATUS uvm_ats_sva_add_gpu(uvm_parent_gpu_t *parent_gpu)
{
int ret;
ret = iommu_dev_enable_feature(&parent_gpu->pci_dev->dev, IOMMU_DEV_FEAT_SVA);
if (ret)
return errno_to_nv_status(ret);
return errno_to_nv_status(ret);
if (UVM_ATS_SMMU_WAR_REQUIRED())
return uvm_ats_smmu_war_init(parent_gpu);
else
return NV_OK;
}
void uvm_ats_sva_remove_gpu(uvm_parent_gpu_t *parent_gpu)
{
if (UVM_ATS_SMMU_WAR_REQUIRED())
uvm_ats_smmu_war_deinit(parent_gpu);
iommu_dev_disable_feature(&parent_gpu->pci_dev->dev, IOMMU_DEV_FEAT_SVA);
}
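
The comments in the hunk above describe the WAR's VCMDQ handling: the producer index keeps the low `LOG2SIZE` bits as the slot number and one extra MSB as the wrap flag, one slot is always reserved for the trailing CMD_SYNC, and oversized ranges fall back to a whole-ASID invalidate. The sketch below is a plain userspace model of that arithmetic, not the driver code; the opcode values are copied from the #defines above, but the queue, helpers, and sizes are invented and no consumer is modeled.

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define LOG2_ENTRIES 3u
#define ENTRIES      (1u << LOG2_ENTRIES)              /* queue slots             */
#define INDEX_MASK   ((1u << (LOG2_ENTRIES + 1)) - 1)  /* slot bits + wrap bit    */
#define PAGE_SZ      4096u

struct cmd { uint64_t lo, hi; };

static struct cmd queue[ENTRIES];
static unsigned prod;   /* producer index, wrap bit lives in its MSB */

/* Push one command: the slot is prod modulo ENTRIES, and the extra MSB is the
 * wrap flag the consumer compares against. */
static void push(uint64_t lo, uint64_t hi)
{
    queue[prod % ENTRIES] = (struct cmd){ lo, hi };
    prod = (prod + 1) & INDEX_MASK;
}

/* Invalidate [addr, addr + size): per-page commands if they fit while leaving
 * one slot for the trailing sync, otherwise a single whole-ASID command. */
static void invalidate(uint64_t addr, uint64_t size, unsigned asid)
{
    uint64_t npages = size / PAGE_SZ;

    if (npages > ENTRIES - 1) {
        push(0x21u | ((uint64_t)asid << 48), 0);       /* whole-ASID invalidate */
    } else {
        for (uint64_t end = addr + size; addr < end; addr += PAGE_SZ)
            push(0x22u | ((uint64_t)asid << 48), addr & ~(uint64_t)(PAGE_SZ - 1));
    }
    push(0x46u, 0);                                    /* sync marker            */
    printf("prod index now 0x%x (wrap bit %u)\n", prod, prod >> LOG2_ENTRIES);
}

int main(void)
{
    invalidate(0x100000, 3 * PAGE_SZ, 7);   /* small range: per-page commands  */
    invalidate(0x200000, 64 * PAGE_SZ, 7);  /* large range: whole-ASID command */
    return 0;
}
```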

View File

@@ -32,23 +32,38 @@
// For ATS support on aarch64, arm_smmu_sva_bind() is needed for
// iommu_sva_bind_device() calls. Unfortunately, arm_smmu_sva_bind() is not
// conftest-able. We instead look for the presence of ioasid_get() or
// mm_pasid_set(). ioasid_get() was added in the same patch series as
// arm_smmu_sva_bind() and removed in v6.0. mm_pasid_set() was added in the
// mm_pasid_drop(). ioasid_get() was added in the same patch series as
// arm_smmu_sva_bind() and removed in v6.0. mm_pasid_drop() was added in the
// same patch as the removal of ioasid_get(). We assume the presence of
// arm_smmu_sva_bind() if ioasid_get(v5.11 - v5.17) or mm_pasid_set(v5.18+) is
// arm_smmu_sva_bind() if ioasid_get(v5.11 - v5.17) or mm_pasid_drop(v5.18+) is
// present.
//
// arm_smmu_sva_bind() was added with commit
// 32784a9562fb0518b12e9797ee2aec52214adf6f and ioasid_get() was added with
// commit cb4789b0d19ff231ce9f73376a023341300aed96 (11/23/2020). Commit
// 701fac40384f07197b106136012804c3cae0b3de (02/15/2022) removed ioasid_get()
// and added mm_pasid_set().
#if UVM_CAN_USE_MMU_NOTIFIERS() && (defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_SET_PRESENT))
#define UVM_ATS_SVA_SUPPORTED() 1
// and added mm_pasid_drop().
#if UVM_CAN_USE_MMU_NOTIFIERS() && (defined(NV_IOASID_GET_PRESENT) || defined(NV_MM_PASID_DROP_PRESENT))
#if defined(CONFIG_IOMMU_SVA)
#define UVM_ATS_SVA_SUPPORTED() 1
#else
#define UVM_ATS_SVA_SUPPORTED() 0
#endif
#else
#define UVM_ATS_SVA_SUPPORTED() 0
#endif
// If NV_ARCH_INVALIDATE_SECONDARY_TLBS is defined it means the upstream fix is
// in place so no need for the WAR from Bug 4130089: [GH180][r535] WAR for
// kernel not issuing SMMU TLB invalidates on read-only
#if defined(NV_ARCH_INVALIDATE_SECONDARY_TLBS)
#define UVM_ATS_SMMU_WAR_REQUIRED() 0
#elif NVCPU_IS_AARCH64
#define UVM_ATS_SMMU_WAR_REQUIRED() 1
#else
#define UVM_ATS_SMMU_WAR_REQUIRED() 0
#endif
typedef struct
{
int placeholder;
@@ -77,6 +92,17 @@ typedef struct
// LOCKING: None
void uvm_ats_sva_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space);
// Fix for Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU
// TLB invalidates on read-only to read-write upgrades
#if UVM_ATS_SMMU_WAR_REQUIRED()
void uvm_ats_smmu_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space, NvU64 addr, size_t size);
#else
static void uvm_ats_smmu_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space, NvU64 addr, size_t size)
{
}
#endif
#else
static NV_STATUS uvm_ats_sva_add_gpu(uvm_parent_gpu_t *parent_gpu)
{
@@ -107,6 +133,11 @@ typedef struct
{
}
static void uvm_ats_smmu_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space, NvU64 addr, size_t size)
{
}
#endif // UVM_ATS_SVA_SUPPORTED
#endif // __UVM_ATS_SVA_H__
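
The header hunk above gates the WAR behind `UVM_ATS_SMMU_WAR_REQUIRED()`: when the macro evaluates to 0, callers see a no-op stub instead of the real prototype, so call sites compile unchanged. The snippet below is a generic, self-contained sketch of that pattern with invented names (and a `static inline` stub, whereas the header above uses plain `static`); it is illustration, not the UVM header.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical feature gate standing in for UVM_ATS_SMMU_WAR_REQUIRED();
 * flip it to 1 to select the real prototype instead of the stub. */
#define FEATURE_WAR_REQUIRED() 0

typedef struct { int id; } gpu_va_space_t;

#if FEATURE_WAR_REQUIRED()
/* Real implementation would live in a .c file; prototype only. */
void invalidate_tlbs(gpu_va_space_t *s, unsigned long addr, size_t size);
#else
/* No-op stub: callers build and run unchanged when the WAR is not compiled in. */
static inline void invalidate_tlbs(gpu_va_space_t *s, unsigned long addr, size_t size)
{
    (void)s; (void)addr; (void)size;
}
#endif

int main(void)
{
    gpu_va_space_t s = { 0 };
    invalidate_tlbs(&s, 0x1000, 4096);   /* resolves to the stub in this build */
    printf("feature-gated call compiled either way\n");
    return 0;
}
```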

View File

@@ -191,7 +191,7 @@ static NV_STATUS test_membar(uvm_gpu_t *gpu)
for (i = 0; i < REDUCTIONS; ++i) {
uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);
gpu->parent->ce_hal->semaphore_reduction_inc(&push, host_mem_gpu_va, REDUCTIONS + 1);
gpu->parent->ce_hal->semaphore_reduction_inc(&push, host_mem_gpu_va, REDUCTIONS);
}
// Without a sys membar the channel tracking semaphore can and does complete
@@ -577,7 +577,7 @@ static NV_STATUS test_semaphore_reduction_inc(uvm_gpu_t *gpu)
for (i = 0; i < REDUCTIONS; i++) {
uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);
gpu->parent->ce_hal->semaphore_reduction_inc(&push, gpu_va, i+1);
gpu->parent->ce_hal->semaphore_reduction_inc(&push, gpu_va, REDUCTIONS);
}
status = uvm_push_end_and_wait(&push);

View File

@@ -2683,7 +2683,7 @@ static void init_channel_manager_conf(uvm_channel_manager_t *manager)
// caches vidmem (and sysmem), we place GPFIFO and GPPUT on sysmem to avoid
// cache thrash. The memory access latency is reduced, despite the required
// access through the bus, because no cache coherence message is exchanged.
if (uvm_gpu_is_coherent(gpu->parent)) {
if (uvm_parent_gpu_is_coherent(gpu->parent)) {
manager->conf.gpfifo_loc = UVM_BUFFER_LOCATION_SYS;
// On GPUs with limited ESCHED addressing range, e.g., Volta on P9, RM

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2013-2021 NVIDIA Corporation
Copyright (c) 2013-2023 NVIDIA Corporation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -233,18 +233,6 @@ unsigned uvm_get_stale_thread_id(void)
return (unsigned)task_pid_vnr(current);
}
//
// A simple security rule for allowing access to UVM user space memory: if you
// are the same user as the owner of the memory, or if you are root, then you
// are granted access. The idea is to allow debuggers and profilers to work, but
// without opening up any security holes.
//
NvBool uvm_user_id_security_check(uid_t euidTarget)
{
return (NV_CURRENT_EUID() == euidTarget) ||
(UVM_ROOT_UID == euidTarget);
}
void on_uvm_test_fail(void)
{
(void)NULL;

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2013-2021 NVIDIA Corporation
Copyright (c) 2013-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -282,9 +282,6 @@ static inline void kmem_cache_destroy_safe(struct kmem_cache **ppCache)
}
}
static const uid_t UVM_ROOT_UID = 0;
typedef struct
{
NvU64 start_time_ns;
@@ -335,7 +332,6 @@ NV_STATUS errno_to_nv_status(int errnoCode);
int nv_status_to_errno(NV_STATUS status);
unsigned uvm_get_stale_process_id(void);
unsigned uvm_get_stale_thread_id(void);
NvBool uvm_user_id_security_check(uid_t euidTarget);
extern int uvm_enable_builtin_tests;

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2021-2023 NVIDIA Corporation
Copyright (c) 2021 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -54,26 +54,23 @@ bool uvm_conf_computing_mode_is_hcc(const uvm_gpu_t *gpu)
return uvm_conf_computing_get_mode(gpu->parent) == UVM_GPU_CONF_COMPUTE_MODE_HCC;
}
void uvm_conf_computing_check_parent_gpu(const uvm_parent_gpu_t *parent)
NV_STATUS uvm_conf_computing_init_parent_gpu(const uvm_parent_gpu_t *parent)
{
uvm_gpu_t *first_gpu;
UvmGpuConfComputeMode cc, sys_cc;
uvm_gpu_t *first;
uvm_assert_mutex_locked(&g_uvm_global.global_lock);
// The Confidential Computing state of the GPU should match that of the
// system.
UVM_ASSERT(uvm_conf_computing_mode_enabled_parent(parent) == g_uvm_global.conf_computing_enabled);
// TODO: Bug 2844714: since we have no routine to traverse parent GPUs,
// find first child GPU and get its parent.
first_gpu = uvm_global_processor_mask_find_first_gpu(&g_uvm_global.retained_gpus);
if (first_gpu == NULL)
return;
first = uvm_global_processor_mask_find_first_gpu(&g_uvm_global.retained_gpus);
if (!first)
return NV_OK;
// All GPUs derive Confidential Computing status from their parent. By
// current policy all parent GPUs have identical Confidential Computing
// status.
UVM_ASSERT(uvm_conf_computing_get_mode(parent) == uvm_conf_computing_get_mode(first_gpu->parent));
sys_cc = uvm_conf_computing_get_mode(first->parent);
cc = uvm_conf_computing_get_mode(parent);
return cc == sys_cc ? NV_OK : NV_ERR_NOT_SUPPORTED;
}
static void dma_buffer_destroy_locked(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,

View File

@@ -60,8 +60,10 @@
// UVM_METHOD_SIZE * 2 * 10 = 80.
#define UVM_CONF_COMPUTING_SIGN_BUF_MAX_SIZE 80
void uvm_conf_computing_check_parent_gpu(const uvm_parent_gpu_t *parent);
// All GPUs derive confidential computing status from their parent.
// By current policy all parent GPUs have identical confidential
// computing status.
NV_STATUS uvm_conf_computing_init_parent_gpu(const uvm_parent_gpu_t *parent);
bool uvm_conf_computing_mode_enabled_parent(const uvm_parent_gpu_t *parent);
bool uvm_conf_computing_mode_enabled(const uvm_gpu_t *gpu);
bool uvm_conf_computing_mode_is_hcc(const uvm_gpu_t *gpu);

View File

@@ -71,6 +71,11 @@ static void uvm_unregister_callbacks(void)
}
}
static void sev_init(const UvmPlatformInfo *platform_info)
{
g_uvm_global.sev_enabled = platform_info->sevEnabled;
}
NV_STATUS uvm_global_init(void)
{
NV_STATUS status;
@@ -119,7 +124,8 @@ NV_STATUS uvm_global_init(void)
uvm_ats_init(&platform_info);
g_uvm_global.num_simulated_devices = 0;
g_uvm_global.conf_computing_enabled = platform_info.confComputingEnabled;
sev_init(&platform_info);
status = uvm_gpu_init();
if (status != NV_OK) {

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2023 NVIDIA Corporation
Copyright (c) 2015-2021 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -143,16 +143,11 @@ struct uvm_global_struct
struct page *page;
} unload_state;
// True if the VM has AMD's SEV, or equivalent HW security extensions such
// as Intel's TDX, enabled. The flag is always false on the host.
//
// This value moves in tandem with that of Confidential Computing in the
// GPU(s) in all supported configurations, so it is used as a proxy for the
// Confidential Computing state.
//
// This field is set once during global initialization (uvm_global_init),
// and can be read afterwards without acquiring any locks.
bool conf_computing_enabled;
// AMD Secure Encrypted Virtualization (SEV) status. True if VM has SEV
// enabled. This field is set once during global initialization
// (uvm_global_init), and can be read afterwards without acquiring any
// locks.
bool sev_enabled;
};
// Initialize global uvm state
@@ -238,10 +233,8 @@ static uvm_gpu_t *uvm_gpu_get_by_processor_id(uvm_processor_id_t id)
return gpu;
}
static uvmGpuSessionHandle uvm_gpu_session_handle(uvm_gpu_t *gpu)
static uvmGpuSessionHandle uvm_global_session_handle(void)
{
if (gpu->parent->smc.enabled)
return gpu->smc.rm_session_handle;
return g_uvm_global.rm_session_handle;
}

View File

@@ -99,8 +99,8 @@ static void fill_gpu_info(uvm_parent_gpu_t *parent_gpu, const UvmGpuInfo *gpu_in
parent_gpu->system_bus.link_rate_mbyte_per_s = gpu_info->sysmemLinkRateMBps;
if (gpu_info->systemMemoryWindowSize > 0) {
// memory_window_end is inclusive but uvm_gpu_is_coherent() checks
// memory_window_end > memory_window_start as its condition.
// memory_window_end is inclusive but uvm_parent_gpu_is_coherent()
// checks memory_window_end > memory_window_start as its condition.
UVM_ASSERT(gpu_info->systemMemoryWindowSize > 1);
parent_gpu->system_bus.memory_window_start = gpu_info->systemMemoryWindowStart;
parent_gpu->system_bus.memory_window_end = gpu_info->systemMemoryWindowStart +
@@ -136,12 +136,12 @@ static NV_STATUS get_gpu_caps(uvm_gpu_t *gpu)
return status;
if (gpu_caps.numaEnabled) {
UVM_ASSERT(uvm_gpu_is_coherent(gpu->parent));
UVM_ASSERT(uvm_parent_gpu_is_coherent(gpu->parent));
gpu->mem_info.numa.enabled = true;
gpu->mem_info.numa.node_id = gpu_caps.numaNodeId;
}
else {
UVM_ASSERT(!uvm_gpu_is_coherent(gpu->parent));
UVM_ASSERT(!uvm_parent_gpu_is_coherent(gpu->parent));
}
return NV_OK;
@@ -1089,7 +1089,7 @@ static NV_STATUS init_parent_gpu(uvm_parent_gpu_t *parent_gpu,
{
NV_STATUS status;
status = uvm_rm_locked_call(nvUvmInterfaceDeviceCreate(g_uvm_global.rm_session_handle,
status = uvm_rm_locked_call(nvUvmInterfaceDeviceCreate(uvm_global_session_handle(),
gpu_info,
gpu_uuid,
&parent_gpu->rm_device,
@@ -1099,7 +1099,12 @@ static NV_STATUS init_parent_gpu(uvm_parent_gpu_t *parent_gpu,
return status;
}
uvm_conf_computing_check_parent_gpu(parent_gpu);
status = uvm_conf_computing_init_parent_gpu(parent_gpu);
if (status != NV_OK) {
UVM_ERR_PRINT("Confidential computing: %s, GPU %s\n",
nvstatusToString(status), parent_gpu->name);
return status;
}
parent_gpu->pci_dev = gpu_platform_info->pci_dev;
parent_gpu->closest_cpu_numa_node = dev_to_node(&parent_gpu->pci_dev->dev);
@@ -1161,19 +1166,8 @@ static NV_STATUS init_gpu(uvm_gpu_t *gpu, const UvmGpuInfo *gpu_info)
{
NV_STATUS status;
// Presently, an RM client can only subscribe to a single partition per
// GPU. Therefore, UVM needs to create several RM clients. For simplicity,
// and since P2P is not supported when SMC partitions are created, we
// create a client (session) per GPU partition.
if (gpu->parent->smc.enabled) {
UvmPlatformInfo platform_info;
status = uvm_rm_locked_call(nvUvmInterfaceSessionCreate(&gpu->smc.rm_session_handle, &platform_info));
if (status != NV_OK) {
UVM_ERR_PRINT("Creating RM session failed: %s\n", nvstatusToString(status));
return status;
}
status = uvm_rm_locked_call(nvUvmInterfaceDeviceCreate(uvm_gpu_session_handle(gpu),
status = uvm_rm_locked_call(nvUvmInterfaceDeviceCreate(uvm_global_session_handle(),
gpu_info,
uvm_gpu_uuid(gpu),
&gpu->smc.rm_device,
@@ -1543,9 +1537,6 @@ static void deinit_gpu(uvm_gpu_t *gpu)
if (gpu->parent->smc.enabled) {
if (gpu->smc.rm_device != 0)
uvm_rm_locked_call_void(nvUvmInterfaceDeviceDestroy(gpu->smc.rm_device));
if (gpu->smc.rm_session_handle != 0)
uvm_rm_locked_call_void(nvUvmInterfaceSessionDestroy(gpu->smc.rm_session_handle));
}
gpu->magic = 0;
@@ -2575,7 +2566,7 @@ static void disable_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1)
uvm_mmu_destroy_peer_identity_mappings(gpu0, gpu1);
uvm_mmu_destroy_peer_identity_mappings(gpu1, gpu0);
uvm_rm_locked_call_void(nvUvmInterfaceP2pObjectDestroy(uvm_gpu_session_handle(gpu0), p2p_handle));
uvm_rm_locked_call_void(nvUvmInterfaceP2pObjectDestroy(uvm_global_session_handle(), p2p_handle));
UVM_ASSERT(uvm_gpu_get(gpu0->global_id) == gpu0);
UVM_ASSERT(uvm_gpu_get(gpu1->global_id) == gpu1);
@@ -2701,9 +2692,9 @@ uvm_processor_id_t uvm_gpu_get_processor_id_by_address(uvm_gpu_t *gpu, uvm_gpu_p
return id;
}
uvm_gpu_peer_t *uvm_gpu_index_peer_caps(const uvm_gpu_id_t gpu_id1, const uvm_gpu_id_t gpu_id2)
uvm_gpu_peer_t *uvm_gpu_index_peer_caps(const uvm_gpu_id_t gpu_id0, const uvm_gpu_id_t gpu_id1)
{
NvU32 table_index = uvm_gpu_peer_table_index(gpu_id1, gpu_id2);
NvU32 table_index = uvm_gpu_peer_table_index(gpu_id0, gpu_id1);
return &g_uvm_global.peers[table_index];
}
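
`uvm_gpu_index_peer_caps()` above looks up a flat peer-capability table by an index derived from an unordered pair of GPU ids. The actual formula used by `uvm_gpu_peer_table_index()` is not shown in this hunk; the sketch below is a generic triangular-table mapping with invented names and limits, included only to make the pair-to-index idea concrete.

```c
#include <assert.h>
#include <stdio.h>

#define MAX_GPUS 8u   /* invented stand-in for the driver's processor limit */

/* Map an unordered pair (a, b), a != b, of ids in [0, MAX_GPUS) to a unique
 * slot in a flat array of MAX_GPUS * (MAX_GPUS - 1) / 2 entries. */
static unsigned pair_table_index(unsigned a, unsigned b)
{
    unsigned lo = a < b ? a : b;
    unsigned hi = a < b ? b : a;

    assert(a != b && hi < MAX_GPUS);

    /* Rows 0..lo-1 contribute (MAX_GPUS - 1 - row) entries each. */
    return lo * (2u * MAX_GPUS - lo - 1u) / 2u + (hi - lo - 1u);
}

int main(void)
{
    /* Symmetric: the order of the two ids does not matter. */
    printf("%u %u\n", pair_table_index(2, 5), pair_table_index(5, 2));

    /* Every pair lands on a distinct, densely packed index. */
    unsigned count = 0;
    for (unsigned i = 0; i < MAX_GPUS; i++)
        for (unsigned j = i + 1; j < MAX_GPUS; j++)
            assert(pair_table_index(i, j) == count++);

    printf("pairs: %u (== %u)\n", count, MAX_GPUS * (MAX_GPUS - 1u) / 2u);
    return 0;
}
```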

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2022 NVIDIA Corporation
Copyright (c) 2015-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -167,7 +167,7 @@ struct uvm_service_block_context_struct
} per_processor_masks[UVM_ID_MAX_PROCESSORS];
// State used by the VA block routines called by the servicing routine
uvm_va_block_context_t block_context;
uvm_va_block_context_t *block_context;
// Prefetch state hint
uvm_perf_prefetch_hint_t prefetch_hint;
@@ -263,7 +263,10 @@ struct uvm_fault_service_batch_context_struct
NvU32 num_coalesced_faults;
bool has_fatal_faults;
// One of the VA spaces in this batch which had fatal faults. If NULL, no
// faults were fatal. More than one VA space could have fatal faults, but we
// pick one to be the target of the cancel sequence.
uvm_va_space_t *fatal_va_space;
bool has_throttled_faults;
@@ -825,8 +828,6 @@ struct uvm_gpu_struct
{
NvU32 swizz_id;
uvmGpuSessionHandle rm_session_handle;
// RM device handle used in many of the UVM/RM APIs.
//
// Do not read this field directly, use uvm_gpu_device_handle instead.
@@ -1162,6 +1163,16 @@ struct uvm_parent_gpu_struct
NvU64 memory_window_start;
NvU64 memory_window_end;
} system_bus;
// WAR to issue ATS TLB invalidation commands ourselves.
struct
{
uvm_mutex_t smmu_lock;
struct page *smmu_cmdq;
void __iomem *smmu_cmdqv_base;
unsigned long smmu_prod;
unsigned long smmu_cons;
} smmu_war;
};
static const char *uvm_gpu_name(uvm_gpu_t *gpu)
@@ -1336,7 +1347,7 @@ static NvU64 uvm_gpu_retained_count(uvm_gpu_t *gpu)
void uvm_parent_gpu_kref_put(uvm_parent_gpu_t *gpu);
// Calculates peer table index using GPU ids.
NvU32 uvm_gpu_peer_table_index(uvm_gpu_id_t gpu_id1, uvm_gpu_id_t gpu_id2);
NvU32 uvm_gpu_peer_table_index(const uvm_gpu_id_t gpu_id0, const uvm_gpu_id_t gpu_id1);
// Either retains an existing PCIe peer entry or creates a new one. In both
// cases the two GPUs are also each retained.
@@ -1355,7 +1366,7 @@ uvm_aperture_t uvm_gpu_peer_aperture(uvm_gpu_t *local_gpu, uvm_gpu_t *remote_gpu
uvm_processor_id_t uvm_gpu_get_processor_id_by_address(uvm_gpu_t *gpu, uvm_gpu_phys_address_t addr);
// Get the P2P capabilities between the gpus with the given indexes
uvm_gpu_peer_t *uvm_gpu_index_peer_caps(uvm_gpu_id_t gpu_id1, uvm_gpu_id_t gpu_id2);
uvm_gpu_peer_t *uvm_gpu_index_peer_caps(const uvm_gpu_id_t gpu_id0, const uvm_gpu_id_t gpu_id1);
// Get the P2P capabilities between the given gpus
static uvm_gpu_peer_t *uvm_gpu_peer_caps(const uvm_gpu_t *gpu0, const uvm_gpu_t *gpu1)
@@ -1363,10 +1374,10 @@ static uvm_gpu_peer_t *uvm_gpu_peer_caps(const uvm_gpu_t *gpu0, const uvm_gpu_t
return uvm_gpu_index_peer_caps(gpu0->id, gpu1->id);
}
static bool uvm_gpus_are_nvswitch_connected(uvm_gpu_t *gpu1, uvm_gpu_t *gpu2)
static bool uvm_gpus_are_nvswitch_connected(const uvm_gpu_t *gpu0, const uvm_gpu_t *gpu1)
{
if (gpu1->parent->nvswitch_info.is_nvswitch_connected && gpu2->parent->nvswitch_info.is_nvswitch_connected) {
UVM_ASSERT(uvm_gpu_peer_caps(gpu1, gpu2)->link_type >= UVM_GPU_LINK_NVLINK_2);
if (gpu0->parent->nvswitch_info.is_nvswitch_connected && gpu1->parent->nvswitch_info.is_nvswitch_connected) {
UVM_ASSERT(uvm_gpu_peer_caps(gpu0, gpu1)->link_type >= UVM_GPU_LINK_NVLINK_2);
return true;
}
@@ -1511,7 +1522,7 @@ bool uvm_gpu_can_address_kernel(uvm_gpu_t *gpu, NvU64 addr, NvU64 size);
// addresses.
NvU64 uvm_parent_gpu_canonical_address(uvm_parent_gpu_t *parent_gpu, NvU64 addr);
static bool uvm_gpu_is_coherent(const uvm_parent_gpu_t *parent_gpu)
static bool uvm_parent_gpu_is_coherent(const uvm_parent_gpu_t *parent_gpu)
{
return parent_gpu->system_bus.memory_window_end > parent_gpu->system_bus.memory_window_start;
}
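
The coherence check right above, together with the `fill_gpu_info()` hunk earlier (memory_window_end is inclusive, and the code asserts `systemMemoryWindowSize > 1`), follows from simple arithmetic: an inclusive end of `start + size - 1` only exceeds `start` when `size > 1`. A small self-contained sketch with simplified field names, just to show that reasoning:

```c
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the parent GPU's system_bus window fields. */
struct mem_window { uint64_t start, end; /* end is inclusive */ };

/* Mirrors the predicate above: coherent means a non-degenerate window,
 * i.e. end strictly greater than start. */
static int window_is_coherent(const struct mem_window *w)
{
    return w->end > w->start;
}

int main(void)
{
    uint64_t start = 0x100000000ull;
    uint64_t size  = 0x4000ull;          /* must be > 1 for the check to hold */

    /* Inclusive end: start + size - 1. With size > 1 this is > start, which
     * is why the driver asserts the window size exceeds 1 before using it. */
    struct mem_window w = { start, start + size - 1 };
    assert(size > 1 && window_is_coherent(&w));

    /* A one-byte window collapses to end == start and reads as non-coherent. */
    struct mem_window degenerate = { start, start + 1 - 1 };
    printf("%d %d\n", window_is_coherent(&w), window_is_coherent(&degenerate));
    return 0;
}
```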

View File

@@ -985,7 +985,7 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
return NV_OK;
if (uvm_processor_mask_test(&va_block->resident, processor))
residency_mask = uvm_va_block_resident_mask_get(va_block, processor);
residency_mask = uvm_va_block_resident_mask_get(va_block, processor, NUMA_NO_NODE);
else
residency_mask = NULL;
@@ -1036,8 +1036,8 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
// If the underlying VMA is gone, skip HMM migrations.
if (uvm_va_block_is_hmm(va_block)) {
status = uvm_hmm_find_vma(service_context->block_context.mm,
&service_context->block_context.hmm.vma,
status = uvm_hmm_find_vma(service_context->block_context->mm,
&service_context->block_context->hmm.vma,
address);
if (status == NV_ERR_INVALID_ADDRESS)
continue;
@@ -1048,7 +1048,7 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
policy = uvm_va_policy_get(va_block, address);
new_residency = uvm_va_block_select_residency(va_block,
&service_context->block_context,
service_context->block_context,
page_index,
processor,
uvm_fault_access_type_mask_bit(UVM_FAULT_ACCESS_TYPE_PREFETCH),
@@ -1083,7 +1083,7 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
// Remove pages that are already resident in the destination processors
for_each_id_in_mask(id, &update_processors) {
bool migrate_pages;
uvm_page_mask_t *residency_mask = uvm_va_block_resident_mask_get(va_block, id);
uvm_page_mask_t *residency_mask = uvm_va_block_resident_mask_get(va_block, id, NUMA_NO_NODE);
UVM_ASSERT(residency_mask);
migrate_pages = uvm_page_mask_andnot(&service_context->per_processor_masks[uvm_id_value(id)].new_residency,
@@ -1101,9 +1101,9 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
if (uvm_va_block_is_hmm(va_block)) {
status = NV_ERR_INVALID_ADDRESS;
if (service_context->block_context.mm) {
if (service_context->block_context->mm) {
status = uvm_hmm_find_policy_vma_and_outer(va_block,
&service_context->block_context.hmm.vma,
&service_context->block_context->hmm.vma,
first_page_index,
&policy,
&outer);
@@ -1206,7 +1206,7 @@ static NV_STATUS service_phys_single_va_block(uvm_gpu_t *gpu,
service_context->operation = UVM_SERVICE_OPERATION_ACCESS_COUNTERS;
service_context->num_retries = 0;
service_context->block_context.mm = mm;
service_context->block_context->mm = mm;
if (uvm_va_block_is_hmm(va_block)) {
uvm_hmm_service_context_init(service_context);

View File

@@ -292,6 +292,7 @@ NV_STATUS uvm_gpu_init_isr(uvm_parent_gpu_t *parent_gpu)
{
NV_STATUS status = NV_OK;
char kthread_name[TASK_COMM_LEN + 1];
uvm_va_block_context_t *block_context;
if (parent_gpu->replayable_faults_supported) {
status = uvm_gpu_fault_buffer_init(parent_gpu);
@@ -311,6 +312,12 @@ NV_STATUS uvm_gpu_init_isr(uvm_parent_gpu_t *parent_gpu)
if (!parent_gpu->isr.replayable_faults.stats.cpu_exec_count)
return NV_ERR_NO_MEMORY;
block_context = uvm_va_block_context_alloc(NULL);
if (!block_context)
return NV_ERR_NO_MEMORY;
parent_gpu->fault_buffer_info.replayable.block_service_context.block_context = block_context;
parent_gpu->isr.replayable_faults.handling = true;
snprintf(kthread_name, sizeof(kthread_name), "UVM GPU%u BH", uvm_id_value(parent_gpu->id));
@@ -333,6 +340,12 @@ NV_STATUS uvm_gpu_init_isr(uvm_parent_gpu_t *parent_gpu)
if (!parent_gpu->isr.non_replayable_faults.stats.cpu_exec_count)
return NV_ERR_NO_MEMORY;
block_context = uvm_va_block_context_alloc(NULL);
if (!block_context)
return NV_ERR_NO_MEMORY;
parent_gpu->fault_buffer_info.non_replayable.block_service_context.block_context = block_context;
parent_gpu->isr.non_replayable_faults.handling = true;
snprintf(kthread_name, sizeof(kthread_name), "UVM GPU%u KC", uvm_id_value(parent_gpu->id));
@@ -356,6 +369,13 @@ NV_STATUS uvm_gpu_init_isr(uvm_parent_gpu_t *parent_gpu)
return status;
}
block_context = uvm_va_block_context_alloc(NULL);
if (!block_context)
return NV_ERR_NO_MEMORY;
parent_gpu->access_counter_buffer_info.batch_service_context.block_service_context.block_context =
block_context;
nv_kthread_q_item_init(&parent_gpu->isr.access_counters.bottom_half_q_item,
access_counters_isr_bottom_half_entry,
parent_gpu);
@@ -410,6 +430,8 @@ void uvm_gpu_disable_isr(uvm_parent_gpu_t *parent_gpu)
void uvm_gpu_deinit_isr(uvm_parent_gpu_t *parent_gpu)
{
uvm_va_block_context_t *block_context;
// Return ownership to RM:
if (parent_gpu->isr.replayable_faults.was_handling) {
// No user threads could have anything left on
@@ -439,8 +461,18 @@ void uvm_gpu_deinit_isr(uvm_parent_gpu_t *parent_gpu)
// It is safe to deinitialize access counters even if they have not been
// successfully initialized.
uvm_gpu_deinit_access_counters(parent_gpu);
block_context =
parent_gpu->access_counter_buffer_info.batch_service_context.block_service_context.block_context;
uvm_va_block_context_free(block_context);
}
if (parent_gpu->non_replayable_faults_supported) {
block_context = parent_gpu->fault_buffer_info.non_replayable.block_service_context.block_context;
uvm_va_block_context_free(block_context);
}
block_context = parent_gpu->fault_buffer_info.replayable.block_service_context.block_context;
uvm_va_block_context_free(block_context);
uvm_kvfree(parent_gpu->isr.replayable_faults.stats.cpu_exec_count);
uvm_kvfree(parent_gpu->isr.non_replayable_faults.stats.cpu_exec_count);
uvm_kvfree(parent_gpu->isr.access_counters.stats.cpu_exec_count);

View File

@@ -370,7 +370,7 @@ static NV_STATUS service_managed_fault_in_block_locked(uvm_gpu_t *gpu,
// Check logical permissions
status = uvm_va_block_check_logical_permissions(va_block,
&service_context->block_context,
service_context->block_context,
gpu->id,
uvm_va_block_cpu_page_index(va_block,
fault_entry->fault_address),
@@ -393,7 +393,7 @@ static NV_STATUS service_managed_fault_in_block_locked(uvm_gpu_t *gpu,
// Compute new residency and update the masks
new_residency = uvm_va_block_select_residency(va_block,
&service_context->block_context,
service_context->block_context,
page_index,
gpu->id,
fault_entry->access_type_mask,
@@ -629,7 +629,7 @@ static NV_STATUS service_fault(uvm_gpu_t *gpu, uvm_fault_buffer_entry_t *fault_e
uvm_gpu_va_space_t *gpu_va_space;
uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable;
uvm_va_block_context_t *va_block_context =
&gpu->parent->fault_buffer_info.non_replayable.block_service_context.block_context;
gpu->parent->fault_buffer_info.non_replayable.block_service_context.block_context;
status = uvm_gpu_fault_entry_to_va_space(gpu, fault_entry, &va_space);
if (status != NV_OK) {
@@ -655,7 +655,7 @@ static NV_STATUS service_fault(uvm_gpu_t *gpu, uvm_fault_buffer_entry_t *fault_e
// to remain valid until we release. If no mm is registered, we
// can only service managed faults, not ATS/HMM faults.
mm = uvm_va_space_mm_retain_lock(va_space);
va_block_context->mm = mm;
uvm_va_block_context_init(va_block_context, mm);
uvm_va_space_down_read(va_space);

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2022 NVIDIA Corporation
Copyright (c) 2015-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -697,9 +697,6 @@ static inline int cmp_access_type(uvm_fault_access_type_t a, uvm_fault_access_ty
typedef enum
{
// Fetch a batch of faults from the buffer.
FAULT_FETCH_MODE_BATCH_ALL,
// Fetch a batch of faults from the buffer. Stop at the first entry that is
// not ready yet
FAULT_FETCH_MODE_BATCH_READY,
@@ -857,9 +854,7 @@ static NV_STATUS fetch_fault_buffer_entries(uvm_gpu_t *gpu,
// written out of order
UVM_SPIN_WHILE(!gpu->parent->fault_buffer_hal->entry_is_valid(gpu->parent, get), &spin) {
// We have some entry to work on. Let's do the rest later.
if (fetch_mode != FAULT_FETCH_MODE_ALL &&
fetch_mode != FAULT_FETCH_MODE_BATCH_ALL &&
fault_index > 0)
if (fetch_mode == FAULT_FETCH_MODE_BATCH_READY && fault_index > 0)
goto done;
}
@@ -888,6 +883,7 @@ static NV_STATUS fetch_fault_buffer_entries(uvm_gpu_t *gpu,
current_entry->va_space = NULL;
current_entry->filtered = false;
current_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL;
if (current_entry->fault_source.utlb_id > batch_context->max_utlb_id) {
UVM_ASSERT(current_entry->fault_source.utlb_id < replayable_faults->utlb_count);
@@ -1184,7 +1180,11 @@ static void mark_fault_fatal(uvm_fault_service_batch_context_t *batch_context,
fault_entry->replayable.cancel_va_mode = cancel_va_mode;
utlb->has_fatal_faults = true;
batch_context->has_fatal_faults = true;
if (!batch_context->fatal_va_space) {
UVM_ASSERT(fault_entry->va_space);
batch_context->fatal_va_space = fault_entry->va_space;
}
}
static void fault_entry_duplicate_flags(uvm_fault_service_batch_context_t *batch_context,
@@ -1234,7 +1234,7 @@ static uvm_fault_access_type_t check_fault_access_permissions(uvm_gpu_t *gpu,
UvmEventFatalReason fatal_reason;
uvm_fault_cancel_va_mode_t cancel_va_mode;
uvm_fault_access_type_t ret = UVM_FAULT_ACCESS_TYPE_COUNT;
uvm_va_block_context_t *va_block_context = &service_block_context->block_context;
uvm_va_block_context_t *va_block_context = service_block_context->block_context;
perm_status = uvm_va_block_check_logical_permissions(va_block,
va_block_context,
@@ -1349,7 +1349,7 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
if (uvm_va_block_is_hmm(va_block)) {
policy = uvm_hmm_find_policy_end(va_block,
block_context->block_context.hmm.vma,
block_context->block_context->hmm.vma,
ordered_fault_cache[first_fault_index]->fault_address,
&end);
}
@@ -1378,7 +1378,10 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
UVM_ASSERT(current_entry->fault_access_type ==
uvm_fault_access_type_mask_highest(current_entry->access_type_mask));
current_entry->is_fatal = false;
// Unserviceable faults were already skipped by the caller. There are no
// unserviceable fault types that could be in the same VA block as a
// serviceable fault.
UVM_ASSERT(!current_entry->is_fatal);
current_entry->is_throttled = false;
current_entry->is_invalid_prefetch = false;
@@ -1470,7 +1473,7 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
// Compute new residency and update the masks
new_residency = uvm_va_block_select_residency(va_block,
&block_context->block_context,
block_context->block_context,
page_index,
gpu->id,
service_access_type_mask,
@@ -1512,8 +1515,8 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
++block_context->num_retries;
if (status == NV_OK && batch_context->has_fatal_faults)
status = uvm_va_block_set_cancel(va_block, &block_context->block_context, gpu);
if (status == NV_OK && batch_context->fatal_va_space)
status = uvm_va_block_set_cancel(va_block, block_context->block_context, gpu);
return status;
}
@@ -1735,6 +1738,10 @@ static NV_STATUS service_fault_batch_ats_sub(uvm_gpu_va_space_t *gpu_va_space,
uvm_fault_access_type_t access_type = current_entry->fault_access_type;
bool is_duplicate = check_fault_entry_duplicate(current_entry, previous_entry);
// ATS faults can't be unserviceable, since unserviceable faults require
// GMMU PTEs.
UVM_ASSERT(!current_entry->is_fatal);
i++;
update_batch_and_notify_fault(gpu_va_space->gpu,
@@ -1857,7 +1864,7 @@ static NV_STATUS service_fault_batch_dispatch(uvm_va_space_t *va_space,
uvm_va_block_t *va_block;
uvm_gpu_t *gpu = gpu_va_space->gpu;
uvm_va_block_context_t *va_block_context =
&gpu->parent->fault_buffer_info.replayable.block_service_context.block_context;
gpu->parent->fault_buffer_info.replayable.block_service_context.block_context;
uvm_fault_buffer_entry_t *current_entry = batch_context->ordered_fault_cache[fault_index];
struct mm_struct *mm = va_block_context->mm;
NvU64 fault_address = current_entry->fault_address;
@@ -1934,14 +1941,198 @@ static NV_STATUS service_fault_batch_dispatch(uvm_va_space_t *va_space,
return status;
}
// Called when a fault in the batch has been marked fatal. Flush the buffer
// under the VA and mmap locks to remove any potential stale fatal faults, then
// service all new faults for just that VA space and cancel those which are
// fatal. Faults in other VA spaces are replayed when done and will be processed
// when normal fault servicing resumes.
static NV_STATUS service_fault_batch_for_cancel(uvm_gpu_t *gpu, uvm_fault_service_batch_context_t *batch_context)
{
NV_STATUS status = NV_OK;
NvU32 i;
uvm_va_space_t *va_space = batch_context->fatal_va_space;
uvm_gpu_va_space_t *gpu_va_space = NULL;
struct mm_struct *mm;
uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable;
uvm_service_block_context_t *service_context = &gpu->parent->fault_buffer_info.replayable.block_service_context;
uvm_va_block_context_t *va_block_context = service_context->block_context;
UVM_ASSERT(gpu->parent->replayable_faults_supported);
UVM_ASSERT(va_space);
// Perform the flush and re-fetch while holding the mmap_lock and the
// VA space lock. This avoids stale faults because it prevents any vma
// modifications (mmap, munmap, mprotect) from happening between the time HW
// takes the fault and we cancel it.
mm = uvm_va_space_mm_retain_lock(va_space);
uvm_va_block_context_init(va_block_context, mm);
uvm_va_space_down_read(va_space);
// We saw fatal faults in this VA space before. Flush while holding
// mmap_lock to make sure those faults come back (aren't stale).
//
// We need to wait until all old fault messages have arrived before
// flushing, hence UVM_GPU_BUFFER_FLUSH_MODE_WAIT_UPDATE_PUT.
status = fault_buffer_flush_locked(gpu,
UVM_GPU_BUFFER_FLUSH_MODE_WAIT_UPDATE_PUT,
UVM_FAULT_REPLAY_TYPE_START,
batch_context);
if (status != NV_OK)
goto done;
// Wait for the flush's replay to finish to give the legitimate faults a
// chance to show up in the buffer again.
status = uvm_tracker_wait(&replayable_faults->replay_tracker);
if (status != NV_OK)
goto done;
// We expect all replayed faults to have arrived in the buffer so we can re-
// service them. The replay-and-wait sequence above will ensure they're all
// in the HW buffer. When GSP owns the HW buffer, we also have to wait for
// GSP to copy all available faults from the HW buffer into the shadow
// buffer.
//
// TODO: Bug 2533557: This flush does not actually guarantee that GSP will
// copy over all faults.
status = hw_fault_buffer_flush_locked(gpu->parent);
if (status != NV_OK)
goto done;
// If there is no GPU VA space for the GPU, ignore all faults in the VA
// space. This can happen if the GPU VA space has been destroyed since we
// unlocked the VA space in service_fault_batch. That means the fatal faults
// are stale, because unregistering the GPU VA space requires preempting the
// context and detaching all channels in that VA space. Restart fault
// servicing from the top.
gpu_va_space = uvm_gpu_va_space_get_by_parent_gpu(va_space, gpu->parent);
if (!gpu_va_space)
goto done;
// Re-parse the new faults
batch_context->num_invalid_prefetch_faults = 0;
batch_context->num_duplicate_faults = 0;
batch_context->num_replays = 0;
batch_context->fatal_va_space = NULL;
batch_context->has_throttled_faults = false;
status = fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_ALL);
if (status != NV_OK)
goto done;
// No more faults left. Either the previously-seen fatal entry was stale, or
// RM killed the context underneath us.
if (batch_context->num_cached_faults == 0)
goto done;
++batch_context->batch_id;
status = preprocess_fault_batch(gpu, batch_context);
if (status != NV_OK) {
if (status == NV_WARN_MORE_PROCESSING_REQUIRED) {
// Another flush happened due to stale faults or a context-fatal
// error. The previously-seen fatal fault might not exist anymore,
// so restart fault servicing from the top.
status = NV_OK;
}
goto done;
}
// Search for the target VA space
for (i = 0; i < batch_context->num_coalesced_faults; i++) {
uvm_fault_buffer_entry_t *current_entry = batch_context->ordered_fault_cache[i];
UVM_ASSERT(current_entry->va_space);
if (current_entry->va_space == va_space)
break;
}
while (i < batch_context->num_coalesced_faults) {
uvm_fault_buffer_entry_t *current_entry = batch_context->ordered_fault_cache[i];
if (current_entry->va_space != va_space)
break;
// service_fault_batch_dispatch() doesn't expect unserviceable faults.
// Just cancel them directly.
if (current_entry->is_fatal) {
status = cancel_fault_precise_va(gpu, current_entry, UVM_FAULT_CANCEL_VA_MODE_ALL);
if (status != NV_OK)
break;
++i;
}
else {
uvm_ats_fault_invalidate_t *ats_invalidate = &gpu->parent->fault_buffer_info.replayable.ats_invalidate;
NvU32 block_faults;
ats_invalidate->write_faults_in_batch = false;
uvm_hmm_service_context_init(service_context);
// Service all the faults that we can. We only really need to search
// for fatal faults, but attempting to service all is the easiest
// way to do that.
status = service_fault_batch_dispatch(va_space, gpu_va_space, batch_context, i, &block_faults, false);
if (status != NV_OK) {
// TODO: Bug 3900733: clean up locking in service_fault_batch().
// We need to drop lock and retry. That means flushing and
// starting over.
if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
status = NV_OK;
break;
}
// Invalidate TLBs before cancel to ensure that fatal faults don't
// get stuck in HW behind non-fatal faults to the same line.
status = uvm_ats_invalidate_tlbs(gpu_va_space, ats_invalidate, &batch_context->tracker);
if (status != NV_OK)
break;
while (block_faults-- > 0) {
current_entry = batch_context->ordered_fault_cache[i];
if (current_entry->is_fatal) {
status = cancel_fault_precise_va(gpu, current_entry, current_entry->replayable.cancel_va_mode);
if (status != NV_OK)
break;
}
++i;
}
}
}
done:
uvm_va_space_up_read(va_space);
uvm_va_space_mm_release_unlock(va_space, mm);
if (status == NV_OK) {
// There are two reasons to flush the fault buffer here.
//
// 1) Functional. We need to replay both the serviced non-fatal faults
// and the skipped faults in other VA spaces. The former need to be
// restarted and the latter need to be replayed so the normal fault
// service mechanism can fetch and process them.
//
// 2) Performance. After cancelling the fatal faults, a flush removes
// any potential duplicated fault that may have been added while
// processing the faults in this batch. This flush also avoids doing
// unnecessary processing after the fatal faults have been cancelled,
// so all the rest are unlikely to remain after a replay because the
// context is probably in the process of dying.
status = fault_buffer_flush_locked(gpu,
UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT,
UVM_FAULT_REPLAY_TYPE_START,
batch_context);
}
return status;
}
// Scan the ordered view of faults and group them by different va_blocks
// (managed faults) and service faults for each va_block, in batch.
// Service non-managed faults one at a time as they are encountered during the
// scan.
//
// This function returns NV_WARN_MORE_PROCESSING_REQUIRED if the fault buffer
// was flushed because the needs_fault_buffer_flush flag was set on some GPU VA
// space
// Fatal faults are marked for later processing by the caller.
static NV_STATUS service_fault_batch(uvm_gpu_t *gpu,
fault_service_mode_t service_mode,
uvm_fault_service_batch_context_t *batch_context)
@@ -1956,7 +2147,7 @@ static NV_STATUS service_fault_batch(uvm_gpu_t *gpu,
gpu->parent->fault_buffer_info.replayable.replay_policy == UVM_PERF_FAULT_REPLAY_POLICY_BLOCK;
uvm_service_block_context_t *service_context =
&gpu->parent->fault_buffer_info.replayable.block_service_context;
uvm_va_block_context_t *va_block_context = &service_context->block_context;
uvm_va_block_context_t *va_block_context = service_context->block_context;
UVM_ASSERT(gpu->parent->replayable_faults_supported);
@@ -1992,41 +2183,28 @@ static NV_STATUS service_fault_batch(uvm_gpu_t *gpu,
// to remain valid until we release. If no mm is registered, we
// can only service managed faults, not ATS/HMM faults.
mm = uvm_va_space_mm_retain_lock(va_space);
va_block_context->mm = mm;
uvm_va_block_context_init(va_block_context, mm);
uvm_va_space_down_read(va_space);
gpu_va_space = uvm_gpu_va_space_get_by_parent_gpu(va_space, gpu->parent);
if (uvm_processor_mask_test_and_clear_atomic(&va_space->needs_fault_buffer_flush, gpu->id)) {
status = fault_buffer_flush_locked(gpu,
UVM_GPU_BUFFER_FLUSH_MODE_WAIT_UPDATE_PUT,
UVM_FAULT_REPLAY_TYPE_START,
batch_context);
if (status == NV_OK)
status = NV_WARN_MORE_PROCESSING_REQUIRED;
break;
}
// The case where there is no valid GPU VA space for the GPU in this
// VA space is handled next
}
// Some faults could be already fatal if they cannot be handled by
// the UVM driver
if (current_entry->is_fatal) {
++i;
batch_context->has_fatal_faults = true;
if (!batch_context->fatal_va_space)
batch_context->fatal_va_space = va_space;
utlb->has_fatal_faults = true;
UVM_ASSERT(utlb->num_pending_faults > 0);
continue;
}
if (!uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->parent->id)) {
if (!gpu_va_space) {
// If there is no GPU VA space for the GPU, ignore the fault. This
// can happen if a GPU VA space is destroyed without explicitly
// freeing all memory ranges (destroying the VA range triggers a
// flush of the fault buffer) and there are stale entries in the
// freeing all memory ranges and there are stale entries in the
// buffer that got fixed by the servicing in a previous batch.
++i;
continue;
@@ -2044,15 +2222,17 @@ static NV_STATUS service_fault_batch(uvm_gpu_t *gpu,
uvm_va_space_mm_release_unlock(va_space, mm);
mm = NULL;
va_space = NULL;
status = NV_OK;
continue;
}
if (status != NV_OK)
goto fail;
i += block_faults;
// Don't issue replays in cancel mode
if (replay_per_va_block && !batch_context->has_fatal_faults) {
if (replay_per_va_block && !batch_context->fatal_va_space) {
status = push_replay_on_gpu(gpu, UVM_FAULT_REPLAY_TYPE_START, batch_context);
if (status != NV_OK)
goto fail;
@@ -2064,8 +2244,6 @@ static NV_STATUS service_fault_batch(uvm_gpu_t *gpu,
}
}
// Only clobber status if invalidate_status != NV_OK, since status may also
// contain NV_WARN_MORE_PROCESSING_REQUIRED.
if (va_space != NULL) {
NV_STATUS invalidate_status = uvm_ats_invalidate_tlbs(gpu_va_space, ats_invalidate, &batch_context->tracker);
if (invalidate_status != NV_OK)
@@ -2273,77 +2451,48 @@ static NvU32 is_fatal_fault_in_buffer(uvm_fault_service_batch_context_t *batch_c
return false;
}
typedef enum
{
// Only cancel faults flagged as fatal
FAULT_CANCEL_MODE_FATAL,
// Cancel all faults in the batch unconditionally
FAULT_CANCEL_MODE_ALL,
} fault_cancel_mode_t;
// Cancel faults in the given fault service batch context. The function provides
// two different modes depending on the value of cancel_mode:
// - If cancel_mode == FAULT_CANCEL_MODE_FATAL, only faults flagged as fatal
// will be cancelled. In this case, the reason reported to tools is the one
// contained in the fault entry itself.
// - If cancel_mode == FAULT_CANCEL_MODE_ALL, all faults will be cancelled
// unconditionally. In this case, the reason reported to tools for non-fatal
// faults is the one passed to this function.
static NV_STATUS cancel_faults_precise_va(uvm_gpu_t *gpu,
uvm_fault_service_batch_context_t *batch_context,
fault_cancel_mode_t cancel_mode,
UvmEventFatalReason reason)
// Cancel all faults in the given fault service batch context, even those not
// marked as fatal.
static NV_STATUS cancel_faults_all(uvm_gpu_t *gpu,
uvm_fault_service_batch_context_t *batch_context,
UvmEventFatalReason reason)
{
NV_STATUS status = NV_OK;
NV_STATUS fault_status;
uvm_va_space_t *va_space = NULL;
NvU32 i;
NvU32 i = 0;
UVM_ASSERT(gpu->parent->fault_cancel_va_supported);
if (cancel_mode == FAULT_CANCEL_MODE_ALL)
UVM_ASSERT(reason != UvmEventFatalReasonInvalid);
UVM_ASSERT(reason != UvmEventFatalReasonInvalid);
for (i = 0; i < batch_context->num_coalesced_faults; ++i) {
while (i < batch_context->num_coalesced_faults && status == NV_OK) {
uvm_fault_buffer_entry_t *current_entry = batch_context->ordered_fault_cache[i];
uvm_va_space_t *va_space = current_entry->va_space;
bool skip_va_space;
UVM_ASSERT(current_entry->va_space);
UVM_ASSERT(va_space);
if (current_entry->va_space != va_space) {
// Fault on a different va_space, drop the lock of the old one...
if (va_space != NULL)
uvm_va_space_up_read(va_space);
uvm_va_space_down_read(va_space);
va_space = current_entry->va_space;
// If there is no GPU VA space for the GPU, ignore all faults in
// that VA space. This can happen if the GPU VA space has been
// destroyed since we unlocked the VA space in service_fault_batch.
// Ignoring the fault avoids targeting a PDB that might have been
// reused by another process.
skip_va_space = !uvm_gpu_va_space_get_by_parent_gpu(va_space, gpu->parent);
// ... and take the lock of the new one
uvm_va_space_down_read(va_space);
for (;
i < batch_context->num_coalesced_faults && current_entry->va_space == va_space;
current_entry = batch_context->ordered_fault_cache[++i]) {
uvm_fault_cancel_va_mode_t cancel_va_mode;
// We don't need to check whether a buffer flush is required
// (due to VA range destruction).
// - For cancel_mode == FAULT_CANCEL_MODE_FATAL, once a fault is
// flagged as fatal we need to cancel it, even if its VA range no
// longer exists.
// - For cancel_mode == FAULT_CANCEL_MODE_ALL we don't care about
// any of this, we just want to trigger RC in RM.
}
if (skip_va_space)
continue;
if (!uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->parent->id)) {
// If there is no GPU VA space for the GPU, ignore the fault.
// This can happen if the GPU VA space did not exist in
// service_fault_batch(), or it was destroyed since then.
// This is to avoid targeting a PDB that might have been reused
// by another process.
continue;
}
// Cancel the fault
if (cancel_mode == FAULT_CANCEL_MODE_ALL || current_entry->is_fatal) {
uvm_fault_cancel_va_mode_t cancel_va_mode = current_entry->replayable.cancel_va_mode;
// If cancelling unconditionally and the fault was not fatal,
// set the cancel reason passed to this function
if (!current_entry->is_fatal) {
if (current_entry->is_fatal) {
UVM_ASSERT(current_entry->fatal_reason != UvmEventFatalReasonInvalid);
cancel_va_mode = current_entry->replayable.cancel_va_mode;
}
else {
current_entry->fatal_reason = reason;
cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL;
}
@@ -2352,17 +2501,13 @@ static NV_STATUS cancel_faults_precise_va(uvm_gpu_t *gpu,
if (status != NV_OK)
break;
}
uvm_va_space_up_read(va_space);
}
if (va_space != NULL)
uvm_va_space_up_read(va_space);
// After cancelling the fatal faults, the fault buffer is flushed to remove
// any potential duplicated fault that may have been added while processing
// the faults in this batch. This flush also avoids doing unnecessary
// processing after the fatal faults have been cancelled, so all the rest
// are unlikely to remain after a replay because the context is probably in
// the process of dying.
// Because each cancel itself triggers a replay, there may be a large number
// of new duplicated faults in the buffer after cancelling all the known
// ones. Flushing the buffer discards them to avoid unnecessary processing.
fault_status = fault_buffer_flush_locked(gpu,
UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT,
UVM_FAULT_REPLAY_TYPE_START,
@@ -2410,12 +2555,12 @@ static void cancel_fault_batch(uvm_gpu_t *gpu,
uvm_fault_service_batch_context_t *batch_context,
UvmEventFatalReason reason)
{
if (gpu->parent->fault_cancel_va_supported) {
cancel_faults_precise_va(gpu, batch_context, FAULT_CANCEL_MODE_ALL, reason);
return;
}
cancel_fault_batch_tlb(gpu, batch_context, reason);
// Return code is ignored since we're on a global error path and wouldn't be
// able to recover anyway.
if (gpu->parent->fault_cancel_va_supported)
cancel_faults_all(gpu, batch_context, reason);
else
cancel_fault_batch_tlb(gpu, batch_context, reason);
}
@@ -2502,7 +2647,7 @@ static NV_STATUS cancel_faults_precise_tlb(uvm_gpu_t *gpu, uvm_fault_service_bat
batch_context->num_invalid_prefetch_faults = 0;
batch_context->num_replays = 0;
batch_context->has_fatal_faults = false;
batch_context->fatal_va_space = NULL;
batch_context->has_throttled_faults = false;
// 5) Fetch all faults from buffer
@@ -2549,9 +2694,6 @@ static NV_STATUS cancel_faults_precise_tlb(uvm_gpu_t *gpu, uvm_fault_service_bat
// 8) Service all non-fatal faults and mark all non-serviceable faults
// as fatal
status = service_fault_batch(gpu, FAULT_SERVICE_MODE_CANCEL, batch_context);
if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
continue;
UVM_ASSERT(batch_context->num_replays == 0);
if (status == NV_ERR_NO_MEMORY)
continue;
@@ -2559,7 +2701,7 @@ static NV_STATUS cancel_faults_precise_tlb(uvm_gpu_t *gpu, uvm_fault_service_bat
break;
// No more fatal faults left, we are done
if (!batch_context->has_fatal_faults)
if (!batch_context->fatal_va_space)
break;
// 9) Search for uTLBs that contain fatal faults and meet the
@@ -2581,13 +2723,9 @@ static NV_STATUS cancel_faults_precise_tlb(uvm_gpu_t *gpu, uvm_fault_service_bat
static NV_STATUS cancel_faults_precise(uvm_gpu_t *gpu, uvm_fault_service_batch_context_t *batch_context)
{
UVM_ASSERT(batch_context->has_fatal_faults);
if (gpu->parent->fault_cancel_va_supported) {
return cancel_faults_precise_va(gpu,
batch_context,
FAULT_CANCEL_MODE_FATAL,
UvmEventFatalReasonInvalid);
}
UVM_ASSERT(batch_context->fatal_va_space);
if (gpu->parent->fault_cancel_va_supported)
return service_fault_batch_for_cancel(gpu, batch_context);
return cancel_faults_precise_tlb(gpu, batch_context);
}
@@ -2643,7 +2781,7 @@ void uvm_gpu_service_replayable_faults(uvm_gpu_t *gpu)
batch_context->num_invalid_prefetch_faults = 0;
batch_context->num_duplicate_faults = 0;
batch_context->num_replays = 0;
batch_context->has_fatal_faults = false;
batch_context->fatal_va_space = NULL;
batch_context->has_throttled_faults = false;
status = fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_BATCH_READY);
@@ -2671,9 +2809,6 @@ void uvm_gpu_service_replayable_faults(uvm_gpu_t *gpu)
// was flushed
num_replays += batch_context->num_replays;
if (status == NV_WARN_MORE_PROCESSING_REQUIRED)
continue;
enable_disable_prefetch_faults(gpu->parent, batch_context);
if (status != NV_OK) {
@@ -2687,10 +2822,17 @@ void uvm_gpu_service_replayable_faults(uvm_gpu_t *gpu)
break;
}
if (batch_context->has_fatal_faults) {
if (batch_context->fatal_va_space) {
status = uvm_tracker_wait(&batch_context->tracker);
if (status == NV_OK)
if (status == NV_OK) {
status = cancel_faults_precise(gpu, batch_context);
if (status == NV_OK) {
// Cancel handling should've issued at least one replay
UVM_ASSERT(batch_context->num_replays > 0);
++num_batches;
continue;
}
}
break;
}
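Viewed as a whole, the hunks above restructure the servicing loop in uvm_gpu_service_replayable_faults() around the new fatal_va_space field. The following is a simplified, illustrative sketch of that shape only, not code from this diff; the loop framing and the FAULT_SERVICE_MODE_REGULAR name are assumptions (the diff itself only shows FAULT_SERVICE_MODE_CANCEL):
// Illustrative sketch, not the driver's actual implementation.
static void replayable_fault_loop_sketch(uvm_gpu_t *gpu, uvm_fault_service_batch_context_t *batch_context)
{
    NV_STATUS status;
    while (1) {
        // Per-batch reset: fatal_va_space replaces the old has_fatal_faults flag.
        batch_context->fatal_va_space = NULL;
        status = fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_BATCH_READY);
        if (status != NV_OK)
            break;
        // FAULT_SERVICE_MODE_REGULAR is assumed here for the non-cancel path.
        status = service_fault_batch(gpu, FAULT_SERVICE_MODE_REGULAR, batch_context);
        if (status != NV_OK)
            break;
        if (batch_context->fatal_va_space) {
            // Wait for in-flight work, then cancel precisely; cancellation
            // issues at least one replay on its own.
            status = uvm_tracker_wait(&batch_context->tracker);
            if (status == NV_OK)
                status = cancel_faults_precise(gpu, batch_context);
            if (status != NV_OK)
                break;
        }
    }
}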

View File

@@ -794,7 +794,7 @@ uvm_membar_t uvm_hal_downgrade_membar_type(uvm_gpu_t *gpu, bool is_local_vidmem)
// memory, including those from other processors like the CPU or peer GPUs,
// must come through this GPU's L2. In all current architectures, MEMBAR_GPU
// is sufficient to resolve ordering at the L2 level.
if (is_local_vidmem && !uvm_gpu_is_coherent(gpu->parent) && !uvm_downgrade_force_membar_sys)
if (is_local_vidmem && !uvm_parent_gpu_is_coherent(gpu->parent) && !uvm_downgrade_force_membar_sys)
return UVM_MEMBAR_GPU;
// If the mapped memory was remote, or if a coherence protocol can cache

View File

@@ -60,6 +60,8 @@ module_param(uvm_disable_hmm, bool, 0444);
#include "uvm_gpu.h"
#include "uvm_pmm_gpu.h"
#include "uvm_hal_types.h"
#include "uvm_push.h"
#include "uvm_hal.h"
#include "uvm_va_block_types.h"
#include "uvm_va_space_mm.h"
#include "uvm_va_space.h"
@@ -110,20 +112,7 @@ typedef struct
bool uvm_hmm_is_enabled_system_wide(void)
{
if (uvm_disable_hmm)
return false;
if (g_uvm_global.ats.enabled)
return false;
// Confidential Computing and HMM impose mutually exclusive constraints. In
// Confidential Computing the GPU can only access pages resident in vidmem,
// but in HMM pages may be required to be resident in sysmem: file backed
// VMAs, huge pages, etc.
if (g_uvm_global.conf_computing_enabled)
return false;
return uvm_va_space_mm_enabled_system();
return !uvm_disable_hmm && !g_uvm_global.ats.enabled && uvm_va_space_mm_enabled_system();
}
bool uvm_hmm_is_enabled(uvm_va_space_t *va_space)
@@ -140,6 +129,100 @@ static uvm_va_block_t *hmm_va_block_from_node(uvm_range_tree_node_t *node)
return container_of(node, uvm_va_block_t, hmm.node);
}
// Copies the contents of the source device-private page to the
// destination CPU page. This will invalidate mappings, so cannot be
// called while holding any va_block locks.
static NV_STATUS uvm_hmm_copy_devmem_page(struct page *dst_page, struct page *src_page, uvm_tracker_t *tracker)
{
uvm_gpu_phys_address_t src_addr;
uvm_gpu_phys_address_t dst_addr;
uvm_gpu_chunk_t *gpu_chunk;
NvU64 dma_addr;
uvm_push_t push;
NV_STATUS status = NV_OK;
uvm_gpu_t *gpu;
// Holding a reference on the device-private page ensures the GPU
// is already retained. This is because when a GPU is unregistered,
// all device-private pages are migrated back to the CPU and freed
// before releasing the GPU. Therefore, if we could get a reference
// to the page, the GPU must be retained.
UVM_ASSERT(is_device_private_page(src_page) && page_count(src_page));
gpu_chunk = uvm_pmm_devmem_page_to_chunk(src_page);
gpu = uvm_gpu_chunk_get_gpu(gpu_chunk);
status = uvm_mmu_chunk_map(gpu_chunk);
if (status != NV_OK)
return status;
status = uvm_gpu_map_cpu_pages(gpu->parent, dst_page, PAGE_SIZE, &dma_addr);
if (status != NV_OK)
goto out_unmap_gpu;
dst_addr = uvm_gpu_phys_address(UVM_APERTURE_SYS, dma_addr);
src_addr = uvm_gpu_phys_address(UVM_APERTURE_VID, gpu_chunk->address);
status = uvm_push_begin_acquire(gpu->channel_manager,
UVM_CHANNEL_TYPE_GPU_TO_CPU,
tracker,
&push,
"Copy for remote process fault");
if (status != NV_OK)
goto out_unmap_cpu;
gpu->parent->ce_hal->memcopy(&push,
uvm_gpu_address_copy(gpu, dst_addr),
uvm_gpu_address_copy(gpu, src_addr),
PAGE_SIZE);
uvm_push_end(&push);
status = uvm_tracker_add_push_safe(tracker, &push);
out_unmap_cpu:
uvm_gpu_unmap_cpu_pages(gpu->parent, dma_addr, PAGE_SIZE);
out_unmap_gpu:
uvm_mmu_chunk_unmap(gpu_chunk, NULL);
return status;
}
static NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn)
{
unsigned long src_pfn = 0;
unsigned long dst_pfn = 0;
struct page *dst_page;
NV_STATUS status = NV_OK;
int ret;
ret = migrate_device_range(&src_pfn, pfn, 1);
if (ret)
return errno_to_nv_status(ret);
if (src_pfn & MIGRATE_PFN_MIGRATE) {
uvm_tracker_t tracker = UVM_TRACKER_INIT();
dst_page = alloc_page(GFP_HIGHUSER_MOVABLE);
if (!dst_page) {
status = NV_ERR_NO_MEMORY;
goto out;
}
lock_page(dst_page);
if (WARN_ON(uvm_hmm_copy_devmem_page(dst_page, migrate_pfn_to_page(src_pfn), &tracker) != NV_OK))
memzero_page(dst_page, 0, PAGE_SIZE);
dst_pfn = migrate_pfn(page_to_pfn(dst_page));
migrate_device_pages(&src_pfn, &dst_pfn, 1);
uvm_tracker_wait_deinit(&tracker);
}
out:
migrate_device_finalize(&src_pfn, &dst_pfn, 1);
if (!(src_pfn & MIGRATE_PFN_MIGRATE))
status = NV_ERR_BUSY_RETRY;
return status;
}
void uvm_hmm_va_space_initialize(uvm_va_space_t *va_space)
{
uvm_hmm_va_space_t *hmm_va_space = &va_space->hmm;
@@ -199,6 +282,9 @@ void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_
{
uvm_range_tree_node_t *node;
uvm_va_block_t *va_block;
struct range range = gpu->pmm.devmem.pagemap.range;
unsigned long pfn;
bool retry;
if (!uvm_hmm_is_enabled(va_space))
return;
@@ -207,6 +293,29 @@ void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_
uvm_assert_mmap_lock_locked(mm);
uvm_assert_rwsem_locked_write(&va_space->lock);
// There could be pages with page->zone_device_data pointing to the va_space
// which may be about to be freed. Migrate those back to the CPU so we don't
// fault on them. Normally infinite retries are bad, but we don't have any
// option here. Device-private pages can't be pinned so migration should
// eventually succeed. Even if we did eventually bail out of the loop we'd
// just stall in memunmap_pages() anyway.
do {
retry = false;
for (pfn = __phys_to_pfn(range.start); pfn <= __phys_to_pfn(range.end); pfn++) {
struct page *page = pfn_to_page(pfn);
UVM_ASSERT(is_device_private_page(page));
// This check is racy because nothing stops the page from being freed
// and even reused. That doesn't matter, though: in the worst case the
// migration fails, we retry and find the va_space doesn't match.
if (page->zone_device_data == va_space)
if (uvm_hmm_pmm_gpu_evict_pfn(pfn) != NV_OK)
retry = true;
}
} while (retry);
uvm_range_tree_for_each(node, &va_space->hmm.blocks) {
va_block = hmm_va_block_from_node(node);
@@ -568,7 +677,7 @@ bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
void uvm_hmm_service_context_init(uvm_service_block_context_t *service_context)
{
// TODO: Bug 4050579: Remove this when swap cached pages can be migrated.
service_context->block_context.hmm.swap_cached = false;
service_context->block_context->hmm.swap_cached = false;
}
NV_STATUS uvm_hmm_migrate_begin(uvm_va_block_t *va_block)
@@ -631,47 +740,6 @@ static NV_STATUS hmm_migrate_range(uvm_va_block_t *va_block,
return status;
}
void uvm_hmm_evict_va_blocks(uvm_va_space_t *va_space)
{
// We can't use uvm_va_space_mm_retain(), because the va_space_mm
// should already be dead by now.
struct mm_struct *mm = va_space->va_space_mm.mm;
uvm_hmm_va_space_t *hmm_va_space = &va_space->hmm;
uvm_range_tree_node_t *node, *next;
uvm_va_block_t *va_block;
uvm_va_block_context_t *block_context;
uvm_down_read_mmap_lock(mm);
uvm_va_space_down_write(va_space);
uvm_range_tree_for_each_safe(node, next, &hmm_va_space->blocks) {
uvm_va_block_region_t region;
struct vm_area_struct *vma;
va_block = hmm_va_block_from_node(node);
block_context = uvm_va_space_block_context(va_space, mm);
uvm_hmm_migrate_begin_wait(va_block);
uvm_mutex_lock(&va_block->lock);
for_each_va_block_vma_region(va_block, mm, vma, &region) {
if (!uvm_hmm_vma_is_valid(vma, vma->vm_start, false))
continue;
block_context->hmm.vma = vma;
uvm_hmm_va_block_migrate_locked(va_block,
NULL,
block_context,
UVM_ID_CPU,
region,
UVM_MAKE_RESIDENT_CAUSE_API_MIGRATE);
}
uvm_mutex_unlock(&va_block->lock);
uvm_hmm_migrate_finish(va_block);
}
uvm_va_space_up_write(va_space);
uvm_up_read_mmap_lock(mm);
}
NV_STATUS uvm_hmm_test_va_block_inject_split_error(uvm_va_space_t *va_space, NvU64 addr)
{
uvm_va_block_test_t *block_test;
@@ -1476,40 +1544,59 @@ static NV_STATUS hmm_va_block_cpu_page_populate(uvm_va_block_t *va_block,
return status;
}
status = uvm_va_block_map_cpu_chunk_on_gpus(va_block, page_index);
status = uvm_va_block_map_cpu_chunk_on_gpus(va_block, chunk, page_index);
if (status != NV_OK) {
uvm_cpu_chunk_remove_from_block(va_block, page_index);
uvm_cpu_chunk_remove_from_block(va_block, page_to_nid(page), page_index);
uvm_cpu_chunk_free(chunk);
}
return status;
}
static void hmm_va_block_cpu_page_unpopulate(uvm_va_block_t *va_block,
uvm_page_index_t page_index)
static void hmm_va_block_cpu_unpopulate_chunk(uvm_va_block_t *va_block,
uvm_cpu_chunk_t *chunk,
int chunk_nid,
uvm_page_index_t page_index)
{
uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, page_index);
UVM_ASSERT(uvm_va_block_is_hmm(va_block));
if (!chunk)
return;
UVM_ASSERT(!uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU) ||
!uvm_page_mask_test(&va_block->cpu.resident, page_index));
!uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index));
UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) == PAGE_SIZE);
uvm_cpu_chunk_remove_from_block(va_block, page_index);
uvm_cpu_chunk_remove_from_block(va_block, chunk_nid, page_index);
uvm_va_block_unmap_cpu_chunk_on_gpus(va_block, chunk, page_index);
uvm_cpu_chunk_free(chunk);
}
static void hmm_va_block_cpu_page_unpopulate(uvm_va_block_t *va_block, uvm_page_index_t page_index, struct page *page)
{
uvm_cpu_chunk_t *chunk;
UVM_ASSERT(uvm_va_block_is_hmm(va_block));
if (page) {
chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, page_to_nid(page), page_index);
hmm_va_block_cpu_unpopulate_chunk(va_block, chunk, page_to_nid(page), page_index);
}
else {
int nid;
for_each_possible_uvm_node(nid) {
chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, nid, page_index);
hmm_va_block_cpu_unpopulate_chunk(va_block, chunk, nid, page_index);
}
}
}
static bool hmm_va_block_cpu_page_is_same(uvm_va_block_t *va_block,
uvm_page_index_t page_index,
struct page *page)
{
struct page *old_page = uvm_cpu_chunk_get_cpu_page(va_block, page_index);
struct page *old_page = uvm_va_block_get_cpu_page(va_block, page_index);
UVM_ASSERT(uvm_cpu_chunk_is_hmm(uvm_cpu_chunk_get_chunk_for_page(va_block, page_index)));
UVM_ASSERT(uvm_cpu_chunk_is_hmm(uvm_cpu_chunk_get_chunk_for_page(va_block, page_to_nid(page), page_index)));
return old_page == page;
}
@@ -1522,7 +1609,7 @@ static void clear_service_context_masks(uvm_service_block_context_t *service_con
uvm_processor_id_t new_residency,
uvm_page_index_t page_index)
{
uvm_page_mask_clear(&service_context->block_context.caller_page_mask, page_index);
uvm_page_mask_clear(&service_context->block_context->caller_page_mask, page_index);
uvm_page_mask_clear(&service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency,
page_index);
@@ -1549,7 +1636,6 @@ static void cpu_mapping_set(uvm_va_block_t *va_block,
uvm_page_index_t page_index)
{
uvm_processor_mask_set(&va_block->mapped, UVM_ID_CPU);
uvm_page_mask_set(&va_block->maybe_mapped_pages, page_index);
uvm_page_mask_set(&va_block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ], page_index);
if (is_write)
uvm_page_mask_set(&va_block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE], page_index);
@@ -1699,7 +1785,7 @@ static NV_STATUS sync_page_and_chunk_state(uvm_va_block_t *va_block,
// migrate_vma_finalize() will release the reference so we should
// clear our pointer to it.
// TODO: Bug 3660922: Need to handle read duplication at some point.
hmm_va_block_cpu_page_unpopulate(va_block, page_index);
hmm_va_block_cpu_page_unpopulate(va_block, page_index, page);
}
}
@@ -1725,7 +1811,7 @@ static void clean_up_non_migrating_page(uvm_va_block_t *va_block,
else {
UVM_ASSERT(page_ref_count(dst_page) == 1);
hmm_va_block_cpu_page_unpopulate(va_block, page_index);
hmm_va_block_cpu_page_unpopulate(va_block, page_index, dst_page);
}
unlock_page(dst_page);
@@ -1760,7 +1846,7 @@ static void lock_block_cpu_page(uvm_va_block_t *va_block,
unsigned long *dst_pfns,
uvm_page_mask_t *same_devmem_page_mask)
{
uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, page_index);
uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, page_to_nid(src_page), page_index);
uvm_va_block_region_t chunk_region;
struct page *dst_page;
@@ -1786,7 +1872,7 @@ static void lock_block_cpu_page(uvm_va_block_t *va_block,
// hmm_va_block_cpu_page_unpopulate() or block_kill(). If the page
// does not migrate, it will be freed though.
UVM_ASSERT(!uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU) ||
!uvm_page_mask_test(&va_block->cpu.resident, page_index));
!uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index));
UVM_ASSERT(chunk->type == UVM_CPU_CHUNK_TYPE_PHYSICAL);
UVM_ASSERT(page_ref_count(dst_page) == 1);
uvm_cpu_chunk_make_hmm(chunk);
@@ -1934,7 +2020,7 @@ static NV_STATUS alloc_and_copy_to_cpu(uvm_va_block_t *va_block,
}
UVM_ASSERT(!uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU) ||
!uvm_page_mask_test(&va_block->cpu.resident, page_index));
!uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index));
// Allocate a user system memory page for the destination.
// This is the typical case since Linux will free the source page when
@@ -2012,8 +2098,8 @@ static NV_STATUS uvm_hmm_devmem_fault_alloc_and_copy(uvm_hmm_devmem_fault_contex
service_context = devmem_fault_context->service_context;
va_block_retry = devmem_fault_context->va_block_retry;
va_block = devmem_fault_context->va_block;
src_pfns = service_context->block_context.hmm.src_pfns;
dst_pfns = service_context->block_context.hmm.dst_pfns;
src_pfns = service_context->block_context->hmm.src_pfns;
dst_pfns = service_context->block_context->hmm.dst_pfns;
// Build the migration page mask.
// Note that thrashing pinned pages and prefetch pages are already
@@ -2022,7 +2108,7 @@ static NV_STATUS uvm_hmm_devmem_fault_alloc_and_copy(uvm_hmm_devmem_fault_contex
uvm_page_mask_copy(page_mask, &service_context->per_processor_masks[UVM_ID_CPU_VALUE].new_residency);
status = alloc_and_copy_to_cpu(va_block,
service_context->block_context.hmm.vma,
service_context->block_context->hmm.vma,
src_pfns,
dst_pfns,
service_context->region,
@@ -2057,8 +2143,8 @@ static NV_STATUS uvm_hmm_devmem_fault_finalize_and_map(uvm_hmm_devmem_fault_cont
prefetch_hint = &service_context->prefetch_hint;
va_block = devmem_fault_context->va_block;
va_block_retry = devmem_fault_context->va_block_retry;
src_pfns = service_context->block_context.hmm.src_pfns;
dst_pfns = service_context->block_context.hmm.dst_pfns;
src_pfns = service_context->block_context->hmm.src_pfns;
dst_pfns = service_context->block_context->hmm.dst_pfns;
region = service_context->region;
page_mask = &devmem_fault_context->page_mask;
@@ -2165,8 +2251,7 @@ static NV_STATUS populate_region(uvm_va_block_t *va_block,
// Since we have a stable snapshot of the CPU pages, we can
// update the residency and protection information.
uvm_processor_mask_set(&va_block->resident, UVM_ID_CPU);
uvm_page_mask_set(&va_block->cpu.resident, page_index);
uvm_va_block_cpu_set_resident_page(va_block, page_to_nid(page), page_index);
cpu_mapping_set(va_block, pfns[page_index] & HMM_PFN_WRITE, page_index);
}
@@ -2253,7 +2338,7 @@ static void hmm_release_atomic_pages(uvm_va_block_t *va_block,
uvm_page_index_t page_index;
for_each_va_block_page_in_region(page_index, region) {
struct page *page = service_context->block_context.hmm.pages[page_index];
struct page *page = service_context->block_context->hmm.pages[page_index];
if (!page)
continue;
@@ -2269,14 +2354,14 @@ static NV_STATUS hmm_block_atomic_fault_locked(uvm_processor_id_t processor_id,
uvm_service_block_context_t *service_context)
{
uvm_va_block_region_t region = service_context->region;
struct page **pages = service_context->block_context.hmm.pages;
struct page **pages = service_context->block_context->hmm.pages;
int npages;
uvm_page_index_t page_index;
uvm_make_resident_cause_t cause;
NV_STATUS status;
if (!uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU) ||
!uvm_page_mask_region_full(&va_block->cpu.resident, region)) {
!uvm_va_block_cpu_is_region_resident_on(va_block, NUMA_NO_NODE, region)) {
// There is an atomic GPU fault. We need to make sure no pages are
// GPU resident so that make_device_exclusive_range() doesn't call
// migrate_to_ram() and cause a va_space lock recursion problem.
@@ -2289,7 +2374,7 @@ static NV_STATUS hmm_block_atomic_fault_locked(uvm_processor_id_t processor_id,
status = uvm_hmm_va_block_migrate_locked(va_block,
va_block_retry,
&service_context->block_context,
service_context->block_context,
UVM_ID_CPU,
region,
cause);
@@ -2299,7 +2384,7 @@ static NV_STATUS hmm_block_atomic_fault_locked(uvm_processor_id_t processor_id,
// make_device_exclusive_range() will try to call migrate_to_ram()
// and deadlock with ourself if the data isn't CPU resident.
if (!uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU) ||
!uvm_page_mask_region_full(&va_block->cpu.resident, region)) {
!uvm_va_block_cpu_is_region_resident_on(va_block, NUMA_NO_NODE, region)) {
status = NV_WARN_MORE_PROCESSING_REQUIRED;
goto done;
}
@@ -2309,7 +2394,7 @@ static NV_STATUS hmm_block_atomic_fault_locked(uvm_processor_id_t processor_id,
// mmap() files so we check for that here and report a fatal fault.
// Otherwise with the current Linux 6.1 make_device_exclusive_range(),
// it doesn't make the page exclusive and we end up in an endless loop.
if (service_context->block_context.hmm.vma->vm_flags & VM_SHARED) {
if (service_context->block_context->hmm.vma->vm_flags & (VM_SHARED | VM_HUGETLB)) {
status = NV_ERR_NOT_SUPPORTED;
goto done;
}
@@ -2318,7 +2403,7 @@ static NV_STATUS hmm_block_atomic_fault_locked(uvm_processor_id_t processor_id,
uvm_mutex_unlock(&va_block->lock);
npages = make_device_exclusive_range(service_context->block_context.mm,
npages = make_device_exclusive_range(service_context->block_context->mm,
uvm_va_block_cpu_page_address(va_block, region.first),
uvm_va_block_cpu_page_address(va_block, region.outer - 1) + PAGE_SIZE,
pages + region.first,
@@ -2356,15 +2441,13 @@ static NV_STATUS hmm_block_atomic_fault_locked(uvm_processor_id_t processor_id,
if (uvm_page_mask_test(&va_block->cpu.allocated, page_index)) {
UVM_ASSERT(hmm_va_block_cpu_page_is_same(va_block, page_index, page));
UVM_ASSERT(uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU));
UVM_ASSERT(uvm_page_mask_test(&va_block->cpu.resident, page_index));
UVM_ASSERT(uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index));
}
else {
NV_STATUS s = hmm_va_block_cpu_page_populate(va_block, page_index, page);
if (s == NV_OK) {
uvm_processor_mask_set(&va_block->resident, UVM_ID_CPU);
uvm_page_mask_set(&va_block->cpu.resident, page_index);
}
if (s == NV_OK)
uvm_va_block_cpu_set_resident_page(va_block, page_to_nid(page), page_index);
}
cpu_mapping_clear(va_block, page_index);
@@ -2419,7 +2502,7 @@ static NV_STATUS hmm_block_cpu_fault_locked(uvm_processor_id_t processor_id,
uvm_service_block_context_t *service_context)
{
uvm_va_block_region_t region = service_context->region;
struct migrate_vma *args = &service_context->block_context.hmm.migrate_vma_args;
struct migrate_vma *args = &service_context->block_context->hmm.migrate_vma_args;
NV_STATUS status;
int ret;
uvm_hmm_devmem_fault_context_t fault_context = {
@@ -2453,8 +2536,8 @@ static NV_STATUS hmm_block_cpu_fault_locked(uvm_processor_id_t processor_id,
}
status = hmm_make_resident_cpu(va_block,
service_context->block_context.hmm.vma,
service_context->block_context.hmm.src_pfns,
service_context->block_context->hmm.vma,
service_context->block_context->hmm.src_pfns,
region,
service_context->access_type,
&fault_context.same_devmem_page_mask);
@@ -2476,9 +2559,9 @@ static NV_STATUS hmm_block_cpu_fault_locked(uvm_processor_id_t processor_id,
}
}
args->vma = service_context->block_context.hmm.vma;
args->src = service_context->block_context.hmm.src_pfns + region.first;
args->dst = service_context->block_context.hmm.dst_pfns + region.first;
args->vma = service_context->block_context->hmm.vma;
args->src = service_context->block_context->hmm.src_pfns + region.first;
args->dst = service_context->block_context->hmm.dst_pfns + region.first;
args->start = uvm_va_block_region_start(va_block, region);
args->end = uvm_va_block_region_end(va_block, region) + 1;
args->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
@@ -2558,7 +2641,7 @@ static NV_STATUS dmamap_src_sysmem_pages(uvm_va_block_t *va_block,
// TODO: Bug 4050579: Remove this when swap cached pages can be
// migrated.
if (service_context) {
service_context->block_context.hmm.swap_cached = true;
service_context->block_context->hmm.swap_cached = true;
break;
}
@@ -2574,7 +2657,7 @@ static NV_STATUS dmamap_src_sysmem_pages(uvm_va_block_t *va_block,
if (uvm_page_mask_test(&va_block->cpu.allocated, page_index)) {
UVM_ASSERT(hmm_va_block_cpu_page_is_same(va_block, page_index, src_page));
UVM_ASSERT(uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU));
UVM_ASSERT(uvm_page_mask_test(&va_block->cpu.resident, page_index));
UVM_ASSERT(uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index));
}
else {
status = hmm_va_block_cpu_page_populate(va_block, page_index, src_page);
@@ -2588,8 +2671,7 @@ static NV_STATUS dmamap_src_sysmem_pages(uvm_va_block_t *va_block,
// migrate_vma_setup() was able to isolate and lock the page;
// therefore, it is CPU resident and not mapped.
uvm_processor_mask_set(&va_block->resident, UVM_ID_CPU);
uvm_page_mask_set(&va_block->cpu.resident, page_index);
uvm_va_block_cpu_set_resident_page(va_block, page_to_nid(src_page), page_index);
}
// The call to migrate_vma_setup() will have inserted a migration
@@ -2604,7 +2686,7 @@ static NV_STATUS dmamap_src_sysmem_pages(uvm_va_block_t *va_block,
if (uvm_page_mask_test(&va_block->cpu.allocated, page_index)) {
UVM_ASSERT(!uvm_va_block_page_resident_processors_count(va_block, page_index));
hmm_va_block_cpu_page_unpopulate(va_block, page_index);
hmm_va_block_cpu_page_unpopulate(va_block, page_index, NULL);
}
}
@@ -2618,7 +2700,7 @@ static NV_STATUS dmamap_src_sysmem_pages(uvm_va_block_t *va_block,
}
if (uvm_page_mask_empty(page_mask) ||
(service_context && service_context->block_context.hmm.swap_cached))
(service_context && service_context->block_context->hmm.swap_cached))
status = NV_WARN_MORE_PROCESSING_REQUIRED;
if (status != NV_OK)
@@ -2649,8 +2731,8 @@ static NV_STATUS uvm_hmm_gpu_fault_alloc_and_copy(struct vm_area_struct *vma,
service_context = uvm_hmm_gpu_fault_event->service_context;
region = service_context->region;
prefetch_hint = &service_context->prefetch_hint;
src_pfns = service_context->block_context.hmm.src_pfns;
dst_pfns = service_context->block_context.hmm.dst_pfns;
src_pfns = service_context->block_context->hmm.src_pfns;
dst_pfns = service_context->block_context->hmm.dst_pfns;
// Build the migration mask.
// Note that thrashing pinned pages are already accounted for in
@@ -2708,8 +2790,8 @@ static NV_STATUS uvm_hmm_gpu_fault_finalize_and_map(uvm_hmm_gpu_fault_event_t *u
va_block = uvm_hmm_gpu_fault_event->va_block;
va_block_retry = uvm_hmm_gpu_fault_event->va_block_retry;
service_context = uvm_hmm_gpu_fault_event->service_context;
src_pfns = service_context->block_context.hmm.src_pfns;
dst_pfns = service_context->block_context.hmm.dst_pfns;
src_pfns = service_context->block_context->hmm.src_pfns;
dst_pfns = service_context->block_context->hmm.dst_pfns;
region = service_context->region;
page_mask = &uvm_hmm_gpu_fault_event->page_mask;
@@ -2752,11 +2834,11 @@ NV_STATUS uvm_hmm_va_block_service_locked(uvm_processor_id_t processor_id,
uvm_va_block_retry_t *va_block_retry,
uvm_service_block_context_t *service_context)
{
struct mm_struct *mm = service_context->block_context.mm;
struct vm_area_struct *vma = service_context->block_context.hmm.vma;
struct mm_struct *mm = service_context->block_context->mm;
struct vm_area_struct *vma = service_context->block_context->hmm.vma;
uvm_va_block_region_t region = service_context->region;
uvm_hmm_gpu_fault_event_t uvm_hmm_gpu_fault_event;
struct migrate_vma *args = &service_context->block_context.hmm.migrate_vma_args;
struct migrate_vma *args = &service_context->block_context->hmm.migrate_vma_args;
int ret;
NV_STATUS status = NV_ERR_INVALID_ADDRESS;
@@ -2780,8 +2862,8 @@ NV_STATUS uvm_hmm_va_block_service_locked(uvm_processor_id_t processor_id,
uvm_hmm_gpu_fault_event.service_context = service_context;
args->vma = vma;
args->src = service_context->block_context.hmm.src_pfns + region.first;
args->dst = service_context->block_context.hmm.dst_pfns + region.first;
args->src = service_context->block_context->hmm.src_pfns + region.first;
args->dst = service_context->block_context->hmm.dst_pfns + region.first;
args->start = uvm_va_block_region_start(va_block, region);
args->end = uvm_va_block_region_end(va_block, region) + 1;
args->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE | MIGRATE_VMA_SELECT_SYSTEM;
@@ -2815,8 +2897,8 @@ NV_STATUS uvm_hmm_va_block_service_locked(uvm_processor_id_t processor_id,
// since migrate_vma_setup() would have reported that information.
// Try to make it resident in system memory and retry the migration.
status = hmm_make_resident_cpu(va_block,
service_context->block_context.hmm.vma,
service_context->block_context.hmm.src_pfns,
service_context->block_context->hmm.vma,
service_context->block_context->hmm.src_pfns,
region,
service_context->access_type,
NULL);
@@ -2962,16 +3044,6 @@ static NV_STATUS uvm_hmm_migrate_finalize(uvm_hmm_migrate_event_t *uvm_hmm_migra
&uvm_hmm_migrate_event->same_devmem_page_mask);
}
static bool is_resident(uvm_va_block_t *va_block,
uvm_processor_id_t dest_id,
uvm_va_block_region_t region)
{
if (!uvm_processor_mask_test(&va_block->resident, dest_id))
return false;
return uvm_page_mask_region_full(uvm_va_block_resident_mask_get(va_block, dest_id), region);
}
// Note that migrate_vma_*() doesn't handle asynchronous migrations so the
// migration flag UVM_MIGRATE_FLAG_SKIP_CPU_MAP doesn't have an effect.
// TODO: Bug 3900785: investigate ways to implement async migration.
@@ -3063,9 +3135,7 @@ NV_STATUS uvm_hmm_va_block_migrate_locked(uvm_va_block_t *va_block,
uvm_page_mask_init_from_region(page_mask, region, NULL);
for_each_id_in_mask(id, &va_block->resident) {
if (!uvm_page_mask_andnot(page_mask,
page_mask,
uvm_va_block_resident_mask_get(va_block, id)))
if (!uvm_page_mask_andnot(page_mask, page_mask, uvm_va_block_resident_mask_get(va_block, id, NUMA_NO_NODE)))
return NV_OK;
}
@@ -3193,6 +3263,7 @@ static NV_STATUS hmm_va_block_evict_chunks(uvm_va_block_t *va_block,
uvm_page_mask_t *page_mask = &uvm_hmm_migrate_event.page_mask;
const uvm_va_policy_t *policy;
uvm_va_policy_node_t *node;
uvm_page_mask_t *cpu_resident_mask = uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU, NUMA_NO_NODE);
unsigned long npages;
NV_STATUS status;
@@ -3215,7 +3286,7 @@ static NV_STATUS hmm_va_block_evict_chunks(uvm_va_block_t *va_block,
// Pages resident on the GPU should not have a resident page in system
// memory.
// TODO: Bug 3660922: Need to handle read duplication at some point.
UVM_ASSERT(uvm_page_mask_region_empty(&va_block->cpu.resident, region));
UVM_ASSERT(uvm_page_mask_region_empty(cpu_resident_mask, region));
status = alloc_and_copy_to_cpu(va_block,
NULL,
@@ -3314,35 +3385,34 @@ NV_STATUS uvm_hmm_va_block_evict_pages_from_gpu(uvm_va_block_t *va_block,
NULL);
}
NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn)
NV_STATUS uvm_hmm_remote_cpu_fault(struct vm_fault *vmf)
{
unsigned long src_pfn = 0;
unsigned long dst_pfn = 0;
struct page *dst_page;
NV_STATUS status = NV_OK;
unsigned long src_pfn;
unsigned long dst_pfn;
struct migrate_vma args;
struct page *src_page = vmf->page;
uvm_tracker_t tracker = UVM_TRACKER_INIT();
int ret;
ret = migrate_device_range(&src_pfn, pfn, 1);
if (ret)
return errno_to_nv_status(ret);
args.vma = vmf->vma;
args.src = &src_pfn;
args.dst = &dst_pfn;
args.start = nv_page_fault_va(vmf);
args.end = args.start + PAGE_SIZE;
args.pgmap_owner = &g_uvm_global;
args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
args.fault_page = src_page;
// We don't call migrate_vma_setup_locked() here because we don't
// have a va_block and don't want to ignore invalidations.
ret = migrate_vma_setup(&args);
UVM_ASSERT(!ret);
if (src_pfn & MIGRATE_PFN_MIGRATE) {
// All the code for copying a vidmem page to sysmem relies on
// having a va_block. However certain combinations of mremap()
// and fork() can result in device-private pages being mapped
// in a child process without a va_block.
//
// We don't expect the above to be a common occurrence so for
// now we allocate a fresh zero page when evicting without a
// va_block. However this results in child processes losing
// data so make sure we warn about it. Ideally we would just
// not migrate and SIGBUS the child if it tries to access the
// page. However that would prevent unloading of the driver so
// we're stuck with this until we fix the problem.
// TODO: Bug 3902536: add code to migrate GPU memory without having a
// va_block.
WARN_ON(1);
dst_page = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_ZERO);
struct page *dst_page;
dst_page = alloc_page(GFP_HIGHUSER_MOVABLE);
if (!dst_page) {
status = NV_ERR_NO_MEMORY;
goto out;
@@ -3351,11 +3421,15 @@ NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn)
lock_page(dst_page);
dst_pfn = migrate_pfn(page_to_pfn(dst_page));
migrate_device_pages(&src_pfn, &dst_pfn, 1);
status = uvm_hmm_copy_devmem_page(dst_page, src_page, &tracker);
if (status == NV_OK)
status = uvm_tracker_wait_deinit(&tracker);
}
migrate_vma_pages(&args);
out:
migrate_device_finalize(&src_pfn, &dst_pfn, 1);
migrate_vma_finalize(&args);
return status;
}
@@ -3606,4 +3680,3 @@ bool uvm_hmm_must_use_sysmem(uvm_va_block_t *va_block,
}
#endif // UVM_IS_CONFIG_HMM()

View File

@@ -307,10 +307,10 @@ typedef struct
uvm_migrate_mode_t mode,
uvm_tracker_t *out_tracker);
// Evicts all va_blocks in the va_space to the CPU. Unlike the
// other va_block eviction functions this is based on virtual
// address and therefore takes mmap_lock for read.
void uvm_hmm_evict_va_blocks(uvm_va_space_t *va_space);
// Handle a fault to a device-private page from a process other than the
// process which created the va_space that originally allocated the
// device-private page.
NV_STATUS uvm_hmm_remote_cpu_fault(struct vm_fault *vmf);
// This sets the va_block_context->hmm.src_pfns[] to the ZONE_DEVICE private
// PFN for the GPU chunk memory.
@@ -343,14 +343,6 @@ typedef struct
const uvm_page_mask_t *pages_to_evict,
uvm_va_block_region_t region);
// Migrate a GPU device-private page to system memory. This is
// called to remove CPU page table references to device private
// struct pages for the given GPU after all other references in
// va_blocks have been released and the GPU is in the process of
// being removed/torn down. Note that there is no mm, VMA,
// va_block or any user channel activity on this GPU.
NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn);
// This returns what would be the intersection of va_block start/end and
// VMA start/end-1 for the given 'lookup_address' if
// uvm_hmm_va_block_find_create() was called.
@@ -592,8 +584,10 @@ typedef struct
return NV_ERR_INVALID_ADDRESS;
}
static void uvm_hmm_evict_va_blocks(uvm_va_space_t *va_space)
static NV_STATUS uvm_hmm_remote_cpu_fault(struct vm_fault *vmf)
{
UVM_ASSERT(0);
return NV_ERR_INVALID_ADDRESS;
}
static NV_STATUS uvm_hmm_va_block_evict_chunk_prep(uvm_va_block_t *va_block,
@@ -622,11 +616,6 @@ typedef struct
return NV_OK;
}
static NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn)
{
return NV_OK;
}
static NV_STATUS uvm_hmm_va_block_range_bounds(uvm_va_space_t *va_space,
struct mm_struct *mm,
NvU64 lookup_address,

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2020-2022 NVIDIA Corporation
Copyright (c) 2020-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -59,12 +59,12 @@ void uvm_hal_hopper_arch_init_properties(uvm_parent_gpu_t *parent_gpu)
// Physical CE writes to vidmem are non-coherent with respect to the CPU on
// GH180.
parent_gpu->ce_phys_vidmem_write_supported = !uvm_gpu_is_coherent(parent_gpu);
parent_gpu->ce_phys_vidmem_write_supported = !uvm_parent_gpu_is_coherent(parent_gpu);
// TODO: Bug 4174553: [HGX-SkinnyJoe][GH180] channel errors discussion/debug
// portion for the uvm tests became nonresponsive after
// some time and then failed even after reboot
parent_gpu->peer_copy_mode = uvm_gpu_is_coherent(parent_gpu) ?
parent_gpu->peer_copy_mode = uvm_parent_gpu_is_coherent(parent_gpu) ?
UVM_GPU_PEER_COPY_MODE_VIRTUAL : g_uvm_global.peer_copy_mode;
// All GR context buffers may be mapped to 57b wide VAs. All "compute" units

View File

@@ -128,8 +128,9 @@ static inline const struct cpumask *uvm_cpumask_of_node(int node)
// present if we see the callback.
//
// The callback was added in commit 0f0a327fa12cd55de5e7f8c05a70ac3d047f405e,
// v3.19 (2014-11-13).
#if defined(NV_MMU_NOTIFIER_OPS_HAS_INVALIDATE_RANGE)
// v3.19 (2014-11-13) and renamed in commit 1af5a8109904.
#if defined(NV_MMU_NOTIFIER_OPS_HAS_INVALIDATE_RANGE) || \
defined(NV_MMU_NOTIFIER_OPS_HAS_ARCH_INVALIDATE_SECONDARY_TLBS)
#define UVM_CAN_USE_MMU_NOTIFIERS() 1
#else
#define UVM_CAN_USE_MMU_NOTIFIERS() 0
@@ -153,10 +154,6 @@ static inline const struct cpumask *uvm_cpumask_of_node(int node)
#define VM_MIXEDMAP 0x00000000
#endif
#if !defined(MPOL_PREFERRED_MANY)
#define MPOL_PREFERRED_MANY 5
#endif
//
// printk.h already defined pr_fmt, so we have to redefine it so the pr_*
// routines pick up our version
@@ -352,6 +349,47 @@ static inline NvU64 NV_GETTIME(void)
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
#endif
#if !defined(NV_FIND_NEXT_BIT_WRAP_PRESENT)
static inline unsigned long find_next_bit_wrap(const unsigned long *addr, unsigned long size, unsigned long offset)
{
unsigned long bit = find_next_bit(addr, size, offset);
if (bit < size)
return bit;
bit = find_first_bit(addr, offset);
return bit < offset ? bit : size;
}
#endif
// for_each_set_bit_wrap and __for_each_wrap were introduced in v6.1-rc1
// by commit 4fe49b3b97c2640147c46519c2a6fdb06df34f5f
#if !defined(for_each_set_bit_wrap)
static inline unsigned long __for_each_wrap(const unsigned long *bitmap,
unsigned long size,
unsigned long start,
unsigned long n)
{
unsigned long bit;
if (n > start) {
bit = find_next_bit(bitmap, size, n);
if (bit < size)
return bit;
n = 0;
}
bit = find_next_bit(bitmap, start, n);
return bit < start ? bit : size;
}
#define for_each_set_bit_wrap(bit, addr, size, start) \
for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
(bit) < (size); \
(bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
#endif
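The two fallbacks above mirror the upstream helpers: iteration starts at the given offset, runs to the end of the bitmap, then wraps to bit 0 and stops before reaching the start offset again, so every set bit is visited exactly once. A minimal usage sketch (hypothetical example assuming <linux/bitmap.h>; not part of this diff):
static void wrap_iteration_example(void)
{
    DECLARE_BITMAP(mask, 8);
    unsigned long bit;

    bitmap_zero(mask, 8);
    __set_bit(1, mask);
    __set_bit(6, mask);

    // Starting at offset 4, this visits bit 6 first, then wraps around
    // and visits bit 1, then stops.
    for_each_set_bit_wrap(bit, mask, 8, 4)
        pr_info("visited bit %lu\n", bit);
}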
// Added in 2.6.24
#ifndef ACCESS_ONCE
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
@@ -583,4 +621,5 @@ static inline pgprot_t uvm_pgprot_decrypted(pgprot_t prot)
#include <asm/page.h>
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif
#endif // _UVM_LINUX_H

View File

@@ -355,6 +355,7 @@ static uvm_membar_t va_range_downgrade_membar(uvm_va_range_t *va_range, uvm_ext_
if (!ext_gpu_map->mem_handle)
return UVM_MEMBAR_GPU;
// EGM uses the same barriers as sysmem.
return uvm_hal_downgrade_membar_type(ext_gpu_map->gpu,
!ext_gpu_map->is_sysmem && ext_gpu_map->gpu == ext_gpu_map->owning_gpu);
}
@@ -633,6 +634,8 @@ static NV_STATUS set_ext_gpu_map_location(uvm_ext_gpu_map_t *ext_gpu_map,
const UvmGpuMemoryInfo *mem_info)
{
uvm_gpu_t *owning_gpu;
if (mem_info->egm)
UVM_ASSERT(mem_info->sysmem);
if (!mem_info->deviceDescendant && !mem_info->sysmem) {
ext_gpu_map->owning_gpu = NULL;
@@ -641,6 +644,7 @@ static NV_STATUS set_ext_gpu_map_location(uvm_ext_gpu_map_t *ext_gpu_map,
}
// This is a local or peer allocation, so the owning GPU must have been
// registered.
// This also checks that the EGM owning GPU is registered.
owning_gpu = uvm_va_space_get_gpu_by_uuid(va_space, &mem_info->uuid);
if (!owning_gpu)
return NV_ERR_INVALID_DEVICE;
@@ -651,13 +655,10 @@ static NV_STATUS set_ext_gpu_map_location(uvm_ext_gpu_map_t *ext_gpu_map,
// crashes when it's eventually freed.
// TODO: Bug 1811006: Bug tracking the RM issue, its fix might change the
// semantics of sysmem allocations.
if (mem_info->sysmem) {
ext_gpu_map->owning_gpu = owning_gpu;
ext_gpu_map->is_sysmem = true;
return NV_OK;
}
if (owning_gpu != mapping_gpu) {
// Check if peer access for peer memory is enabled.
// This path also handles EGM allocations.
if (owning_gpu != mapping_gpu && (!mem_info->sysmem || mem_info->egm)) {
// TODO: Bug 1757136: In SLI, the returned UUID may be different but a
// local mapping must be used. We need to query SLI groups to know
// that.
@@ -666,7 +667,9 @@ static NV_STATUS set_ext_gpu_map_location(uvm_ext_gpu_map_t *ext_gpu_map,
}
ext_gpu_map->owning_gpu = owning_gpu;
ext_gpu_map->is_sysmem = false;
ext_gpu_map->is_sysmem = mem_info->sysmem;
ext_gpu_map->is_egm = mem_info->egm;
return NV_OK;
}
@@ -719,6 +722,7 @@ static NV_STATUS uvm_ext_gpu_map_split(uvm_range_tree_t *tree,
new->gpu = existing_map->gpu;
new->owning_gpu = existing_map->owning_gpu;
new->is_sysmem = existing_map->is_sysmem;
new->is_egm = existing_map->is_egm;
// Initialize the new ext_gpu_map tracker as a copy of the existing_map tracker.
// This way, any operations on any of the two ext_gpu_maps will be able to

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2016-2023 NVIDIA Corporation
Copyright (c) 2016-2022 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -93,9 +93,8 @@ static bool sysmem_can_be_mapped_on_gpu(uvm_mem_t *sysmem)
{
UVM_ASSERT(uvm_mem_is_sysmem(sysmem));
// In Confidential Computing, only unprotected memory can be mapped on the
// GPU
if (g_uvm_global.conf_computing_enabled)
// If SEV is enabled, only unprotected memory can be mapped
if (g_uvm_global.sev_enabled)
return uvm_mem_is_sysmem_dma(sysmem);
return true;
@@ -738,7 +737,7 @@ static NV_STATUS mem_map_cpu_to_sysmem_kernel(uvm_mem_t *mem)
pages[page_index] = mem_cpu_page(mem, page_index * PAGE_SIZE);
}
if (g_uvm_global.conf_computing_enabled && uvm_mem_is_sysmem_dma(mem))
if (g_uvm_global.sev_enabled && uvm_mem_is_sysmem_dma(mem))
prot = uvm_pgprot_decrypted(PAGE_KERNEL_NOENC);
mem->kernel.cpu_addr = vmap(pages, num_pages, VM_MAP, prot);

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2016-2023 NVIDIA Corporation
Copyright (c) 2016-2021 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -44,10 +44,10 @@ static NvU32 first_page_size(NvU32 page_sizes)
static inline NV_STATUS __alloc_map_sysmem(NvU64 size, uvm_gpu_t *gpu, uvm_mem_t **sys_mem)
{
if (g_uvm_global.conf_computing_enabled)
if (g_uvm_global.sev_enabled)
return uvm_mem_alloc_sysmem_dma_and_map_cpu_kernel(size, gpu, current->mm, sys_mem);
return uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, sys_mem);
else
return uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, sys_mem);
}
static NV_STATUS check_accessible_from_gpu(uvm_gpu_t *gpu, uvm_mem_t *mem)
@@ -335,6 +335,9 @@ error:
static bool should_test_page_size(size_t alloc_size, NvU32 page_size)
{
if (g_uvm_global.sev_enabled)
return false;
if (g_uvm_global.num_simulated_devices == 0)
return true;

View File

@@ -130,9 +130,9 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
NV_STATUS status = NV_OK;
NV_STATUS tracker_status;
// Save the mask of unmapped pages because it will change after the
// Get the mask of unmapped pages because it will change after the
// first map operation
uvm_page_mask_complement(&va_block_context->caller_page_mask, &va_block->maybe_mapped_pages);
uvm_va_block_unmapped_pages_get(va_block, region, &va_block_context->caller_page_mask);
if (uvm_va_block_is_hmm(va_block) && !UVM_ID_IS_CPU(dest_id)) {
// Do not map pages that are already resident on the CPU. This is in
@@ -147,7 +147,7 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
// such pages at all, when migrating.
uvm_page_mask_andnot(&va_block_context->caller_page_mask,
&va_block_context->caller_page_mask,
uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU));
uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU, NUMA_NO_NODE));
}
// Only map those pages that are not mapped anywhere else (likely due
@@ -377,7 +377,7 @@ static bool va_block_should_do_cpu_preunmap(uvm_va_block_t *va_block,
mapped_pages_cpu = uvm_va_block_map_mask_get(va_block, UVM_ID_CPU);
if (uvm_processor_mask_test(&va_block->resident, dest_id)) {
const uvm_page_mask_t *resident_pages_dest = uvm_va_block_resident_mask_get(va_block, dest_id);
const uvm_page_mask_t *resident_pages_dest = uvm_va_block_resident_mask_get(va_block, dest_id, NUMA_NO_NODE);
uvm_page_mask_t *do_not_unmap_pages = &va_block_context->scratch_page_mask;
// TODO: Bug 1877578

View File

@@ -672,14 +672,6 @@ static NV_STATUS nv_migrate_vma(struct migrate_vma *args, migrate_vma_state_t *s
.finalize_and_map = uvm_migrate_vma_finalize_and_map_helper,
};
// WAR for Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU TLB
// invalidates on read-only to read-write upgrades
//
// This code path isn't used on GH180 but we need to maintain consistent
// behaviour on systems that do.
if (!vma_is_anonymous(args->vma))
return NV_WARN_NOTHING_TO_DO;
ret = migrate_vma(&uvm_migrate_vma_ops, args->vma, args->start, args->end, args->src, args->dst, state);
if (ret < 0)
return errno_to_nv_status(ret);
@@ -693,24 +685,6 @@ static NV_STATUS nv_migrate_vma(struct migrate_vma *args, migrate_vma_state_t *s
if (ret < 0)
return errno_to_nv_status(ret);
// TODO: Bug 2419180: support file-backed pages in migrate_vma, when
// support for it is added to the Linux kernel
//
// A side-effect of migrate_vma_setup() is that it calls mmu notifiers even
// if a page can't be migrated (e.g. because it's a non-anonymous mapping). We
// need this side-effect for SMMU on GH180 to ensure any cached read-only
// entries are flushed from SMMU on permission upgrade.
//
// TODO: Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU TLB
// invalidates on read-only to read-write upgrades
//
// The above WAR doesn't work for HugeTLBfs mappings because
// migrate_vma_setup() will fail in that case.
if (!vma_is_anonymous(args->vma)) {
migrate_vma_finalize(args);
return NV_WARN_NOTHING_TO_DO;
}
uvm_migrate_vma_alloc_and_copy(args, state);
if (state->status == NV_OK) {
migrate_vma_pages(args);
@@ -884,13 +858,9 @@ static NV_STATUS migrate_pageable_vma(struct vm_area_struct *vma,
start = max(start, vma->vm_start);
outer = min(outer, vma->vm_end);
// migrate_vma only supports anonymous VMAs. We check for those after
// calling migrate_vma_setup() to work around Bug 4130089. We need to check
// for HugeTLB VMAs here because migrate_vma_setup() will return a fatal
// error for those.
// TODO: Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU TLB
// invalidates on read-only to read-write upgrades
if (is_vm_hugetlb_page(vma))
// TODO: Bug 2419180: support file-backed pages in migrate_vma, when
// support for it is added to the Linux kernel
if (!vma_is_anonymous(vma))
return NV_WARN_NOTHING_TO_DO;
if (uvm_processor_mask_empty(&va_space->registered_gpus))

View File

@@ -456,13 +456,13 @@ static void pde_fill_gpu(uvm_page_tree_t *tree,
NvU32 max_inline_entries = UVM_PUSH_INLINE_DATA_MAX_SIZE / sizeof(pde_data);
uvm_gpu_address_t inline_data_addr;
uvm_push_inline_data_t inline_data;
NvU32 membar_flag = 0;
uvm_push_flag_t push_membar_flag = UVM_PUSH_FLAG_COUNT;
NvU32 i;
if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE))
membar_flag = UVM_PUSH_FLAG_NEXT_MEMBAR_NONE;
push_membar_flag = UVM_PUSH_FLAG_NEXT_MEMBAR_NONE;
else if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU))
membar_flag = UVM_PUSH_FLAG_NEXT_MEMBAR_GPU;
push_membar_flag = UVM_PUSH_FLAG_NEXT_MEMBAR_GPU;
for (i = 0; i < pde_count;) {
NvU32 j;
@@ -482,8 +482,8 @@ static void pde_fill_gpu(uvm_page_tree_t *tree,
// caller's membar flag.
if (i + entry_count < pde_count)
uvm_push_set_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE);
else if (membar_flag)
uvm_push_set_flag(push, membar_flag);
else if (push_membar_flag != UVM_PUSH_FLAG_COUNT)
uvm_push_set_flag(push, push_membar_flag);
tree->gpu->parent->ce_hal->memcopy(push, pde_entry_addr, inline_data_addr, entry_count * sizeof(pde_data));
@@ -906,10 +906,11 @@ error:
// --------------|-------------------------||----------------|----------------
// vidmem | - || vidmem | false
// sysmem | - || sysmem | false
// default | <not set> || vidmem | true
// default | <not set> || vidmem | true (1)
// default | vidmem || vidmem | false
// default | sysmem || sysmem | false
//
// (1) When SEV mode is enabled, the fallback path is disabled.
//
// In SR-IOV heavy, the page tree must be in vidmem, to prevent guest drivers
// from updating GPU page tables without hypervisor knowledge.
@@ -925,27 +926,28 @@ error:
//
static void page_tree_set_location(uvm_page_tree_t *tree, uvm_aperture_t location)
{
bool should_location_be_vidmem;
UVM_ASSERT(tree->gpu != NULL);
UVM_ASSERT_MSG((location == UVM_APERTURE_VID) ||
(location == UVM_APERTURE_SYS) ||
(location == UVM_APERTURE_DEFAULT),
"Invalid location %s (%d)\n", uvm_aperture_string(location), (int)location);
// The page tree of a "fake" GPU used during page tree testing can be in
// sysmem in scenarios where a "real" GPU must be in vidmem. Fake GPUs can
// be identified by having no channel manager.
if (tree->gpu->channel_manager != NULL) {
should_location_be_vidmem = uvm_gpu_is_virt_mode_sriov_heavy(tree->gpu)
|| uvm_conf_computing_mode_enabled(tree->gpu);
if (uvm_gpu_is_virt_mode_sriov_heavy(tree->gpu))
UVM_ASSERT(location == UVM_APERTURE_VID);
else if (uvm_conf_computing_mode_enabled(tree->gpu))
UVM_ASSERT(location == UVM_APERTURE_VID);
}
// The page tree of a "fake" GPU used during page tree testing can be in
// sysmem even if should_location_be_vidmem is true. A fake GPU can be
// identified by having no channel manager.
if ((tree->gpu->channel_manager != NULL) && should_location_be_vidmem)
UVM_ASSERT(location == UVM_APERTURE_VID);
if (location == UVM_APERTURE_DEFAULT) {
if (page_table_aperture == UVM_APERTURE_DEFAULT) {
tree->location = UVM_APERTURE_VID;
tree->location_sys_fallback = true;
// See the comment (1) above.
tree->location_sys_fallback = !g_uvm_global.sev_enabled;
}
else {
tree->location = page_table_aperture;

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2016-2019 NVIDIA Corporation
Copyright (c) 2016-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -22,10 +22,7 @@
*******************************************************************************/
#include "uvm_perf_events.h"
#include "uvm_va_block.h"
#include "uvm_va_range.h"
#include "uvm_va_space.h"
#include "uvm_kvmalloc.h"
#include "uvm_test.h"
// Global variable used to check that callbacks are correctly executed
@@ -46,10 +43,7 @@ static NV_STATUS test_events(uvm_va_space_t *va_space)
NV_STATUS status;
uvm_perf_event_data_t event_data;
uvm_va_block_t block;
test_data = 0;
memset(&event_data, 0, sizeof(event_data));
// Use CPU id to avoid triggering the GPU stats update code
@@ -58,6 +52,7 @@ static NV_STATUS test_events(uvm_va_space_t *va_space)
// Register a callback for page fault
status = uvm_perf_register_event_callback(&va_space->perf_events, UVM_PERF_EVENT_FAULT, callback_inc_1);
TEST_CHECK_GOTO(status == NV_OK, done);
// Register a callback for page fault
status = uvm_perf_register_event_callback(&va_space->perf_events, UVM_PERF_EVENT_FAULT, callback_inc_2);
TEST_CHECK_GOTO(status == NV_OK, done);
@@ -65,13 +60,14 @@ static NV_STATUS test_events(uvm_va_space_t *va_space)
// va_space read lock is required for page fault event notification
uvm_va_space_down_read(va_space);
// Notify (fake) page fault. The two registered callbacks for this event increment the value of test_value
event_data.fault.block = &block;
// Notify (fake) page fault. The two registered callbacks for this event
// increment the value of test_value
uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data);
uvm_va_space_up_read(va_space);
// test_data was initialized to zero. It should have been incremented by 1 and 2, respectively in the callbacks
// test_data was initialized to zero. It should have been incremented by 1
// and 2, respectively in the callbacks
TEST_CHECK_GOTO(test_data == 3, done);
done:
@@ -96,4 +92,3 @@ NV_STATUS uvm_test_perf_events_sanity(UVM_TEST_PERF_EVENTS_SANITY_PARAMS *params
done:
return status;
}

View File

@@ -355,7 +355,7 @@ static NvU32 uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_block_t *va_blo
uvm_page_mask_zero(prefetch_pages);
if (UVM_ID_IS_CPU(new_residency) || va_block->gpus[uvm_id_gpu_index(new_residency)] != NULL)
resident_mask = uvm_va_block_resident_mask_get(va_block, new_residency);
resident_mask = uvm_va_block_resident_mask_get(va_block, new_residency, NUMA_NO_NODE);
// If this is a first-touch fault and the destination processor is the
// preferred location, populate the whole max_prefetch_region.

View File

@@ -164,7 +164,7 @@ typedef struct
uvm_spinlock_t lock;
uvm_va_block_context_t va_block_context;
uvm_va_block_context_t *va_block_context;
// Flag used to avoid scheduling delayed unpinning operations after
// uvm_perf_thrashing_stop has been called.
@@ -601,6 +601,14 @@ static va_space_thrashing_info_t *va_space_thrashing_info_create(uvm_va_space_t
va_space_thrashing = uvm_kvmalloc_zero(sizeof(*va_space_thrashing));
if (va_space_thrashing) {
uvm_va_block_context_t *block_context = uvm_va_block_context_alloc(NULL);
if (!block_context) {
uvm_kvfree(va_space_thrashing);
return NULL;
}
va_space_thrashing->pinned_pages.va_block_context = block_context;
va_space_thrashing->va_space = va_space;
va_space_thrashing_info_init_params(va_space_thrashing);
@@ -621,6 +629,7 @@ static void va_space_thrashing_info_destroy(uvm_va_space_t *va_space)
if (va_space_thrashing) {
uvm_perf_module_type_unset_data(va_space->perf_modules_data, UVM_PERF_MODULE_TYPE_THRASHING);
uvm_va_block_context_free(va_space_thrashing->pinned_pages.va_block_context);
uvm_kvfree(va_space_thrashing);
}
}
@@ -1104,7 +1113,7 @@ static NV_STATUS unmap_remote_pinned_pages(uvm_va_block_t *va_block,
!uvm_processor_mask_test(&policy->accessed_by, processor_id));
if (uvm_processor_mask_test(&va_block->resident, processor_id)) {
const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, processor_id);
const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, processor_id, NUMA_NO_NODE);
if (!uvm_page_mask_andnot(&va_block_context->caller_page_mask,
&block_thrashing->pinned_pages.mask,
@@ -1312,9 +1321,8 @@ void thrashing_event_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_
if (block_thrashing->last_time_stamp == 0 ||
uvm_id_equal(block_thrashing->last_processor, processor_id) ||
time_stamp - block_thrashing->last_time_stamp > va_space_thrashing->params.lapse_ns) {
time_stamp - block_thrashing->last_time_stamp > va_space_thrashing->params.lapse_ns)
goto done;
}
num_block_pages = uvm_va_block_size(va_block) / PAGE_SIZE;
@@ -1803,7 +1811,7 @@ static void thrashing_unpin_pages(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
va_space_thrashing_info_t *va_space_thrashing = container_of(dwork, va_space_thrashing_info_t, pinned_pages.dwork);
uvm_va_space_t *va_space = va_space_thrashing->va_space;
uvm_va_block_context_t *va_block_context = &va_space_thrashing->pinned_pages.va_block_context;
uvm_va_block_context_t *va_block_context = va_space_thrashing->pinned_pages.va_block_context;
// Take the VA space lock so that VA blocks don't go away during this
// operation.
@@ -1937,7 +1945,6 @@ void uvm_perf_thrashing_unload(uvm_va_space_t *va_space)
// Make sure that there are not pending work items
if (va_space_thrashing) {
UVM_ASSERT(va_space_thrashing->pinned_pages.in_va_space_teardown);
UVM_ASSERT(list_empty(&va_space_thrashing->pinned_pages.list));
va_space_thrashing_info_destroy(va_space);

View File

@@ -3377,76 +3377,47 @@ uvm_gpu_id_t uvm_pmm_devmem_page_to_gpu_id(struct page *page)
return gpu->id;
}
static void evict_orphan_pages(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk)
{
NvU32 i;
UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT);
UVM_ASSERT(chunk->suballoc);
for (i = 0; i < num_subchunks(chunk); i++) {
uvm_gpu_chunk_t *subchunk = chunk->suballoc->subchunks[i];
uvm_spin_lock(&pmm->list_lock);
if (subchunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT) {
uvm_spin_unlock(&pmm->list_lock);
evict_orphan_pages(pmm, subchunk);
continue;
}
if (subchunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED && subchunk->is_referenced) {
unsigned long pfn = uvm_pmm_gpu_devmem_get_pfn(pmm, subchunk);
// TODO: Bug 3368756: add support for large GPU pages.
UVM_ASSERT(uvm_gpu_chunk_get_size(subchunk) == PAGE_SIZE);
uvm_spin_unlock(&pmm->list_lock);
// The above check for subchunk state is racy because the
// chunk may be freed after the lock is dropped. It is
// still safe to proceed in that case because the struct
// page reference will have dropped to zero and cannot
// have been re-allocated as this is only called during
// GPU teardown. Therefore migrate_device_range() will
// simply fail.
uvm_hmm_pmm_gpu_evict_pfn(pfn);
continue;
}
uvm_spin_unlock(&pmm->list_lock);
}
}
// Free any orphan pages.
// This should be called as part of removing a GPU: after all work is stopped
// and all va_blocks have been destroyed. There normally won't be any
// device private struct page references left but there can be cases after
// fork() where a child process still holds a reference. This function searches
// for pages that still have a reference and migrates the page to the GPU in
// order to release the reference in the CPU page table.
static void uvm_pmm_gpu_free_orphan_pages(uvm_pmm_gpu_t *pmm)
// Check there are no orphan pages. This should be only called as part of
// removing a GPU: after all work is stopped and all va_blocks have been
// destroyed. By now there should be no device-private page references left as
// there are no va_space's left on this GPU and orphan pages should be removed
// by va_space destruction or unregistration from the GPU.
static bool uvm_pmm_gpu_check_orphan_pages(uvm_pmm_gpu_t *pmm)
{
size_t i;
bool ret = true;
unsigned long pfn;
struct range range = pmm->devmem.pagemap.range;
if (!pmm->initialized)
return;
// This is only safe to call during GPU teardown where chunks
// cannot be re-allocated.
UVM_ASSERT(uvm_gpu_retained_count(uvm_pmm_to_gpu(pmm)) == 0);
if (!pmm->initialized || !uvm_hmm_is_enabled_system_wide())
return ret;
// Scan all the root chunks looking for subchunks which are still
// referenced. This is slow, but we only do this when unregistering a GPU
// and is not critical for performance.
// referenced.
for (i = 0; i < pmm->root_chunks.count; i++) {
uvm_gpu_root_chunk_t *root_chunk = &pmm->root_chunks.array[i];
root_chunk_lock(pmm, root_chunk);
if (root_chunk->chunk.state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT)
evict_orphan_pages(pmm, &root_chunk->chunk);
ret = false;
root_chunk_unlock(pmm, root_chunk);
}
for (pfn = __phys_to_pfn(range.start); pfn <= __phys_to_pfn(range.end); pfn++) {
struct page *page = pfn_to_page(pfn);
if (!is_device_private_page(page)) {
ret = false;
break;
}
if (page_count(page)) {
ret = false;
break;
}
}
return ret;
}
static void devmem_page_free(struct page *page)
@@ -3479,7 +3450,7 @@ static vm_fault_t devmem_fault(struct vm_fault *vmf)
{
uvm_va_space_t *va_space = vmf->page->zone_device_data;
if (!va_space || va_space->va_space_mm.mm != vmf->vma->vm_mm)
if (!va_space)
return VM_FAULT_SIGBUS;
return uvm_va_space_cpu_fault_hmm(va_space, vmf->vma, vmf);
@@ -3568,8 +3539,9 @@ static void devmem_deinit(uvm_pmm_gpu_t *pmm)
{
}
static void uvm_pmm_gpu_free_orphan_pages(uvm_pmm_gpu_t *pmm)
static bool uvm_pmm_gpu_check_orphan_pages(uvm_pmm_gpu_t *pmm)
{
return true;
}
#endif // UVM_IS_CONFIG_HMM()
@@ -3744,7 +3716,7 @@ void uvm_pmm_gpu_deinit(uvm_pmm_gpu_t *pmm)
gpu = uvm_pmm_to_gpu(pmm);
uvm_pmm_gpu_free_orphan_pages(pmm);
UVM_ASSERT(uvm_pmm_gpu_check_orphan_pages(pmm));
nv_kthread_q_flush(&gpu->parent->lazy_free_q);
UVM_ASSERT(list_empty(&pmm->root_chunks.va_block_lazy_free));
release_free_root_chunks(pmm);

View File

@@ -749,6 +749,7 @@ NV_STATUS uvm_cpu_chunk_map_gpu(uvm_cpu_chunk_t *chunk, uvm_gpu_t *gpu)
}
static struct page *uvm_cpu_chunk_alloc_page(uvm_chunk_size_t alloc_size,
int nid,
uvm_cpu_chunk_alloc_flags_t alloc_flags)
{
gfp_t kernel_alloc_flags;
@@ -764,18 +765,27 @@ static struct page *uvm_cpu_chunk_alloc_page(uvm_chunk_size_t alloc_size,
kernel_alloc_flags |= GFP_HIGHUSER;
// For allocation sizes higher than PAGE_SIZE, use __GFP_NORETRY in
// order to avoid higher allocation latency from the kernel compacting
// memory to satisfy the request.
// For allocation sizes higher than PAGE_SIZE, use __GFP_NORETRY in order
// to avoid higher allocation latency from the kernel compacting memory to
// satisfy the request.
// Use __GFP_NOWARN to avoid printing allocation failure to the kernel log.
// High order allocation failures are handled gracefully by the caller.
if (alloc_size > PAGE_SIZE)
kernel_alloc_flags |= __GFP_COMP | __GFP_NORETRY;
kernel_alloc_flags |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
if (alloc_flags & UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO)
kernel_alloc_flags |= __GFP_ZERO;
page = alloc_pages(kernel_alloc_flags, get_order(alloc_size));
if (page && (alloc_flags & UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO))
SetPageDirty(page);
UVM_ASSERT(nid < num_online_nodes());
if (nid == NUMA_NO_NODE)
page = alloc_pages(kernel_alloc_flags, get_order(alloc_size));
else
page = alloc_pages_node(nid, kernel_alloc_flags, get_order(alloc_size));
if (page) {
if (alloc_flags & UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO)
SetPageDirty(page);
}
return page;
}
@@ -805,6 +815,7 @@ static uvm_cpu_physical_chunk_t *uvm_cpu_chunk_create(uvm_chunk_size_t alloc_siz
NV_STATUS uvm_cpu_chunk_alloc(uvm_chunk_size_t alloc_size,
uvm_cpu_chunk_alloc_flags_t alloc_flags,
int nid,
uvm_cpu_chunk_t **new_chunk)
{
uvm_cpu_physical_chunk_t *chunk;
@@ -812,7 +823,7 @@ NV_STATUS uvm_cpu_chunk_alloc(uvm_chunk_size_t alloc_size,
UVM_ASSERT(new_chunk);
page = uvm_cpu_chunk_alloc_page(alloc_size, alloc_flags);
page = uvm_cpu_chunk_alloc_page(alloc_size, nid, alloc_flags);
if (!page)
return NV_ERR_NO_MEMORY;
@@ -847,6 +858,13 @@ NV_STATUS uvm_cpu_chunk_alloc_hmm(struct page *page,
return NV_OK;
}
int uvm_cpu_chunk_get_numa_node(uvm_cpu_chunk_t *chunk)
{
UVM_ASSERT(chunk);
UVM_ASSERT(chunk->page);
return page_to_nid(chunk->page);
}
NV_STATUS uvm_cpu_chunk_split(uvm_cpu_chunk_t *chunk, uvm_cpu_chunk_t **new_chunks)
{
NV_STATUS status = NV_OK;

View File

@@ -304,11 +304,24 @@ uvm_chunk_sizes_mask_t uvm_cpu_chunk_get_allocation_sizes(void);
// Allocate a physical CPU chunk of the specified size.
//
// The nid argument is used to indicate a memory node preference. If the
// value is a memory node ID, the chunk allocation will be attempted on
// that memory node. If the chunk cannot be allocated on that memory node,
// it will be allocated on any memory node allowed by the process's policy.
//
// If the value of nid is a memory node ID that is not in the set of the
// current process's allowed memory nodes, the chunk will be allocated on one
// of the nodes in the allowed set.
//
// If the value of nid is NUMA_NO_NODE, the chunk will be allocated from any
// of the memory nodes allowed by the process policy.
//
// If a CPU chunk allocation succeeds, NV_OK is returned. new_chunk will be set
// to point to the newly allocated chunk. On failure, NV_ERR_NO_MEMORY is
// returned.
NV_STATUS uvm_cpu_chunk_alloc(uvm_chunk_size_t alloc_size,
uvm_cpu_chunk_alloc_flags_t flags,
int nid,
uvm_cpu_chunk_t **new_chunk);
// Allocate a HMM CPU chunk.
@@ -375,6 +388,9 @@ static uvm_cpu_logical_chunk_t *uvm_cpu_chunk_to_logical(uvm_cpu_chunk_t *chunk)
return container_of((chunk), uvm_cpu_logical_chunk_t, common);
}
// Return the NUMA node ID of the physical page backing the chunk.
int uvm_cpu_chunk_get_numa_node(uvm_cpu_chunk_t *chunk);
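A brief usage sketch of the new nid parameter, pairing uvm_cpu_chunk_alloc() with uvm_cpu_chunk_get_numa_node() declared above. This is illustrative only; the wrapper function, the preferred_nid value, and the pr_info() reporting are assumptions, not driver code.

// Sketch: request a chunk on preferred_nid and report where it actually landed.
static NV_STATUS sketch_alloc_cpu_chunk(int preferred_nid, uvm_cpu_chunk_t **out_chunk)
{
    NV_STATUS status;

    // Per the comment above, the kernel may still place the chunk on another
    // node allowed by the process policy, or pick any node for NUMA_NO_NODE.
    status = uvm_cpu_chunk_alloc(PAGE_SIZE, UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO, preferred_nid, out_chunk);
    if (status != NV_OK)
        return status;

    pr_info("CPU chunk allocated on node %d (requested %d)\n",
            uvm_cpu_chunk_get_numa_node(*out_chunk), preferred_nid);
    return NV_OK;
}

The chunk is released with uvm_cpu_chunk_free() as usual.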
// Free a CPU chunk.
// This may not result in the immediate freeing of the physical pages of the
// chunk if this is a logical chunk and there are other logical chunks holding

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2017-2019 NVIDIA Corporation
Copyright (c) 2017-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -664,6 +664,7 @@ done:
static NV_STATUS test_cpu_chunk_alloc(uvm_chunk_size_t size,
uvm_cpu_chunk_alloc_flags_t flags,
int nid,
uvm_cpu_chunk_t **out_chunk)
{
uvm_cpu_chunk_t *chunk;
@@ -675,7 +676,7 @@ static NV_STATUS test_cpu_chunk_alloc(uvm_chunk_size_t size,
// It is possible that the allocation fails due to lack of large pages
// rather than an API issue, which will result in a false negative.
// However, that should be very rare.
TEST_NV_CHECK_RET(uvm_cpu_chunk_alloc(size, flags, &chunk));
TEST_NV_CHECK_RET(uvm_cpu_chunk_alloc(size, flags, nid, &chunk));
// Check general state of the chunk:
// - chunk should be a physical chunk,
@@ -685,6 +686,12 @@ static NV_STATUS test_cpu_chunk_alloc(uvm_chunk_size_t size,
TEST_CHECK_GOTO(uvm_cpu_chunk_get_size(chunk) == size, done);
TEST_CHECK_GOTO(uvm_cpu_chunk_num_pages(chunk) == size / PAGE_SIZE, done);
// It is possible for the kernel to allocate a chunk on a NUMA node other
// than the one requested. However, that should not be an issue with
// sufficient memory on each NUMA node.
if (nid != NUMA_NO_NODE)
TEST_CHECK_GOTO(uvm_cpu_chunk_get_numa_node(chunk) == nid, done);
if (flags & UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO) {
NvU64 *cpu_addr;
@@ -719,7 +726,7 @@ static NV_STATUS test_cpu_chunk_mapping_basic_verify(uvm_gpu_t *gpu,
NvU64 dma_addr;
NV_STATUS status = NV_OK;
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, flags, &chunk));
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, flags, NUMA_NO_NODE, &chunk));
phys_chunk = uvm_cpu_chunk_to_physical(chunk);
// Check state of the physical chunk:
@@ -763,27 +770,27 @@ static NV_STATUS test_cpu_chunk_mapping_basic(uvm_gpu_t *gpu, uvm_cpu_chunk_allo
return NV_OK;
}
static NV_STATUS test_cpu_chunk_mapping_array(uvm_gpu_t *gpu1, uvm_gpu_t *gpu2, uvm_gpu_t *gpu3)
static NV_STATUS test_cpu_chunk_mapping_array(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1, uvm_gpu_t *gpu2)
{
NV_STATUS status = NV_OK;
uvm_cpu_chunk_t *chunk;
uvm_cpu_physical_chunk_t *phys_chunk;
NvU64 dma_addr_gpu2;
NvU64 dma_addr_gpu1;
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(PAGE_SIZE, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(PAGE_SIZE, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, NUMA_NO_NODE, &chunk));
phys_chunk = uvm_cpu_chunk_to_physical(chunk);
TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu2), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);
TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu3), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu3), done);
dma_addr_gpu2 = uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu2->parent);
uvm_cpu_chunk_unmap_gpu_phys(chunk, gpu3->parent);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);
TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu1), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu1), done);
TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu2), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu1), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);
dma_addr_gpu1 = uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu1->parent);
uvm_cpu_chunk_unmap_gpu_phys(chunk, gpu2->parent);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu1), done);
TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu0), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu0), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu1), done);
// DMA mapping addresses for different GPUs live in different IOMMU spaces,
// so it would be perfectly legal for them to have the same IOVA, and even
@@ -793,7 +800,7 @@ static NV_STATUS test_cpu_chunk_mapping_array(uvm_gpu_t *gpu1, uvm_gpu_t *gpu2,
// GPU1. It's true that we may get a false negative if both addresses
// happened to alias and we had a bug in how the addresses are shifted in
// the dense array, but that's better than intermittent failure.
TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu2->parent) == dma_addr_gpu2, done);
TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu1->parent) == dma_addr_gpu1, done);
done:
uvm_cpu_chunk_free(chunk);
@@ -911,7 +918,7 @@ static NV_STATUS test_cpu_chunk_split_and_merge(uvm_gpu_t *gpu)
uvm_cpu_chunk_t *chunk;
NV_STATUS status;
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, NUMA_NO_NODE, &chunk));
status = do_test_cpu_chunk_split_and_merge(chunk, gpu);
uvm_cpu_chunk_free(chunk);
@@ -993,7 +1000,7 @@ static NV_STATUS test_cpu_chunk_dirty(uvm_gpu_t *gpu)
uvm_cpu_physical_chunk_t *phys_chunk;
size_t num_pages;
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, NUMA_NO_NODE, &chunk));
phys_chunk = uvm_cpu_chunk_to_physical(chunk);
num_pages = uvm_cpu_chunk_num_pages(chunk);
@@ -1005,7 +1012,7 @@ static NV_STATUS test_cpu_chunk_dirty(uvm_gpu_t *gpu)
uvm_cpu_chunk_free(chunk);
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO, &chunk));
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO, NUMA_NO_NODE, &chunk));
phys_chunk = uvm_cpu_chunk_to_physical(chunk);
num_pages = uvm_cpu_chunk_num_pages(chunk);
@@ -1170,13 +1177,35 @@ NV_STATUS test_cpu_chunk_free(uvm_va_space_t *va_space, uvm_processor_mask_t *te
size_t size = uvm_chunk_find_next_size(alloc_sizes, PAGE_SIZE);
for_each_chunk_size_from(size, alloc_sizes) {
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, NUMA_NO_NODE, &chunk));
TEST_NV_CHECK_RET(do_test_cpu_chunk_free(chunk, va_space, test_gpus));
}
return NV_OK;
}
static NV_STATUS test_cpu_chunk_numa_alloc(uvm_va_space_t *va_space)
{
uvm_cpu_chunk_t *chunk;
uvm_chunk_sizes_mask_t alloc_sizes = uvm_cpu_chunk_get_allocation_sizes();
size_t size;
for_each_chunk_size(size, alloc_sizes) {
int nid;
for_each_possible_uvm_node(nid) {
// Do not test CPU allocation on nodes that have no memory or CPU
if (!node_state(nid, N_MEMORY) || !node_state(nid, N_CPU))
continue;
TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, nid, &chunk));
uvm_cpu_chunk_free(chunk);
}
}
return NV_OK;
}
NV_STATUS uvm_test_cpu_chunk_api(UVM_TEST_CPU_CHUNK_API_PARAMS *params, struct file *filp)
{
uvm_va_space_t *va_space = uvm_va_space_get(filp);
@@ -1197,6 +1226,7 @@ NV_STATUS uvm_test_cpu_chunk_api(UVM_TEST_CPU_CHUNK_API_PARAMS *params, struct f
}
TEST_NV_CHECK_GOTO(test_cpu_chunk_free(va_space, &test_gpus), done);
TEST_NV_CHECK_GOTO(test_cpu_chunk_numa_alloc(va_space), done);
if (uvm_processor_mask_get_gpu_count(&test_gpus) >= 3) {
uvm_gpu_t *gpu2, *gpu3;

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2023 NVIDIA Corporation
Copyright (c) 2015-2022 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -324,7 +324,7 @@ static NV_STATUS gpu_mem_check(uvm_gpu_t *gpu,
// TODO: Bug 3839176: [UVM][HCC][uvm_test] Update tests that assume GPU
// engines can directly access sysmem
// Skip this test for now. To enable this test in Confidential Computing,
// Skip this test for now. To enable this test under SEV,
// The GPU->CPU CE copy needs to be updated so it uses encryption when
// CC is enabled.
if (uvm_conf_computing_mode_enabled(gpu))
@@ -1068,7 +1068,7 @@ static NV_STATUS test_pmm_reverse_map_single(uvm_gpu_t *gpu, uvm_va_space_t *va_
uvm_mutex_lock(&va_block->lock);
is_resident = uvm_processor_mask_test(&va_block->resident, gpu->id) &&
uvm_page_mask_full(uvm_va_block_resident_mask_get(va_block, gpu->id));
uvm_page_mask_full(uvm_va_block_resident_mask_get(va_block, gpu->id, NUMA_NO_NODE));
if (is_resident)
phys_addr = uvm_va_block_gpu_phys_page_address(va_block, 0, gpu);
@@ -1154,7 +1154,7 @@ static NV_STATUS test_pmm_reverse_map_many_blocks(uvm_gpu_t *gpu, uvm_va_space_t
uvm_mutex_lock(&va_block->lock);
// Verify that all pages are populated on the GPU
is_resident = uvm_page_mask_region_full(uvm_va_block_resident_mask_get(va_block, gpu->id),
is_resident = uvm_page_mask_region_full(uvm_va_block_resident_mask_get(va_block, gpu->id, NUMA_NO_NODE),
reverse_mapping->region);
uvm_mutex_unlock(&va_block->lock);
@@ -1223,6 +1223,8 @@ static NV_STATUS test_indirect_peers(uvm_gpu_t *owning_gpu, uvm_gpu_t *accessing
if (!chunks)
return NV_ERR_NO_MEMORY;
UVM_ASSERT(!g_uvm_global.sev_enabled);
TEST_NV_CHECK_GOTO(uvm_mem_alloc_sysmem_and_map_cpu_kernel(UVM_CHUNK_SIZE_MAX, current->mm, &verif_mem), out);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, owning_gpu), out);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, accessing_gpu), out);

View File

@@ -176,7 +176,9 @@ static NV_STATUS preferred_location_unmap_remote_pages(uvm_va_block_t *va_block,
mapped_mask = uvm_va_block_map_mask_get(va_block, preferred_location);
if (uvm_processor_mask_test(&va_block->resident, preferred_location)) {
const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, preferred_location);
const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block,
preferred_location,
NUMA_NO_NODE);
if (!uvm_page_mask_andnot(&va_block_context->caller_page_mask, mapped_mask, resident_mask))
goto done;
@@ -638,7 +640,7 @@ static NV_STATUS va_block_set_read_duplication_locked(uvm_va_block_t *va_block,
for_each_id_in_mask(src_id, &va_block->resident) {
NV_STATUS status;
uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, src_id);
uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, src_id, NUMA_NO_NODE);
// Calling uvm_va_block_make_resident_read_duplicate will break all
// SetAccessedBy and remote mappings
@@ -695,7 +697,7 @@ static NV_STATUS va_block_unset_read_duplication_locked(uvm_va_block_t *va_block
// If preferred_location is set and has resident copies, give it preference
if (UVM_ID_IS_VALID(preferred_location) &&
uvm_processor_mask_test(&va_block->resident, preferred_location)) {
uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, preferred_location);
uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, preferred_location, NUMA_NO_NODE);
bool is_mask_empty = !uvm_page_mask_and(break_read_duplication_pages,
&va_block->read_duplicated_pages,
resident_mask);
@@ -723,7 +725,7 @@ static NV_STATUS va_block_unset_read_duplication_locked(uvm_va_block_t *va_block
if (uvm_id_equal(processor_id, preferred_location))
continue;
resident_mask = uvm_va_block_resident_mask_get(va_block, processor_id);
resident_mask = uvm_va_block_resident_mask_get(va_block, processor_id, NUMA_NO_NODE);
is_mask_empty = !uvm_page_mask_and(break_read_duplication_pages,
&va_block->read_duplicated_pages,
resident_mask);

View File

@@ -0,0 +1,40 @@
/*******************************************************************************
Copyright (c) 2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#include "uvm_processors.h"
int uvm_find_closest_node_mask(int src, const nodemask_t *mask)
{
int nid;
int closest_nid = NUMA_NO_NODE;
if (node_isset(src, *mask))
return src;
for_each_set_bit(nid, mask->bits, MAX_NUMNODES) {
if (closest_nid == NUMA_NO_NODE || node_distance(src, nid) < node_distance(src, closest_nid))
closest_nid = nid;
}
return closest_nid;
}
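Together with the for_each_closest_uvm_node() iterator added to uvm_processors.h in this change, the helper above supports distance-ordered node walks. A small sketch, assuming standard kernel NUMA facilities (node_states[N_MEMORY], node_distance(), pr_info()); it is not part of this file.

// Sketch: visit memory nodes in increasing distance from 'src'. The iterator
// clears bits as it goes, so it operates on a local copy of the node mask.
static void sketch_walk_nodes_by_distance(int src)
{
    nodemask_t candidates = node_states[N_MEMORY];
    int nid;

    for_each_closest_uvm_node(nid, src, candidates)
        pr_info("node %d is at distance %d from node %d\n",
                nid, node_distance(src, nid), src);
}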

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2016-2019 NVIDIA Corporation
Copyright (c) 2016-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -26,6 +26,7 @@
#include "uvm_linux.h"
#include "uvm_common.h"
#include <linux/numa.h>
#define UVM_MAX_UNIQUE_GPU_PAIRS SUM_FROM_0_TO_N(UVM_MAX_GPUS - 1)
@@ -37,11 +38,11 @@
// provide type safety, they are wrapped within the uvm_processor_id_t struct.
// The range of valid identifiers needs to cover the maximum number of
// supported GPUs on a system plus the CPU. CPU is assigned value 0, and GPUs
// range: [1, UVM_ID_MAX_GPUS].
// range: [1, UVM_PARENT_ID_MAX_GPUS].
//
// There are some functions that only expect GPU identifiers and, in order to
// make it clearer, the uvm_gpu_id_t alias type is provided. However, as this
// type is just a typedef of uvm_processor_id_t, there is no type checking
// make it clearer, the uvm_parent_gpu_id_t alias type is provided. However, as
// this type is just a typedef of uvm_processor_id_t, there is no type checking
// performed by the compiler.
//
// Identifier value vs index
@@ -60,22 +61,25 @@
// the GPU within the GPU id space (basically id - 1).
//
// In the diagram below, MAX_SUB is used to abbreviate
// UVM_ID_MAX_SUB_PROCESSORS.
// UVM_PARENT_ID_MAX_SUB_PROCESSORS.
//
// |-------------------------- uvm_processor_id_t ----------------------|
// | |
// | |----------------------- uvm_gpu_id_t ------------------------||
// | | ||
// Proc type | CPU | GPU ... GPU ... GPU ||
// | | ||
// ID values | 0 | 1 ... i+1 ... UVM_ID_MAX_PROCESSORS-1 ||
// TODO: Bug 4195538: uvm_parent_processor_id_t is currently, but only
// temporarily, the same as uvm_processor_id_t.
//
// GPU index 0 ... i ... UVM_ID_MAX_GPUS-1
// |-------------------------- uvm_parent_processor_id_t ----------------------|
// | |
// | |----------------------- uvm_parent_gpu_id_t ------------------------||
// | | ||
// Proc type | CPU | GPU ... GPU ... GPU ||
// | | ||
// ID values | 0 | 1 ... i+1 ... UVM_PARENT_ID_MAX_PROCESSORS-1 ||
//
// GPU index 0 ... i ... UVM_PARENT_ID_MAX_GPUS-1
// | | | |
// | | | |
// | |-------------| | |-----------------------------|
// | | | |
// | | | |
// | |-------------| | |------------------------------------|
// | | | |
// | | | |
// GPU index 0 ... MAX_SUB-1 ... i*MAX_SUB ... (i+1)*MAX_SUB-1 ... UVM_GLOBAL_ID_MAX_GPUS-1
//
// ID values | 0 | 1 ... MAX_SUB ... (i*MAX_SUB)+1 ... (i+1)*MAX_SUB ... UVM_GLOBAL_ID_MAX_PROCESSORS-1 ||
@@ -210,7 +214,7 @@ static proc_id_t prefix_fn_mask##_find_first_id(const mask_t *mask)
\
static proc_id_t prefix_fn_mask##_find_first_gpu_id(const mask_t *mask) \
{ \
return proc_id_ctor(find_next_bit(mask->bitmap, (maxval), UVM_ID_GPU0_VALUE)); \
return proc_id_ctor(find_next_bit(mask->bitmap, (maxval), UVM_PARENT_ID_GPU0_VALUE)); \
} \
\
static proc_id_t prefix_fn_mask##_find_next_id(const mask_t *mask, proc_id_t min_id) \
@@ -252,7 +256,7 @@ static NvU32 prefix_fn_mask##_get_gpu_count(const mask_t *mask)
{ \
NvU32 gpu_count = prefix_fn_mask##_get_count(mask); \
\
if (prefix_fn_mask##_test(mask, proc_id_ctor(UVM_ID_CPU_VALUE))) \
if (prefix_fn_mask##_test(mask, proc_id_ctor(UVM_PARENT_ID_CPU_VALUE))) \
--gpu_count; \
\
return gpu_count; \
@@ -261,55 +265,55 @@ static NvU32 prefix_fn_mask##_get_gpu_count(const mask_t *mask)
typedef struct
{
NvU32 val;
} uvm_processor_id_t;
} uvm_parent_processor_id_t;
typedef struct
{
NvU32 val;
} uvm_global_processor_id_t;
typedef uvm_processor_id_t uvm_gpu_id_t;
typedef uvm_parent_processor_id_t uvm_parent_gpu_id_t;
typedef uvm_global_processor_id_t uvm_global_gpu_id_t;
// Static value assigned to the CPU
#define UVM_ID_CPU_VALUE 0
#define UVM_ID_GPU0_VALUE (UVM_ID_CPU_VALUE + 1)
#define UVM_PARENT_ID_CPU_VALUE 0
#define UVM_PARENT_ID_GPU0_VALUE (UVM_PARENT_ID_CPU_VALUE + 1)
// ID values for the CPU and first GPU, respectively; the values for both types
// of IDs must match to enable sharing of UVM_PROCESSOR_MASK().
#define UVM_GLOBAL_ID_CPU_VALUE UVM_ID_CPU_VALUE
#define UVM_GLOBAL_ID_GPU0_VALUE UVM_ID_GPU0_VALUE
#define UVM_GLOBAL_ID_CPU_VALUE UVM_PARENT_ID_CPU_VALUE
#define UVM_GLOBAL_ID_GPU0_VALUE UVM_PARENT_ID_GPU0_VALUE
// Maximum number of GPUs/processors that can be represented with the id types
#define UVM_ID_MAX_GPUS UVM_MAX_GPUS
#define UVM_ID_MAX_PROCESSORS UVM_MAX_PROCESSORS
#define UVM_PARENT_ID_MAX_GPUS UVM_MAX_GPUS
#define UVM_PARENT_ID_MAX_PROCESSORS UVM_MAX_PROCESSORS
#define UVM_ID_MAX_SUB_PROCESSORS 8
#define UVM_PARENT_ID_MAX_SUB_PROCESSORS 8
#define UVM_GLOBAL_ID_MAX_GPUS (UVM_MAX_GPUS * UVM_ID_MAX_SUB_PROCESSORS)
#define UVM_GLOBAL_ID_MAX_GPUS (UVM_PARENT_ID_MAX_GPUS * UVM_PARENT_ID_MAX_SUB_PROCESSORS)
#define UVM_GLOBAL_ID_MAX_PROCESSORS (UVM_GLOBAL_ID_MAX_GPUS + 1)
#define UVM_ID_CPU ((uvm_processor_id_t) { .val = UVM_ID_CPU_VALUE })
#define UVM_ID_INVALID ((uvm_processor_id_t) { .val = UVM_ID_MAX_PROCESSORS })
#define UVM_PARENT_ID_CPU ((uvm_parent_processor_id_t) { .val = UVM_PARENT_ID_CPU_VALUE })
#define UVM_PARENT_ID_INVALID ((uvm_parent_processor_id_t) { .val = UVM_PARENT_ID_MAX_PROCESSORS })
#define UVM_GLOBAL_ID_CPU ((uvm_global_processor_id_t) { .val = UVM_GLOBAL_ID_CPU_VALUE })
#define UVM_GLOBAL_ID_INVALID ((uvm_global_processor_id_t) { .val = UVM_GLOBAL_ID_MAX_PROCESSORS })
#define UVM_ID_CHECK_BOUNDS(id) UVM_ASSERT_MSG(id.val <= UVM_ID_MAX_PROCESSORS, "id %u\n", id.val)
#define UVM_PARENT_ID_CHECK_BOUNDS(id) UVM_ASSERT_MSG(id.val <= UVM_PARENT_ID_MAX_PROCESSORS, "id %u\n", id.val)
#define UVM_GLOBAL_ID_CHECK_BOUNDS(id) UVM_ASSERT_MSG(id.val <= UVM_GLOBAL_ID_MAX_PROCESSORS, "id %u\n", id.val)
static int uvm_id_cmp(uvm_processor_id_t id1, uvm_processor_id_t id2)
static int uvm_parent_id_cmp(uvm_parent_processor_id_t id1, uvm_parent_processor_id_t id2)
{
UVM_ID_CHECK_BOUNDS(id1);
UVM_ID_CHECK_BOUNDS(id2);
UVM_PARENT_ID_CHECK_BOUNDS(id1);
UVM_PARENT_ID_CHECK_BOUNDS(id2);
return UVM_CMP_DEFAULT(id1.val, id2.val);
}
static bool uvm_id_equal(uvm_processor_id_t id1, uvm_processor_id_t id2)
static bool uvm_parent_id_equal(uvm_parent_processor_id_t id1, uvm_parent_processor_id_t id2)
{
UVM_ID_CHECK_BOUNDS(id1);
UVM_ID_CHECK_BOUNDS(id2);
UVM_PARENT_ID_CHECK_BOUNDS(id1);
UVM_PARENT_ID_CHECK_BOUNDS(id2);
return id1.val == id2.val;
}
@@ -330,30 +334,30 @@ static bool uvm_global_id_equal(uvm_global_processor_id_t id1, uvm_global_proces
return id1.val == id2.val;
}
#define UVM_ID_IS_CPU(id) uvm_id_equal(id, UVM_ID_CPU)
#define UVM_ID_IS_INVALID(id) uvm_id_equal(id, UVM_ID_INVALID)
#define UVM_ID_IS_VALID(id) (!UVM_ID_IS_INVALID(id))
#define UVM_ID_IS_GPU(id) (!UVM_ID_IS_CPU(id) && !UVM_ID_IS_INVALID(id))
#define UVM_PARENT_ID_IS_CPU(id) uvm_parent_id_equal(id, UVM_PARENT_ID_CPU)
#define UVM_PARENT_ID_IS_INVALID(id) uvm_parent_id_equal(id, UVM_PARENT_ID_INVALID)
#define UVM_PARENT_ID_IS_VALID(id) (!UVM_PARENT_ID_IS_INVALID(id))
#define UVM_PARENT_ID_IS_GPU(id) (!UVM_PARENT_ID_IS_CPU(id) && !UVM_PARENT_ID_IS_INVALID(id))
#define UVM_GLOBAL_ID_IS_CPU(id) uvm_global_id_equal(id, UVM_GLOBAL_ID_CPU)
#define UVM_GLOBAL_ID_IS_INVALID(id) uvm_global_id_equal(id, UVM_GLOBAL_ID_INVALID)
#define UVM_GLOBAL_ID_IS_VALID(id) (!UVM_GLOBAL_ID_IS_INVALID(id))
#define UVM_GLOBAL_ID_IS_GPU(id) (!UVM_GLOBAL_ID_IS_CPU(id) && !UVM_GLOBAL_ID_IS_INVALID(id))
static uvm_processor_id_t uvm_id_from_value(NvU32 val)
static uvm_parent_processor_id_t uvm_parent_id_from_value(NvU32 val)
{
uvm_processor_id_t ret = { .val = val };
uvm_parent_processor_id_t ret = { .val = val };
UVM_ID_CHECK_BOUNDS(ret);
UVM_PARENT_ID_CHECK_BOUNDS(ret);
return ret;
}
static uvm_gpu_id_t uvm_gpu_id_from_value(NvU32 val)
static uvm_parent_gpu_id_t uvm_parent_gpu_id_from_value(NvU32 val)
{
uvm_gpu_id_t ret = uvm_id_from_value(val);
uvm_parent_gpu_id_t ret = uvm_parent_id_from_value(val);
UVM_ASSERT(!UVM_ID_IS_CPU(ret));
UVM_ASSERT(!UVM_PARENT_ID_IS_CPU(ret));
return ret;
}
@@ -376,34 +380,34 @@ static uvm_global_gpu_id_t uvm_global_gpu_id_from_value(NvU32 val)
return ret;
}
// Create a GPU id from the given GPU id index (previously obtained via
// uvm_id_gpu_index)
static uvm_gpu_id_t uvm_gpu_id_from_index(NvU32 index)
// Create a parent GPU id from the given parent GPU id index (previously
// obtained via uvm_parent_id_gpu_index)
static uvm_parent_gpu_id_t uvm_parent_gpu_id_from_index(NvU32 index)
{
return uvm_gpu_id_from_value(index + UVM_ID_GPU0_VALUE);
return uvm_parent_gpu_id_from_value(index + UVM_PARENT_ID_GPU0_VALUE);
}
static uvm_processor_id_t uvm_id_next(uvm_processor_id_t id)
static uvm_parent_processor_id_t uvm_parent_id_next(uvm_parent_processor_id_t id)
{
++id.val;
UVM_ID_CHECK_BOUNDS(id);
UVM_PARENT_ID_CHECK_BOUNDS(id);
return id;
}
static uvm_gpu_id_t uvm_gpu_id_next(uvm_gpu_id_t id)
static uvm_parent_gpu_id_t uvm_parent_gpu_id_next(uvm_parent_gpu_id_t id)
{
UVM_ASSERT(UVM_ID_IS_GPU(id));
UVM_ASSERT(UVM_PARENT_ID_IS_GPU(id));
++id.val;
UVM_ID_CHECK_BOUNDS(id);
UVM_PARENT_ID_CHECK_BOUNDS(id);
return id;
}
// Same as uvm_gpu_id_from_index but for uvm_global_processor_id_t
// Same as uvm_parent_gpu_id_from_index but for uvm_global_processor_id_t
static uvm_global_gpu_id_t uvm_global_gpu_id_from_index(NvU32 index)
{
return uvm_global_gpu_id_from_value(index + UVM_GLOBAL_ID_GPU0_VALUE);
@@ -429,11 +433,11 @@ static uvm_global_gpu_id_t uvm_global_gpu_id_next(uvm_global_gpu_id_t id)
return id;
}
// This function returns the numerical value within [0, UVM_ID_MAX_PROCESSORS)
// of the given processor id
static NvU32 uvm_id_value(uvm_processor_id_t id)
// This function returns the numerical value within
// [0, UVM_PARENT_ID_MAX_PROCESSORS) of the given parent processor id.
static NvU32 uvm_parent_id_value(uvm_parent_processor_id_t id)
{
UVM_ASSERT(UVM_ID_IS_VALID(id));
UVM_ASSERT(UVM_PARENT_ID_IS_VALID(id));
return id.val;
}
@@ -448,12 +452,12 @@ static NvU32 uvm_global_id_value(uvm_global_processor_id_t id)
}
// This function returns the index of the given GPU id within the GPU id space
// [0, UVM_ID_MAX_GPUS)
static NvU32 uvm_id_gpu_index(uvm_gpu_id_t id)
// [0, UVM_PARENT_ID_MAX_GPUS)
static NvU32 uvm_parent_id_gpu_index(uvm_parent_gpu_id_t id)
{
UVM_ASSERT(UVM_ID_IS_GPU(id));
UVM_ASSERT(UVM_PARENT_ID_IS_GPU(id));
return id.val - UVM_ID_GPU0_VALUE;
return id.val - UVM_PARENT_ID_GPU0_VALUE;
}
// This function returns the index of the given GPU id within the GPU id space
@@ -465,61 +469,61 @@ static NvU32 uvm_global_id_gpu_index(const uvm_global_gpu_id_t id)
return id.val - UVM_GLOBAL_ID_GPU0_VALUE;
}
static NvU32 uvm_global_id_gpu_index_from_gpu_id(const uvm_gpu_id_t id)
static NvU32 uvm_global_id_gpu_index_from_parent_gpu_id(const uvm_parent_gpu_id_t id)
{
UVM_ASSERT(UVM_ID_IS_GPU(id));
UVM_ASSERT(UVM_PARENT_ID_IS_GPU(id));
return uvm_id_gpu_index(id) * UVM_ID_MAX_SUB_PROCESSORS;
return uvm_parent_id_gpu_index(id) * UVM_PARENT_ID_MAX_SUB_PROCESSORS;
}
static NvU32 uvm_id_gpu_index_from_global_gpu_id(const uvm_global_gpu_id_t id)
static NvU32 uvm_parent_id_gpu_index_from_global_gpu_id(const uvm_global_gpu_id_t id)
{
UVM_ASSERT(UVM_GLOBAL_ID_IS_GPU(id));
return uvm_global_id_gpu_index(id) / UVM_ID_MAX_SUB_PROCESSORS;
return uvm_global_id_gpu_index(id) / UVM_PARENT_ID_MAX_SUB_PROCESSORS;
}
static uvm_global_gpu_id_t uvm_global_gpu_id_from_gpu_id(const uvm_gpu_id_t id)
static uvm_global_gpu_id_t uvm_global_gpu_id_from_parent_gpu_id(const uvm_parent_gpu_id_t id)
{
UVM_ASSERT(UVM_ID_IS_GPU(id));
UVM_ASSERT(UVM_PARENT_ID_IS_GPU(id));
return uvm_global_gpu_id_from_index(uvm_global_id_gpu_index_from_gpu_id(id));
return uvm_global_gpu_id_from_index(uvm_global_id_gpu_index_from_parent_gpu_id(id));
}
static uvm_global_gpu_id_t uvm_global_gpu_id_from_parent_index(NvU32 index)
{
UVM_ASSERT(index < UVM_MAX_GPUS);
UVM_ASSERT(index < UVM_PARENT_ID_MAX_GPUS);
return uvm_global_gpu_id_from_gpu_id(uvm_gpu_id_from_value(index + UVM_GLOBAL_ID_GPU0_VALUE));
return uvm_global_gpu_id_from_parent_gpu_id(uvm_parent_gpu_id_from_value(index + UVM_GLOBAL_ID_GPU0_VALUE));
}
static uvm_global_gpu_id_t uvm_global_gpu_id_from_sub_processor_index(const uvm_gpu_id_t id, NvU32 sub_index)
static uvm_global_gpu_id_t uvm_global_gpu_id_from_sub_processor_index(const uvm_parent_gpu_id_t id, NvU32 sub_index)
{
NvU32 index;
UVM_ASSERT(sub_index < UVM_ID_MAX_SUB_PROCESSORS);
UVM_ASSERT(sub_index < UVM_PARENT_ID_MAX_SUB_PROCESSORS);
index = uvm_global_id_gpu_index_from_gpu_id(id) + sub_index;
index = uvm_global_id_gpu_index_from_parent_gpu_id(id) + sub_index;
return uvm_global_gpu_id_from_index(index);
}
static uvm_gpu_id_t uvm_gpu_id_from_global_gpu_id(const uvm_global_gpu_id_t id)
static uvm_parent_gpu_id_t uvm_parent_gpu_id_from_global_gpu_id(const uvm_global_gpu_id_t id)
{
UVM_ASSERT(UVM_GLOBAL_ID_IS_GPU(id));
return uvm_gpu_id_from_index(uvm_id_gpu_index_from_global_gpu_id(id));
return uvm_parent_gpu_id_from_index(uvm_parent_id_gpu_index_from_global_gpu_id(id));
}
static NvU32 uvm_global_id_sub_processor_index(const uvm_global_gpu_id_t id)
{
return uvm_global_id_gpu_index(id) % UVM_ID_MAX_SUB_PROCESSORS;
return uvm_global_id_gpu_index(id) % UVM_PARENT_ID_MAX_SUB_PROCESSORS;
}
UVM_PROCESSOR_MASK(uvm_processor_mask_t, \
uvm_processor_mask, \
UVM_ID_MAX_PROCESSORS, \
uvm_processor_id_t, \
uvm_id_from_value)
UVM_PARENT_ID_MAX_PROCESSORS, \
uvm_parent_processor_id_t, \
uvm_parent_id_from_value)
UVM_PROCESSOR_MASK(uvm_global_processor_mask_t, \
uvm_global_processor_mask, \
@@ -533,19 +537,19 @@ static bool uvm_processor_mask_gpu_subset(const uvm_processor_mask_t *subset, co
{
uvm_processor_mask_t subset_gpus;
uvm_processor_mask_copy(&subset_gpus, subset);
uvm_processor_mask_clear(&subset_gpus, UVM_ID_CPU);
uvm_processor_mask_clear(&subset_gpus, UVM_PARENT_ID_CPU);
return uvm_processor_mask_subset(&subset_gpus, mask);
}
#define for_each_id_in_mask(id, mask) \
for ((id) = uvm_processor_mask_find_first_id(mask); \
UVM_ID_IS_VALID(id); \
(id) = uvm_processor_mask_find_next_id((mask), uvm_id_next(id)))
UVM_PARENT_ID_IS_VALID(id); \
(id) = uvm_processor_mask_find_next_id((mask), uvm_parent_id_next(id)))
#define for_each_gpu_id_in_mask(gpu_id, mask) \
for ((gpu_id) = uvm_processor_mask_find_first_gpu_id((mask)); \
UVM_ID_IS_VALID(gpu_id); \
(gpu_id) = uvm_processor_mask_find_next_id((mask), uvm_gpu_id_next(gpu_id)))
UVM_PARENT_ID_IS_VALID(gpu_id); \
(gpu_id) = uvm_processor_mask_find_next_id((mask), uvm_parent_gpu_id_next(gpu_id)))
#define for_each_global_id_in_mask(id, mask) \
for ((id) = uvm_global_processor_mask_find_first_id(mask); \
@@ -559,21 +563,36 @@ static bool uvm_processor_mask_gpu_subset(const uvm_processor_mask_t *subset, co
// Helper to iterate over all valid gpu ids
#define for_each_gpu_id(i) \
for (i = uvm_gpu_id_from_value(UVM_ID_GPU0_VALUE); UVM_ID_IS_VALID(i); i = uvm_gpu_id_next(i))
for (i = uvm_parent_gpu_id_from_value(UVM_PARENT_ID_GPU0_VALUE); UVM_PARENT_ID_IS_VALID(i); i = uvm_parent_gpu_id_next(i))
#define for_each_global_gpu_id(i) \
for (i = uvm_global_gpu_id_from_value(UVM_GLOBAL_ID_GPU0_VALUE); UVM_GLOBAL_ID_IS_VALID(i); i = uvm_global_gpu_id_next(i))
#define for_each_global_sub_processor_id_in_gpu(id, i) \
for (i = uvm_global_gpu_id_from_gpu_id(id); \
for (i = uvm_global_gpu_id_from_parent_gpu_id(id); \
UVM_GLOBAL_ID_IS_VALID(i) && \
(uvm_global_id_value(i) < uvm_global_id_value(uvm_global_gpu_id_from_gpu_id(id)) + UVM_ID_MAX_SUB_PROCESSORS); \
(uvm_global_id_value(i) < uvm_global_id_value(uvm_global_gpu_id_from_parent_gpu_id(id)) + UVM_PARENT_ID_MAX_SUB_PROCESSORS); \
i = uvm_global_gpu_id_next(i))
// Helper to iterate over all valid gpu ids
#define for_each_processor_id(i) for (i = UVM_ID_CPU; UVM_ID_IS_VALID(i); i = uvm_id_next(i))
#define for_each_processor_id(i) for (i = UVM_PARENT_ID_CPU; UVM_PARENT_ID_IS_VALID(i); i = uvm_parent_id_next(i))
#define for_each_global_id(i) for (i = UVM_GLOBAL_ID_CPU; UVM_GLOBAL_ID_IS_VALID(i); i = uvm_global_id_next(i))
// Find the node in mask with the shortest distance (as returned by
// node_distance) from src.
// Note that the search is inclusive of src.
// If mask has no bits set, NUMA_NO_NODE is returned.
int uvm_find_closest_node_mask(int src, const nodemask_t *mask);
// Iterate over all nodes in mask with increasing distance from src.
// Note that this iterator is destructive of the mask.
#define for_each_closest_uvm_node(nid, src, mask) \
for ((nid) = uvm_find_closest_node_mask((src), &(mask)); \
(nid) != NUMA_NO_NODE; \
node_clear((nid), (mask)), (nid) = uvm_find_closest_node_mask((src), &(mask)))
#define for_each_possible_uvm_node(nid) for_each_node_mask((nid), node_possible_map)
static bool uvm_processor_uuid_eq(const NvProcessorUuid *uuid1, const NvProcessorUuid *uuid2)
{
return memcmp(uuid1, uuid2, sizeof(*uuid1)) == 0;
@@ -585,4 +604,78 @@ static void uvm_processor_uuid_copy(NvProcessorUuid *dst, const NvProcessorUuid
memcpy(dst, src, sizeof(*dst));
}
// TODO: Bug 4195538: [uvm][multi-SMC] Get UVM internal data structures ready to
// meet multi-SMC requirements. Temporary aliases, they must be removed once
// the data structures are converted.
typedef uvm_parent_processor_id_t uvm_processor_id_t;
typedef uvm_parent_gpu_id_t uvm_gpu_id_t;
#define UVM_ID_CPU_VALUE UVM_PARENT_ID_CPU_VALUE
#define UVM_ID_GPU0_VALUE UVM_PARENT_ID_GPU0_VALUE
#define UVM_ID_MAX_GPUS UVM_PARENT_ID_MAX_GPUS
#define UVM_ID_MAX_PROCESSORS UVM_PARENT_ID_MAX_PROCESSORS
#define UVM_ID_MAX_SUB_PROCESSORS UVM_PARENT_ID_MAX_SUB_PROCESSORS
#define UVM_ID_CPU UVM_PARENT_ID_CPU
#define UVM_ID_INVALID UVM_PARENT_ID_INVALID
static int uvm_id_cmp(uvm_parent_processor_id_t id1, uvm_parent_processor_id_t id2)
{
return UVM_CMP_DEFAULT(id1.val, id2.val);
}
static bool uvm_id_equal(uvm_parent_processor_id_t id1, uvm_parent_processor_id_t id2)
{
return uvm_parent_id_equal(id1, id2);
}
#define UVM_ID_IS_CPU(id) uvm_id_equal(id, UVM_ID_CPU)
#define UVM_ID_IS_INVALID(id) uvm_id_equal(id, UVM_ID_INVALID)
#define UVM_ID_IS_VALID(id) (!UVM_ID_IS_INVALID(id))
#define UVM_ID_IS_GPU(id) (!UVM_ID_IS_CPU(id) && !UVM_ID_IS_INVALID(id))
static uvm_parent_gpu_id_t uvm_gpu_id_from_value(NvU32 val)
{
return uvm_parent_gpu_id_from_value(val);
}
static NvU32 uvm_id_value(uvm_parent_processor_id_t id)
{
return uvm_parent_id_value(id);
}
static NvU32 uvm_id_gpu_index(uvm_parent_gpu_id_t id)
{
return uvm_parent_id_gpu_index(id);
}
static NvU32 uvm_id_gpu_index_from_global_gpu_id(const uvm_global_gpu_id_t id)
{
return uvm_parent_id_gpu_index_from_global_gpu_id(id);
}
static uvm_parent_gpu_id_t uvm_gpu_id_from_index(NvU32 index)
{
return uvm_parent_gpu_id_from_index(index);
}
static uvm_parent_gpu_id_t uvm_gpu_id_next(uvm_parent_gpu_id_t id)
{
return uvm_parent_gpu_id_next(id);
}
static uvm_parent_gpu_id_t uvm_gpu_id_from_global_gpu_id(const uvm_global_gpu_id_t id)
{
return uvm_parent_gpu_id_from_global_gpu_id(id);
}
static NvU32 uvm_global_id_gpu_index_from_gpu_id(const uvm_parent_gpu_id_t id)
{
return uvm_global_id_gpu_index_from_parent_gpu_id(id);
}
static uvm_global_gpu_id_t uvm_global_gpu_id_from_gpu_id(const uvm_parent_gpu_id_t id)
{
return uvm_global_gpu_id_from_parent_gpu_id(id);
}
#endif
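The index arithmetic described in the diagram earlier in this header can be sanity-checked with the helpers it now exposes. A sketch with example index values only; it assumes UVM_PARENT_ID_MAX_SUB_PROCESSORS keeps its value of 8 from this header.

// Sketch: parent GPU index 2 corresponds to id value 3, and its sub-processors
// occupy global GPU indices [16, 23] when UVM_PARENT_ID_MAX_SUB_PROCESSORS == 8.
static void sketch_id_mapping(void)
{
    uvm_parent_gpu_id_t parent = uvm_parent_gpu_id_from_index(2);
    uvm_global_gpu_id_t global = uvm_global_gpu_id_from_sub_processor_index(parent, 3);

    UVM_ASSERT(uvm_parent_id_value(parent) == 3);
    UVM_ASSERT(uvm_parent_id_gpu_index_from_global_gpu_id(global) == 2);
    UVM_ASSERT(uvm_global_id_sub_processor_index(global) == 3);
}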

View File

@@ -106,26 +106,6 @@ static NV_STATUS uvm_test_nv_kthread_q(UVM_TEST_NV_KTHREAD_Q_PARAMS *params, str
return NV_ERR_INVALID_STATE;
}
static NV_STATUS uvm_test_numa_get_closest_cpu_node_to_gpu(UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU_PARAMS *params,
struct file *filp)
{
uvm_gpu_t *gpu;
NV_STATUS status;
uvm_rm_user_object_t user_rm_va_space = {
.rm_control_fd = -1,
.user_client = params->client,
.user_object = params->smc_part_ref
};
status = uvm_gpu_retain_by_uuid(&params->gpu_uuid, &user_rm_va_space, &gpu);
if (status != NV_OK)
return status;
params->node_id = gpu->parent->closest_cpu_numa_node;
uvm_gpu_release(gpu);
return NV_OK;
}
// Callers of this function should ensure that node is not NUMA_NO_NODE in order
// to avoid overrunning the kernel's node to cpumask map.
static NV_STATUS uvm_test_verify_bh_affinity(uvm_intr_handler_t *isr, int node)
@@ -307,8 +287,6 @@ long uvm_test_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_DRAIN_REPLAYABLE_FAULTS, uvm_test_drain_replayable_faults);
UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMA_GET_BATCH_SIZE, uvm_test_pma_get_batch_size);
UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_QUERY_PMA_STATS, uvm_test_pmm_query_pma_stats);
UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU,
uvm_test_numa_get_closest_cpu_node_to_gpu);
UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_NUMA_CHECK_AFFINITY, uvm_test_numa_check_affinity);
UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS,
uvm_test_va_space_add_dummy_thread_contexts);

View File

@@ -561,6 +561,22 @@ typedef struct
// user_pages_allocation_retry_force_count, but the injection point simulates
// driver metadata allocation failure.
//
// cpu_chunk_allocation_target_id and cpu_chunk_allocation_actual_id are used
// to control the NUMA node IDs for CPU chunk allocations, specifically for
// testing overlapping CPU chunk allocations.
//
// Currently, uvm_api_migrate() does not pass the preferred CPU NUMA node for
// managed memory, so it is not possible to request a specific node.
// cpu_chunk_allocation_target_id is used to request that the allocation be made
// on a specific node. On the other hand, cpu_chunk_allocation_actual_id is the
// node on which the allocation will actually be made.
//
// The two parameters can be used to force a CPU chunk allocation to overlap a
// previously allocated chunk.
//
// Please note that even when specifying cpu_chunk_allocation_actual_id, the
// kernel may end up allocating on a different node.
//
// Error returns:
// NV_ERR_INVALID_ADDRESS
// - lookup_address doesn't match a UVM range
@@ -571,6 +587,8 @@ typedef struct
NvU32 page_table_allocation_retry_force_count; // In
NvU32 user_pages_allocation_retry_force_count; // In
NvU32 cpu_chunk_allocation_size_mask; // In
NvS32 cpu_chunk_allocation_target_id; // In
NvS32 cpu_chunk_allocation_actual_id; // In
NvU32 cpu_pages_allocation_error_count; // In
NvBool eviction_error; // In
NvBool populate_error; // In
@@ -604,6 +622,10 @@ typedef struct
NvProcessorUuid resident_on[UVM_MAX_PROCESSORS]; // Out
NvU32 resident_on_count; // Out
// If the memory is resident on the CPU, the NUMA node on which the page
// is resident. Otherwise, -1.
NvS32 resident_nid; // Out
// The size of the physical allocation backing lookup_address. Only the
// system-page-sized portion of this allocation which contains
// lookup_address is guaranteed to be resident on the corresponding
@@ -1168,19 +1190,6 @@ typedef struct
NV_STATUS rmStatus; // Out
} UVM_TEST_PMM_QUERY_PMA_STATS_PARAMS;
#define UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU UVM_TEST_IOCTL_BASE(77)
typedef struct
{
NvProcessorUuid gpu_uuid; // In
NvHandle client; // In
NvHandle smc_part_ref; // In
// On kernels with NUMA support, this entry contains the closest CPU NUMA
// node to this GPU. Otherwise, the value will be -1.
NvS32 node_id; // Out
NV_STATUS rmStatus; // Out
} UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU_PARAMS;
// Test whether the bottom halves have run on the correct CPUs based on the
// NUMA node locality of the GPU.
//

File diff suppressed because it is too large

View File

@@ -44,6 +44,7 @@
#include <linux/mmu_notifier.h>
#include <linux/wait.h>
#include <linux/nodemask.h>
// VA blocks are the leaf nodes in the uvm_va_space tree for managed allocations
// (VA ranges with type == UVM_VA_RANGE_TYPE_MANAGED):
@@ -229,6 +230,42 @@ typedef struct
} uvm_va_block_gpu_state_t;
typedef struct
{
// Per-page residency bit vector, used for fast traversal of resident
// pages.
//
// A set bit means the CPU has a coherent copy of the physical page
// resident in the NUMA node's memory, and that a CPU chunk for the
// corresponding page index has been allocated. This does not mean that
// the coherent copy is currently mapped anywhere, however. A page may be
// resident on multiple processors (but not multiple CPU NUMA nodes) when in
// read-duplicate mode.
//
// A cleared bit means the CPU NUMA node does not have a coherent copy of
// that page resident. A CPU chunk for the corresponding page index may or
// may not have been allocated. If the chunk is present, it's a cached chunk
// which can be reused in the future.
//
// Allocating PAGES_PER_UVM_VA_BLOCK is overkill when the block is
// smaller than UVM_VA_BLOCK_SIZE, but it's not much extra memory
// overhead on the whole.
uvm_page_mask_t resident;
// Per-page allocation bit vector.
//
// A set bit means that a CPU chunk has been allocated for the
// corresponding page index on this NUMA node.
uvm_page_mask_t allocated;
// CPU memory chunks represent physically contiguous CPU memory
// allocations. See uvm_pmm_sysmem.h for more details on CPU chunks.
// This member is meant to hold an opaque value indicating the CPU
// chunk storage method. For more details on CPU chunk storage,
// see uvm_cpu_chunk_storage_type_t in uvm_va_block.c.
unsigned long chunks;
} uvm_va_block_cpu_node_state_t;
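The cumulative mask in uvm_va_block_t (further down in this hunk) must remain the logical OR of every node's resident mask. A sketch of that invariant follows, assuming node_state[] is the dense per-possible-node array described below; the real updates go through the uvm_va_block_cpu_[set|clear]_residency_*() helpers mentioned later, so this is illustrative only.

// Sketch only: rebuild the cumulative CPU resident mask from the per-node masks.
static void sketch_recompute_cpu_resident(uvm_va_block_t *block)
{
    int nid;

    uvm_page_mask_zero(&block->cpu.resident);
    for_each_possible_uvm_node(nid)
        uvm_page_mask_or(&block->cpu.resident,
                         &block->cpu.resident,
                         &block->cpu.node_state[nid]->resident);
}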
// TODO: Bug 1766180: Worst-case we could have one of these per system page.
// Options:
// 1) Rely on the OOM killer to prevent the user from trying to do that
@@ -306,38 +343,30 @@ struct uvm_va_block_struct
struct
{
// Per-page residency bit vector, used for fast traversal of resident
// pages.
//
// A set bit means the CPU has a coherent copy of the physical page
// resident in its memory, and that the corresponding entry in the pages
// array is present. This does not mean that the coherent copy is
// currently mapped anywhere, however. A page may be resident on
// multiple processors when in read-duplicate mode.
//
// A cleared bit means the CPU does not have a coherent copy of that
// page resident. The corresponding entry in the pages array may or may
// not present. If the entry is present, it's a cached page which can be
// reused in the future.
//
// Allocating PAGES_PER_UVM_VA_BLOCK is overkill when the block is
// smaller than UVM_VA_BLOCK_SIZE, but it's not much extra memory
// overhead on the whole.
uvm_page_mask_t resident;
// CPU memory chunks represent physically contiguous CPU memory
// allocations. See uvm_pmm_sysmem.h for more details on CPU chunks.
// This member is meant to hold an opaque value indicating the CPU
// chunk storage method. For more details on CPU chunk storage,
// see uvm_cpu_chunk_storage_type_t in uvm_va_block.c.
unsigned long chunks;
// Per-NUMA node tracking of CPU allocations.
// This is a dense array with one entry per possible NUMA node.
uvm_va_block_cpu_node_state_t **node_state;
// Per-page allocation bit vector.
//
// A set bit means that a CPU page has been allocated for the
// corresponding page index.
// corresponding page index on at least one CPU NUMA node.
uvm_page_mask_t allocated;
// Per-page residency bit vector. See
// uvm_va_block_cpu_node_state_t::resident for a detailed description.
// This mask is a cumulative mask (logical OR) of all
// uvm_va_block_cpu_node_state_t::resident masks. It is meant to be used
// only for fast testing of page residency when it matters only if the
// page is resident on the CPU.
//
// Note that this mask cannot be set directly as this will cause
// inconsistencies between this mask and the per-NUMA residency masks.
// In order to properly maintain consistency between the per-NUMA masks
// and this one, uvm_va_block_cpu_[set|clear]_residency_*() helpers
// should be used.
uvm_page_mask_t resident;
// Per-page mapping bit vectors, one per bit we need to track. These are
// used for fast traversal of valid mappings in the block. These contain
// all non-address bits needed to establish a virtual mapping on this
@@ -418,7 +447,8 @@ struct uvm_va_block_struct
uvm_page_mask_t read_duplicated_pages;
// Mask to keep track of the pages that are not mapped on any non-UVM-Lite
// processor.
// processor. This mask is not used for HMM because the CPU can map pages
// at any time without notifying the driver.
// 0: Page is definitely not mapped by any processors
// 1: Page may or may not be mapped by a processor
//
@@ -525,6 +555,13 @@ struct uvm_va_block_wrapper_struct
// a successful migration if this error flag is cleared.
NvU32 inject_cpu_pages_allocation_error_count;
// The NUMA node ID on which any CPU chunks will be allocated.
// This will override any other setting and/or policy.
// Note that the kernel is still free to allocate from any of the
// nodes in the thread's policy.
int cpu_chunk_allocation_target_id;
int cpu_chunk_allocation_actual_id;
// Force the next eviction attempt on this block to fail. Used for
// testing only.
bool inject_eviction_error;
@@ -668,17 +705,12 @@ void uvm_va_block_context_free(uvm_va_block_context_t *va_block_context);
// Initialization of an already-allocated uvm_va_block_context_t.
//
// mm is used to initialize the value of va_block_context->mm. NULL is allowed.
static void uvm_va_block_context_init(uvm_va_block_context_t *va_block_context, struct mm_struct *mm)
{
UVM_ASSERT(va_block_context);
void uvm_va_block_context_init(uvm_va_block_context_t *va_block_context, struct mm_struct *mm);
// Write garbage into the VA Block context to ensure that the UVM code
// clears masks appropriately
if (UVM_IS_DEBUG())
memset(va_block_context, 0xff, sizeof(*va_block_context));
va_block_context->mm = mm;
}
// Return the preferred NUMA node ID for the block's policy.
// If the preferred node ID is NUMA_NO_NODE, the current NUMA node ID
// is returned.
int uvm_va_block_context_get_node(uvm_va_block_context_t *va_block_context);
// TODO: Bug 1766480: Using only page masks instead of a combination of regions
// and page masks could simplify the below APIs and their implementations
@@ -734,6 +766,9 @@ static void uvm_va_block_context_init(uvm_va_block_context_t *va_block_context,
// those masks. It is the caller's responsibility to zero the masks or
// not first.
//
// va_block_context->make_resident.dest_nid is used to guide the NUMA node for
// CPU allocations.
//
// Notably any status other than NV_OK indicates that the block's lock might
// have been unlocked and relocked.
//
@@ -1377,8 +1412,14 @@ static uvm_va_block_test_t *uvm_va_block_get_test(uvm_va_block_t *va_block)
// Get the page residency mask for a processor if it's known to be there.
//
// If the processor is the CPU, the residency mask for the NUMA node ID
// specified by nid will be returned (see
// uvm_va_block_cpu_node_state_t::resident). If nid is NUMA_NO_NODE,
// the cumulative CPU residency mask will be returned (see
// uvm_va_block_t::cpu::resident).
//
// If the processor is a GPU, this will assert that GPU state is indeed present.
uvm_page_mask_t *uvm_va_block_resident_mask_get(uvm_va_block_t *block, uvm_processor_id_t processor);
uvm_page_mask_t *uvm_va_block_resident_mask_get(uvm_va_block_t *block, uvm_processor_id_t processor, int nid);
// Get the page mapped mask for a processor. The returned mask cannot be
// directly modified by the caller
@@ -1386,6 +1427,13 @@ uvm_page_mask_t *uvm_va_block_resident_mask_get(uvm_va_block_t *block, uvm_proce
// If the processor is a GPU, this will assert that GPU state is indeed present.
const uvm_page_mask_t *uvm_va_block_map_mask_get(uvm_va_block_t *block, uvm_processor_id_t processor);
// Return a mask of non-UVM-Lite pages that are unmapped within the given
// region.
// Locking: The block lock must be held.
void uvm_va_block_unmapped_pages_get(uvm_va_block_t *va_block,
uvm_va_block_region_t region,
uvm_page_mask_t *out_mask);
// VA block lookup functions. There are a number of permutations which might be
// useful, such as looking up the block from {va_space, va_range} x {addr,
// block index}. The ones implemented here and in uvm_va_range.h support the
@@ -1756,17 +1804,28 @@ static bool uvm_page_mask_full(const uvm_page_mask_t *mask)
return bitmap_full(mask->bitmap, PAGES_PER_UVM_VA_BLOCK);
}
static bool uvm_page_mask_and(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in1, const uvm_page_mask_t *mask_in2)
static void uvm_page_mask_fill(uvm_page_mask_t *mask)
{
bitmap_fill(mask->bitmap, PAGES_PER_UVM_VA_BLOCK);
}
static bool uvm_page_mask_and(uvm_page_mask_t *mask_out,
const uvm_page_mask_t *mask_in1,
const uvm_page_mask_t *mask_in2)
{
return bitmap_and(mask_out->bitmap, mask_in1->bitmap, mask_in2->bitmap, PAGES_PER_UVM_VA_BLOCK);
}
static bool uvm_page_mask_andnot(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in1, const uvm_page_mask_t *mask_in2)
static bool uvm_page_mask_andnot(uvm_page_mask_t *mask_out,
const uvm_page_mask_t *mask_in1,
const uvm_page_mask_t *mask_in2)
{
return bitmap_andnot(mask_out->bitmap, mask_in1->bitmap, mask_in2->bitmap, PAGES_PER_UVM_VA_BLOCK);
}
static void uvm_page_mask_or(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in1, const uvm_page_mask_t *mask_in2)
static void uvm_page_mask_or(uvm_page_mask_t *mask_out,
const uvm_page_mask_t *mask_in1,
const uvm_page_mask_t *mask_in2)
{
bitmap_or(mask_out->bitmap, mask_in1->bitmap, mask_in2->bitmap, PAGES_PER_UVM_VA_BLOCK);
}
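The wrappers above mirror the kernel bitmap helpers; as a hedged illustration of how they compose (hypothetical example_* name, caller-provided output mask):
// out = resident & ~mapped. Returns true when at least one page is resident
// but not mapped, which is exactly what bitmap_andnot() reports.
static bool example_pages_resident_but_unmapped(const uvm_page_mask_t *resident,
                                                const uvm_page_mask_t *mapped,
                                                uvm_page_mask_t *out)
{
    return uvm_page_mask_andnot(out, resident, mapped);
}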
@@ -2036,30 +2095,49 @@ uvm_processor_id_t uvm_va_block_page_get_closest_resident(uvm_va_block_t *va_blo
uvm_page_index_t page_index,
uvm_processor_id_t processor);
// Mark CPU page page_index as resident on NUMA node specified by nid.
// nid cannot be NUMA_NO_NODE.
void uvm_va_block_cpu_set_resident_page(uvm_va_block_t *va_block, int nid, uvm_page_index_t page_index);
// Test if a CPU page is resident on NUMA node nid. If nid is NUMA_NO_NODE,
// the function will return True if the page is resident on any CPU NUMA node.
bool uvm_va_block_cpu_is_page_resident_on(uvm_va_block_t *va_block, int nid, uvm_page_index_t page_index);
// Test if all pages in region are resident on NUMA node nid. If nid is
// NUMA_NO_NODE, the function will test if the pages in the region are
// resident on any CPU NUMA node.
bool uvm_va_block_cpu_is_region_resident_on(uvm_va_block_t *va_block, int nid, uvm_va_block_region_t region);
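A hedged sketch of the per-node query convention above (assumptions: numa_node_id() from the kernel gives the current CPU's node, example_* is a hypothetical name, and the va_block lock is held):
// Check residency on the caller's local NUMA node only. Passing NUMA_NO_NODE
// instead would accept residency on any CPU node.
static bool example_region_resident_locally(uvm_va_block_t *block,
                                            uvm_va_block_region_t region)
{
    int nid = numa_node_id();

    return uvm_va_block_cpu_is_region_resident_on(block, nid, region);
}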
// Insert a CPU chunk at the given page_index into the va_block.
// Locking: The va_block lock must be held.
NV_STATUS uvm_cpu_chunk_insert_in_block(uvm_va_block_t *va_block,
uvm_cpu_chunk_t *chunk,
uvm_page_index_t page_index);
NV_STATUS uvm_cpu_chunk_insert_in_block(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index);
// Remove a CPU chunk at the given page_index from the va_block.
// nid cannot be NUMA_NO_NODE.
// Locking: The va_block lock must be held.
void uvm_cpu_chunk_remove_from_block(uvm_va_block_t *va_block,
uvm_page_index_t page_index);
void uvm_cpu_chunk_remove_from_block(uvm_va_block_t *va_block, int nid, uvm_page_index_t page_index);
// Return the CPU chunk at the given page_index from the va_block.
// Return the CPU chunk at the given page_index on the given NUMA node from the
// va_block. nid cannot be NUMA_NO_NODE.
// Locking: The va_block lock must be held.
uvm_cpu_chunk_t *uvm_cpu_chunk_get_chunk_for_page(uvm_va_block_t *va_block,
int nid,
uvm_page_index_t page_index);
// Return the CPU chunk at the given page_index from the va_block.
// Return the struct page * from the chunk corresponding to the given page_index
// Locking: The va_block lock must be held.
struct page *uvm_cpu_chunk_get_cpu_page(uvm_va_block_t *va_block,
uvm_page_index_t page_index);
struct page *uvm_cpu_chunk_get_cpu_page(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index);
// Return the struct page * of the resident chunk at the given page_index from
// the va_block. The given page_index must be resident on the CPU.
// Locking: The va_block lock must be held.
struct page *uvm_va_block_get_cpu_page(uvm_va_block_t *va_block, uvm_page_index_t page_index);
// Physically map a CPU chunk so it is DMA'able from all registered GPUs.
// nid cannot be NUMA_NO_NODE.
// Locking: The va_block lock must be held.
NV_STATUS uvm_va_block_map_cpu_chunk_on_gpus(uvm_va_block_t *va_block,
uvm_cpu_chunk_t *chunk,
uvm_page_index_t page_index);
// Physically unmap a CPU chunk from all registered GPUs.

View File

@@ -30,6 +30,7 @@
#include "uvm_forward_decl.h"
#include <linux/migrate.h>
#include <linux/nodemask.h>
// UVM_VA_BLOCK_BITS is 21, meaning the maximum block size is 2MB. Rationale:
// - 2MB matches the largest Pascal GPU page size so it's a natural fit
@@ -145,6 +146,18 @@ typedef struct
unsigned count;
} uvm_prot_page_mask_array_t[UVM_PROT_MAX - 1];
typedef struct
{
// A per-NUMA-node array of page masks (size num_possible_nodes()) that hold
// the set of CPU pages used by the migration operation.
uvm_page_mask_t **node_masks;
// Node mask used to iterate over the page masks above.
// If a node's bit is set, it means that the page mask given by
// node_to_index() in node_masks has set pages.
nodemask_t nodes;
} uvm_make_resident_page_tracking_t;
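How this tracking structure is meant to be walked, per the comment above, might look like the following sketch (assumptions: node_to_index() is the mapping the comment refers to, for_each_node_mask() comes from <linux/nodemask.h>, and example_* is a hypothetical name):
// Visit only the NUMA nodes whose bit is set, then index node_masks with the
// node's compact index to reach that node's page mask.
static void example_walk_tracked_pages(uvm_make_resident_page_tracking_t *tracking)
{
    int nid;

    for_each_node_mask(nid, tracking->nodes) {
        uvm_page_mask_t *mask = tracking->node_masks[node_to_index(nid)];

        // ... operate on the pages set in 'mask' for NUMA node 'nid' ...
        (void)mask;
    }
}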
// In the worst case some VA block operations require more state than we should
// reasonably store on the stack. Instead, we dynamically allocate VA block
// contexts. These are used for almost all operations on VA blocks.
@@ -159,6 +172,9 @@ typedef struct
// this block_context.
uvm_page_mask_t scratch_page_mask;
// Scratch node mask. This follows the same rules as scratch_page_mask.
nodemask_t scratch_node_mask;
// State used by uvm_va_block_make_resident
struct uvm_make_resident_context_struct
{
@@ -181,10 +197,24 @@ typedef struct
// Used to perform ECC checks after the migration is done.
uvm_processor_mask_t all_involved_processors;
// Page mask used to compute the set of CPU pages for each CPU node.
uvm_page_mask_t node_pages_mask;
// Final residency for the data. This is useful for callees to know if
// a migration is part of a staging copy
uvm_processor_id_t dest_id;
// Final residency NUMA node if the migration destination is the CPU.
int dest_nid;
// This structure is used to track CPU pages used for migrations on
// a per-NUMA node basis.
//
// The pages could be used for either migrations to the CPU (used to
// track the destination CPU pages) or staging copies (used to track
// the CPU pages used for the staging).
uvm_make_resident_page_tracking_t cpu_pages_used;
// Event that triggered the call
uvm_make_resident_cause_t cause;
} make_resident;

View File

@@ -31,6 +31,7 @@
const uvm_va_policy_t uvm_va_policy_default = {
.preferred_location = UVM_ID_INVALID,
.preferred_nid = NUMA_NO_NODE,
.read_duplication = UVM_READ_DUPLICATION_UNSET,
};

View File

@@ -24,6 +24,7 @@
#ifndef __UVM_VA_POLICY_H__
#define __UVM_VA_POLICY_H__
#include <linux/numa.h>
#include "uvm_linux.h"
#include "uvm_forward_decl.h"
#include "uvm_processors.h"
@@ -62,6 +63,18 @@ struct uvm_va_policy_struct
// This is set to UVM_ID_INVALID if no preferred location is set.
uvm_processor_id_t preferred_location;
// If the preferred location is the CPU, this is either the preferred NUMA
// node ID or NUMA_NO_NODE to indicate that there is no preference among
// nodes.
// If preferred_location is a GPU, preferred_nid will be used if CPU
// pages have to be allocated for any staging copies. Otherwise, it is
// not used.
//
// TODO: Bug 4148100 - Preferred_location and preferred_nid should be
// combined into a new type that combines the processor and NUMA node
// ID.
int preferred_nid;
// Mask of processors that are accessing this VA range and should have
// their page tables updated to access the (possibly remote) pages.
uvm_processor_mask_t accessed_by;
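A sketch of how the new preferred_nid field above might be consumed (illustrative only; assumes numa_node_id() as the fallback and a hypothetical example_* name):
// NUMA_NO_NODE means "no preference among CPU nodes", so fall back to the
// caller's current node; otherwise honor the policy's node.
static int example_cpu_allocation_node(const uvm_va_policy_t *policy)
{
    if (policy->preferred_nid == NUMA_NO_NODE)
        return numa_node_id();

    return policy->preferred_nid;
}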
@@ -193,7 +206,8 @@ uvm_va_policy_node_t *uvm_va_policy_node_iter_next(uvm_va_block_t *va_block, uvm
for ((node) = uvm_va_policy_node_iter_first((va_block), (start), (end)), \
(next) = uvm_va_policy_node_iter_next((va_block), (node), (end)); \
(node); \
(node) = (next))
(node) = (next), \
(next) = uvm_va_policy_node_iter_next((va_block), (node), (end)))
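The change above fixes the loop increment: previously 'next' was computed only once, so after the first advance the walk would spin on the same node. Open-coded as a sketch (hypothetical example_* wrapper), the corrected expansion looks like this:
// Recomputing 'next' from the current node at the end of every iteration
// keeps the traversal advancing and makes it safe to remove or split 'node'
// inside the body.
static void example_walk_policy_nodes(uvm_va_block_t *va_block, NvU64 start, NvU64 end)
{
    uvm_va_policy_node_t *node, *next;

    for (node = uvm_va_policy_node_iter_first(va_block, start, end),
         next = uvm_va_policy_node_iter_next(va_block, node, end);
         node;
         node = next,
         next = uvm_va_policy_node_iter_next(va_block, node, end)) {
        // ... 'node' may be modified or removed here ...
    }
}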
// Returns the first policy in the range [start, end], if any.
// Locking: The va_block lock must be held.

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2023 NVIDIA Corporation
Copyright (c) 2015-2022 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -162,9 +162,7 @@ static uvm_va_range_t *uvm_va_range_alloc_managed(uvm_va_space_t *va_space, NvU6
goto error;
va_range->type = UVM_VA_RANGE_TYPE_MANAGED;
uvm_va_range_get_policy(va_range)->read_duplication = UVM_READ_DUPLICATION_UNSET;
uvm_va_range_get_policy(va_range)->preferred_location = UVM_ID_INVALID;
va_range->managed.policy = uvm_va_policy_default;
va_range->blocks = uvm_kvmalloc_zero(uvm_va_range_num_blocks(va_range) * sizeof(va_range->blocks[0]));
if (!va_range->blocks) {
@@ -376,7 +374,7 @@ NV_STATUS uvm_va_range_create_semaphore_pool(uvm_va_space_t *va_space,
if (status != NV_OK)
goto error;
if (i == 0 && g_uvm_global.conf_computing_enabled)
if (i == 0 && g_uvm_global.sev_enabled)
mem_alloc_params.dma_owner = gpu;
if (attrs.is_cacheable) {
@@ -835,7 +833,7 @@ static void uvm_va_range_disable_peer_external(uvm_va_range_t *va_range,
range_tree = uvm_ext_gpu_range_tree(va_range, mapping_gpu);
uvm_mutex_lock(&range_tree->lock);
uvm_ext_gpu_map_for_each_safe(ext_map, ext_map_next, va_range, mapping_gpu) {
if (ext_map->owning_gpu == owning_gpu && !ext_map->is_sysmem) {
if (ext_map->owning_gpu == owning_gpu && (!ext_map->is_sysmem || ext_map->is_egm)) {
UVM_ASSERT(deferred_free_list);
uvm_ext_gpu_map_destroy(va_range, ext_map, deferred_free_list);
}
@@ -1807,7 +1805,7 @@ NV_STATUS uvm_api_alloc_semaphore_pool(UVM_ALLOC_SEMAPHORE_POOL_PARAMS *params,
if (params->gpuAttributesCount > UVM_MAX_GPUS)
return NV_ERR_INVALID_ARGUMENT;
if (g_uvm_global.conf_computing_enabled && params->gpuAttributesCount == 0)
if (g_uvm_global.sev_enabled && params->gpuAttributesCount == 0)
return NV_ERR_INVALID_ARGUMENT;
// The mm needs to be locked in order to remove stale HMM va_blocks.

View File

@@ -189,6 +189,7 @@ typedef struct
// sysmem was originally allocated under. For the allocation to remain valid
// we need to prevent the GPU from going away, similarly to P2P mapped
// memory.
// Similarly for EGM memory.
//
// This field is not used for sparse mappings as they don't have an
// allocation and, hence, owning GPU.
@@ -208,6 +209,9 @@ typedef struct
// backing.
bool is_sysmem;
// EGM memory. If true, is_sysmem also has to be true and owning_gpu
// has to be valid.
bool is_egm;
// GPU page tables mapping the allocation
uvm_page_table_range_vec_t pt_range_vec;
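The invariant stated above could be asserted along these lines (a sketch; it assumes the external mapping type is uvm_ext_gpu_map_t, as the ext_map iterators elsewhere in this diff suggest, and example_* is a hypothetical name):
// An EGM mapping must also be a sysmem mapping and must name its owning GPU.
static void example_check_egm_invariant(const uvm_ext_gpu_map_t *ext_map)
{
    if (ext_map->is_egm) {
        UVM_ASSERT(ext_map->is_sysmem);
        UVM_ASSERT(ext_map->owning_gpu != NULL);
    }
}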

View File

@@ -222,6 +222,12 @@ NV_STATUS uvm_va_space_create(struct address_space *mapping, uvm_va_space_t **va
uvm_down_write_mmap_lock(current->mm);
uvm_va_space_down_write(va_space);
va_space->va_block_context = uvm_va_block_context_alloc(NULL);
if (!va_space->va_block_context) {
status = NV_ERR_NO_MEMORY;
goto fail;
}
status = uvm_perf_init_va_space_events(va_space, &va_space->perf_events);
if (status != NV_OK)
goto fail;
@@ -258,6 +264,7 @@ NV_STATUS uvm_va_space_create(struct address_space *mapping, uvm_va_space_t **va
fail:
uvm_perf_heuristics_unload(va_space);
uvm_perf_destroy_va_space_events(&va_space->perf_events);
uvm_va_block_context_free(va_space->va_block_context);
uvm_va_space_up_write(va_space);
uvm_up_write_mmap_lock(current->mm);
@@ -457,8 +464,6 @@ void uvm_va_space_destroy(uvm_va_space_t *va_space)
uvm_va_range_destroy(va_range, &deferred_free_list);
}
uvm_hmm_va_space_destroy(va_space);
uvm_range_group_radix_tree_destroy(va_space);
// Unregister all GPUs in the VA space. Note that this does not release the
@@ -466,11 +471,17 @@ void uvm_va_space_destroy(uvm_va_space_t *va_space)
for_each_va_space_gpu(gpu, va_space)
unregister_gpu(va_space, gpu, NULL, &deferred_free_list, NULL);
uvm_hmm_va_space_destroy(va_space);
uvm_perf_heuristics_unload(va_space);
uvm_perf_destroy_va_space_events(&va_space->perf_events);
va_space_remove_dummy_thread_contexts(va_space);
// Destroy the VA space's block context node tracking after all ranges have
// been destroyed as the VA blocks may reference it.
uvm_va_block_context_free(va_space->va_block_context);
uvm_va_space_up_write(va_space);
UVM_ASSERT(uvm_processor_mask_empty(&va_space->registered_gpus));
@@ -688,7 +699,7 @@ NV_STATUS uvm_va_space_register_gpu(uvm_va_space_t *va_space,
// Mixing coherent and non-coherent GPUs is not supported
for_each_va_space_gpu(other_gpu, va_space) {
if (uvm_gpu_is_coherent(gpu->parent) != uvm_gpu_is_coherent(other_gpu->parent)) {
if (uvm_parent_gpu_is_coherent(gpu->parent) != uvm_parent_gpu_is_coherent(other_gpu->parent)) {
status = NV_ERR_INVALID_DEVICE;
goto done;
}
@@ -729,7 +740,7 @@ NV_STATUS uvm_va_space_register_gpu(uvm_va_space_t *va_space,
processor_mask_array_set(va_space->has_nvlink, UVM_ID_CPU, gpu->id);
}
if (uvm_gpu_is_coherent(gpu->parent)) {
if (uvm_parent_gpu_is_coherent(gpu->parent)) {
processor_mask_array_set(va_space->has_native_atomics, gpu->id, UVM_ID_CPU);
if (gpu->mem_info.numa.enabled) {
@@ -1540,7 +1551,6 @@ static void remove_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space,
atomic_inc(&va_space->gpu_va_space_deferred_free.num_pending);
uvm_processor_mask_clear(&va_space->registered_gpu_va_spaces, gpu_va_space->gpu->id);
uvm_processor_mask_clear_atomic(&va_space->needs_fault_buffer_flush, gpu_va_space->gpu->id);
va_space->gpu_va_spaces[uvm_id_gpu_index(gpu_va_space->gpu->id)] = NULL;
gpu_va_space->state = UVM_GPU_VA_SPACE_STATE_DEAD;
}
@@ -1610,14 +1620,14 @@ NV_STATUS uvm_va_space_unregister_gpu_va_space(uvm_va_space_t *va_space, const N
return status;
}
bool uvm_va_space_peer_enabled(uvm_va_space_t *va_space, uvm_gpu_t *gpu1, uvm_gpu_t *gpu2)
bool uvm_va_space_peer_enabled(uvm_va_space_t *va_space, const uvm_gpu_t *gpu0, const uvm_gpu_t *gpu1)
{
size_t table_index;
UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpus, gpu0->id));
UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpus, gpu1->id));
UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpus, gpu2->id));
table_index = uvm_gpu_peer_table_index(gpu1->id, gpu2->id);
table_index = uvm_gpu_peer_table_index(gpu0->id, gpu1->id);
return !!test_bit(table_index, va_space->enabled_peers);
}
@@ -2073,9 +2083,16 @@ NV_STATUS uvm_service_block_context_init(void)
// Pre-allocate some fault service contexts for the CPU and add them to the global list
while (num_preallocated_contexts-- > 0) {
uvm_service_block_context_t *service_context = uvm_kvmalloc(sizeof(*service_context));
if (!service_context)
return NV_ERR_NO_MEMORY;
service_context->block_context = uvm_va_block_context_alloc(NULL);
if (!service_context->block_context) {
uvm_kvfree(service_context);
return NV_ERR_NO_MEMORY;
}
list_add(&service_context->cpu_fault.service_context_list, &g_cpu_service_block_context_list);
}
@@ -2089,6 +2106,7 @@ void uvm_service_block_context_exit(void)
// Free fault service contexts for the CPU and clear the global list
list_for_each_entry_safe(service_context, service_context_tmp, &g_cpu_service_block_context_list,
cpu_fault.service_context_list) {
uvm_va_block_context_free(service_context->block_context);
uvm_kvfree(service_context);
}
INIT_LIST_HEAD(&g_cpu_service_block_context_list);
@@ -2110,8 +2128,17 @@ static uvm_service_block_context_t *service_block_context_cpu_alloc(void)
uvm_spin_unlock(&g_cpu_service_block_context_list_lock);
if (!service_context)
if (!service_context) {
service_context = uvm_kvmalloc(sizeof(*service_context));
service_context->block_context = uvm_va_block_context_alloc(NULL);
if (!service_context->block_context) {
uvm_kvfree(service_context);
service_context = NULL;
}
}
else {
uvm_va_block_context_init(service_context->block_context, NULL);
}
return service_context;
}
@@ -2137,6 +2164,7 @@ static vm_fault_t uvm_va_space_cpu_fault(uvm_va_space_t *va_space,
NV_STATUS status = uvm_global_get_status();
bool tools_enabled;
bool major_fault = false;
bool is_remote_mm = false;
uvm_service_block_context_t *service_context;
uvm_global_processor_mask_t gpus_to_check_for_ecc;
@@ -2177,7 +2205,7 @@ static vm_fault_t uvm_va_space_cpu_fault(uvm_va_space_t *va_space,
// mmap_lock held on the CPU fault path, so tell the fault handler to use
// that one. current->mm might differ if we're on the access_process_vm
// (ptrace) path or if another driver is calling get_user_pages.
service_context->block_context.mm = vma->vm_mm;
service_context->block_context->mm = vma->vm_mm;
// The mmap_lock might be held in write mode, but the mode doesn't matter
// for the purpose of lock ordering and we don't rely on it being in write
@@ -2216,25 +2244,32 @@ static vm_fault_t uvm_va_space_cpu_fault(uvm_va_space_t *va_space,
uvm_tools_record_throttling_end(va_space, fault_addr, UVM_ID_CPU);
if (is_hmm) {
// Note that normally we should find a va_block for the faulting
// address because the block had to be created when migrating a
// page to the GPU and a device private PTE inserted into the CPU
// page tables in order for migrate_to_ram() to be called. Not
// finding it means the PTE was remapped to a different virtual
// address with mremap() so create a new va_block if needed.
status = uvm_hmm_va_block_find_create(va_space,
fault_addr,
&service_context->block_context.hmm.vma,
&va_block);
if (status != NV_OK)
break;
if (va_space->va_space_mm.mm == vma->vm_mm) {
// Note that normally we should find a va_block for the faulting
// address because the block had to be created when migrating a
// page to the GPU and a device private PTE inserted into the CPU
// page tables in order for migrate_to_ram() to be called. Not
// finding it means the PTE was remapped to a different virtual
// address with mremap() so create a new va_block if needed.
status = uvm_hmm_va_block_find_create(va_space,
fault_addr,
&service_context->block_context->hmm.vma,
&va_block);
if (status != NV_OK)
break;
UVM_ASSERT(service_context->block_context.hmm.vma == vma);
status = uvm_hmm_migrate_begin(va_block);
if (status != NV_OK)
break;
UVM_ASSERT(service_context->block_context->hmm.vma == vma);
status = uvm_hmm_migrate_begin(va_block);
if (status != NV_OK)
break;
service_context->cpu_fault.vmf = vmf;
service_context->cpu_fault.vmf = vmf;
}
else {
is_remote_mm = true;
status = uvm_hmm_remote_cpu_fault(vmf);
break;
}
}
else {
status = uvm_va_block_find_create_managed(va_space, fault_addr, &va_block);
@@ -2265,7 +2300,7 @@ static vm_fault_t uvm_va_space_cpu_fault(uvm_va_space_t *va_space,
tools_enabled = va_space->tools.enabled;
if (status == NV_OK) {
if (status == NV_OK && !is_remote_mm) {
uvm_va_space_global_gpus_in_mask(va_space,
&gpus_to_check_for_ecc,
&service_context->cpu_fault.gpus_to_check_for_ecc);
@@ -2275,7 +2310,7 @@ static vm_fault_t uvm_va_space_cpu_fault(uvm_va_space_t *va_space,
uvm_va_space_up_read(va_space);
uvm_record_unlock_mmap_lock_read(vma->vm_mm);
if (status == NV_OK) {
if (status == NV_OK && !is_remote_mm) {
status = uvm_global_mask_check_ecc_error(&gpus_to_check_for_ecc);
uvm_global_mask_release(&gpus_to_check_for_ecc);
}

View File

@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2022 NVIDIA Corporation
Copyright (c) 2015-2023 NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@@ -253,17 +253,6 @@ struct uvm_va_space_struct
// corrupting state.
uvm_processor_mask_t gpu_unregister_in_progress;
// On VMA destruction, the fault buffer needs to be flushed for all the GPUs
// registered in the VA space to avoid leaving stale entries of the VA range
// that is going to be destroyed. Otherwise, these fault entries can be
// attributed to new VA ranges reallocated at the same addresses. However,
// uvm_vm_close is called with mm->mmap_lock taken and we cannot take the
// ISR lock. Therefore, we use a flag to notify the GPU fault handler that
// the fault buffer needs to be flushed, before servicing the faults that
// belong to the va_space. The bits are set and cleared atomically so no
// va_space lock is required.
uvm_processor_mask_t needs_fault_buffer_flush;
// Mask of processors that are participating in system-wide atomics
uvm_processor_mask_t system_wide_atomics_enabled_processors;
@@ -335,7 +324,7 @@ struct uvm_va_space_struct
// Block context used for GPU unmap operations so that allocation is not
// required on the teardown path. This can only be used while the VA space
// lock is held in write mode. Access using uvm_va_space_block_context().
uvm_va_block_context_t va_block_context;
uvm_va_block_context_t *va_block_context;
NvU64 initialization_flags;
@@ -541,7 +530,7 @@ void uvm_va_space_detach_all_user_channels(uvm_va_space_t *va_space, struct list
// Returns whether peer access between these two GPUs has been enabled in this
// VA space. Both GPUs must be registered in the VA space.
bool uvm_va_space_peer_enabled(uvm_va_space_t *va_space, uvm_gpu_t *gpu1, uvm_gpu_t *gpu2);
bool uvm_va_space_peer_enabled(uvm_va_space_t *va_space, const uvm_gpu_t *gpu0, const uvm_gpu_t *gpu1);
// Returns the va_space this file points to. Returns NULL if this file
// does not point to a va_space.
@@ -575,8 +564,8 @@ static uvm_va_block_context_t *uvm_va_space_block_context(uvm_va_space_t *va_spa
if (mm)
uvm_assert_mmap_lock_locked(mm);
uvm_va_block_context_init(&va_space->va_block_context, mm);
return &va_space->va_block_context;
uvm_va_block_context_init(va_space->va_block_context, mm);
return va_space->va_block_context;
}
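Calling convention for the accessor above, sketched under the stated locking rules (hypothetical example_* name; if mm is non-NULL the caller must already hold its mmap_lock):
// The VA space lock must be held in write mode around any use of the shared
// block context, since there is only one per VA space.
static void example_use_space_block_context(uvm_va_space_t *va_space, struct mm_struct *mm)
{
    uvm_va_block_context_t *ctx;

    uvm_va_space_down_write(va_space);

    ctx = uvm_va_space_block_context(va_space, mm);
    // ... perform unmap/teardown work that needs a block context ...
    (void)ctx;

    uvm_va_space_up_write(va_space);
}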
// Retains the GPU VA space memory object. destroy_gpu_va_space and

Some files were not shown because too many files have changed in this diff.