Maneet Singh
2025-06-16 19:28:19 -07:00
parent 30e15d79de
commit fade1f7b20
35 changed files with 50249 additions and 49935 deletions

View File

@@ -79,7 +79,7 @@ ccflags-y += -I$(src)/common/inc
ccflags-y += -I$(src)
ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
ccflags-y += -D__KERNEL__ -DMODULE -DNVRM
-ccflags-y += -DNV_VERSION_STRING=\"575.57.08\"
+ccflags-y += -DNV_VERSION_STRING=\"575.64\"
ifneq ($(SYSSRCHOST1X),)
ccflags-y += -I$(SYSSRCHOST1X)
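For context only (not part of this commit; the function name below is hypothetical), a build-time define passed through ccflags-y such as NV_VERSION_STRING is consumed from C as a plain string literal, for example when reporting the module version at load time:

#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical sketch: log the version string baked in via ccflags-y.
 * After this change, NV_VERSION_STRING expands to "575.64". */
static void nv_example_log_version(void)
{
    pr_info("NVRM: loading NVIDIA kernel module %s\n", NV_VERSION_STRING);
}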

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -297,9 +297,21 @@ static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
#endif
}
#define NV_CAN_CALL_VMA_START_WRITE 1
#if !NV_CAN_CALL_VMA_START_WRITE
/*
* Commit 45ad9f5290dc updated vma_start_write() to call __vma_start_write().
*/
void nv_vma_start_write(struct vm_area_struct *);
#endif
static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
-#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
+#if !NV_CAN_CALL_VMA_START_WRITE
+nv_vma_start_write(vma);
+ACCESS_PRIVATE(vma, __vm_flags) |= flags;
+#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_set(vma, flags);
#else
vma->vm_flags |= flags;
@@ -308,7 +320,10 @@ static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
-#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
+#if !NV_CAN_CALL_VMA_START_WRITE
+nv_vma_start_write(vma);
+ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
+#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_clear(vma, flags);
#else
vma->vm_flags &= ~flags;
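As a hedged usage sketch (not from this patch; the handler name and flag choice are illustrative), driver code is expected to update vm_flags only through the wrappers patched above, so that on kernels with a const vm_flags field and no callable vma_start_write() the VMA is write-locked via nv_vma_start_write() before __vm_flags is modified:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical mmap handler: nv_vm_flags_set() (from the header patched
 * above) selects the correct flag-update mechanism for the running kernel. */
static int nv_example_mmap(struct file *file, struct vm_area_struct *vma)
{
    nv_vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
    return 0;
}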

View File

@@ -7604,7 +7604,7 @@ compile_test() {
CODE="
#include <linux/mmzone.h>
int conftest_page_pgmap(void) {
-return page_pgmap(NULL);
+return page_pgmap();
}"
compile_check_conftest "$CODE" "NV_PAGE_PGMAP_PRESENT" "" "functions"
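compile_check_conftest compiles the snippet above against the target kernel and, if it builds, defines NV_PAGE_PGMAP_PRESENT for the rest of the module build. Below is a hedged sketch of how such a "functions" conftest macro is typically consumed; the wrapper name and the page->pgmap fallback are assumptions, not shown in this commit:

#include <linux/memremap.h>
#include <linux/mmzone.h>

/* Hypothetical wrapper: prefer the page_pgmap() accessor when the conftest
 * found it, otherwise fall back to reading the older page->pgmap field. */
static inline struct dev_pagemap *nv_example_page_pgmap(struct page *page)
{
#if defined(NV_PAGE_PGMAP_PRESENT)
    return page_pgmap(page);
#else
    return page->pgmap;
#endif
}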

View File

@@ -38,6 +38,7 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group

View File

@@ -757,6 +757,20 @@ NV_STATUS uvm_ats_service_access_counters(uvm_gpu_va_space_t *gpu_va_space,
&ats_context->access_counters.accessed_mask,
&ats_context->prefetch_state.residency_mask);
// Pretend that pages which are already resident at the destination GPU were
// migrated just now. This ensures the access counter is cleared even if
// the accessed pages were already resident on the target.
// TODO: Bug 5296998: [uvm][ats] Not clearing stale access counter
// notifications can lead to missed migrations
// The same stale-notification problem exists for migrations to locations
// other than local vidmem. However, stale notifications for data migrated
// to another remote location are identical to those triggered by accessing
// memory that cannot or should not be migrated.
if (uvm_id_equal(ats_context->residency_id, gpu_va_space->gpu->id)) {
uvm_page_mask_copy(&ats_context->access_counters.migrated_mask,
&ats_context->prefetch_state.residency_mask);
}
for_each_va_block_subregion_in_mask(subregion, &ats_context->access_counters.accessed_mask, region) {
NV_STATUS status;
NvU64 start = base + (subregion.first * PAGE_SIZE);
@@ -769,7 +783,7 @@ NV_STATUS uvm_ats_service_access_counters(uvm_gpu_va_space_t *gpu_va_space,
status = service_ats_requests(gpu_va_space, vma, start, length, access_type, service_type, ats_context);
-// clear access counters if pages were migrated or migration needs to
+// Clear access counters if pages were migrated or migration needs to
// be retried
if (status == NV_OK || status == NV_ERR_BUSY_RETRY)
uvm_page_mask_region_fill(migrated_mask, subregion);

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -847,3 +847,75 @@ void NV_API_CALL nv_set_safe_to_mmap_locked(
nvl->safe_to_mmap = safe_to_mmap;
}
#if !NV_CAN_CALL_VMA_START_WRITE
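/*
 * Take a write-side reference on the VMA (add VMA_LOCK_OFFSET to vm_refcnt)
 * and wait for outstanding readers to drain. Returns NV_FALSE if the VMA is
 * already detached or if the wait is interrupted by a pending signal.
 */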
static NvBool nv_vma_enter_locked(struct vm_area_struct *vma, NvBool detaching)
{
NvU32 tgt_refcnt = VMA_LOCK_OFFSET;
NvBool interrupted = NV_FALSE;
if (!detaching)
{
tgt_refcnt++;
}
if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt))
{
return NV_FALSE;
}
rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
prepare_to_rcuwait(&vma->vm_mm->vma_writer_wait);
for (;;)
{
set_current_state(TASK_UNINTERRUPTIBLE);
if (refcount_read(&vma->vm_refcnt) == tgt_refcnt)
break;
if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
{
interrupted = NV_TRUE;
break;
}
schedule();
}
// This is an open-coded version of finish_rcuwait().
rcu_assign_pointer(vma->vm_mm->vma_writer_wait.task, NULL);
__set_current_state(TASK_RUNNING);
if (interrupted)
{
// Clean up on error: release refcount and dep_map
refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
return NV_FALSE;
}
lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
return NV_TRUE;
}
/*
 * Write-lock the VMA and manage its refcount, mirroring the kernel's
 * vma_start_write() on kernels where that helper cannot be called directly.
 */
void nv_vma_start_write(struct vm_area_struct *vma)
{
NvU32 mm_lock_seq;
NvBool locked;
if (__is_vma_write_locked(vma, &mm_lock_seq))
return;
locked = nv_vma_enter_locked(vma, NV_FALSE);
WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
if (locked)
{
NvBool detached;
detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
WARN_ON_ONCE(detached);
}
}
EXPORT_SYMBOL(nv_vma_start_write);
#endif // !NV_CAN_CALL_VMA_START_WRITE

View File

@@ -240,6 +240,7 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___platform_driver_regis
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___platform_driver_register
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_hrtimer_setup
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops