535.261.03

This commit is contained in:
Bernhard Stoeckner
2025-07-17 17:13:07 +02:00
parent f468568958
commit 9c67f19366
37 changed files with 425 additions and 177 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -562,6 +562,9 @@ err:
void NV_API_CALL nv_cap_close_fd(int fd)
{
struct file *file;
NvBool is_nv_cap_fd;
if (fd == -1)
{
return;
@@ -580,6 +583,30 @@ void NV_API_CALL nv_cap_close_fd(int fd)
return;
}
file = fget(fd);
if (file == NULL)
{
task_unlock(current);
return;
}
/* Make sure the fd belongs to the nv-cap-drv */
is_nv_cap_fd = (file->f_op == &g_nv_cap_drv_fops);
fput(file);
/*
* In some cases, we may be in shutdown path and execute
* in context of unrelated process. In that case we should
* not access any 'current' state, but instead let kernel
* clean up capability files on its own.
*/
if (!is_nv_cap_fd)
{
task_unlock(current);
return;
}
/*
* From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd)
* and started exporting __close_fd, as of this commit:

View File

@@ -780,10 +780,9 @@ nv_dma_buf_map(
// On non-coherent platforms, importers must be able to handle peer
// MMIO resources not backed by struct page.
//
#if defined(NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT) && \
defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
#if defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
if (!priv->nv->coherent &&
dma_buf_attachment_is_dynamic(attachment) &&
(attachment->importer_ops != NULL) &&
!attachment->peer2peer)
{
nv_printf(NV_DBG_ERRORS,

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -804,3 +804,75 @@ void NV_API_CALL nv_set_safe_to_mmap_locked(
nvl->safe_to_mmap = safe_to_mmap;
}
#if !NV_CAN_CALL_VMA_START_WRITE
/*
 * Write-lock @vma by adding the writer bias (VMA_LOCK_OFFSET) to
 * vma->vm_refcnt and waiting until the count drains to the expected
 * target, i.e. until all readers are gone.  Open-coded counterpart of
 * the kernel's __vma_enter_locked() for kernels where that helper is
 * not callable from this module.
 *
 * @vma:       VMA to write-lock.
 * @detaching: when NV_FALSE the caller still holds its own reference,
 *             so the drained target is VMA_LOCK_OFFSET + 1 instead of
 *             VMA_LOCK_OFFSET.
 *
 * Returns NV_TRUE when the writer bias was installed and the wait
 * completed; NV_FALSE when vm_refcnt was already zero (VMA detached)
 * or the wait loop was abandoned via the signal check below.
 */
static NvBool nv_vma_enter_locked(struct vm_area_struct *vma, NvBool detaching)
{
NvU32 tgt_refcnt = VMA_LOCK_OFFSET;
NvBool interrupted = NV_FALSE;
/* Non-detaching callers keep their own reference alive, so the fully
 * drained state is one count above the bare writer bias. */
if (!detaching)
{
tgt_refcnt++;
}
/* Install the writer bias; fail if the VMA is already detached
 * (refcount at zero cannot be resurrected). */
if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt))
{
return NV_FALSE;
}
/* Lockdep annotation only: record that we are acquiring the per-VMA
 * lock's dependency map. */
rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
/* Register on the mm-wide rcuwait so exiting readers can wake us. */
prepare_to_rcuwait(&vma->vm_mm->vma_writer_wait);
for (;;)
{
set_current_state(TASK_UNINTERRUPTIBLE);
/* All readers have drained once the count equals the target. */
if (refcount_read(&vma->vm_refcnt) == tgt_refcnt)
break;
/*
 * NOTE(review): signal_pending_state() with a pure
 * TASK_UNINTERRUPTIBLE state normally reports no pending signals
 * (it only fires for interruptible/killable states), so this
 * bail-out path looks effectively unreachable — confirm whether
 * TASK_KILLABLE was intended, or whether this is deliberate
 * defensive code.
 */
if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
{
interrupted = NV_TRUE;
break;
}
schedule();
}
// This is an open-coded version of finish_rcuwait().
rcu_assign_pointer(vma->vm_mm->vma_writer_wait.task, NULL);
__set_current_state(TASK_RUNNING);
if (interrupted)
{
// Clean up on error: release refcount and dep_map
/* NOTE(review): the NvBool result of refcount_sub_and_test() is
 * dropped here; presumably the VMA cannot reach zero on this path
 * because the caller's reference is still held — verify. */
refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
return NV_FALSE;
}
/* Lockdep: mark the lock as acquired after the (possibly contended)
 * wait completed. */
lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
return NV_TRUE;
}
/*
 * Module-local replacement for the kernel's vma_start_write(), used on
 * kernels where __vma_start_write() cannot be called directly (see the
 * NV_CAN_CALL_VMA_START_WRITE gate).  Write-locks @vma for the current
 * mmap_lock write cycle and then immediately drops the writer bias,
 * leaving vma->vm_lock_seq published so per-VMA-lock readers back off.
 */
void nv_vma_start_write(struct vm_area_struct *vma)
{
NvU32 mm_lock_seq;
NvBool locked;
/* Already write-locked in this mm write cycle — nothing to do.
 * __is_vma_write_locked() also fills in the current mm lock
 * sequence number for the store below. */
if (__is_vma_write_locked(vma, &mm_lock_seq))
return;
locked = nv_vma_enter_locked(vma, NV_FALSE);
/* Publish the sequence number so readers see the VMA as
 * write-locked for the remainder of this mmap_lock write cycle.
 * Written even when the drain wait failed, matching upstream
 * vma_start_write() behavior. */
WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
if (locked)
{
NvBool detached;
/* Drop the writer bias taken in nv_vma_enter_locked(); the VMA
 * must not become detached (refcount zero) at this point. */
detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
WARN_ON_ONCE(detached);
}
}
EXPORT_SYMBOL(nv_vma_start_write);
#endif // !NV_CAN_CALL_VMA_START_WRITE

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -154,8 +154,13 @@ void NV_API_CALL nv_create_nano_timer(
nv_nstimer->nv_nano_timer_callback = nvidia_nano_timer_callback;
#if NV_NANO_TIMER_USE_HRTIMER
#if NV_IS_EXPORT_SYMBOL_PRESENT_hrtimer_setup
hrtimer_setup(&nv_nstimer->hr_timer, &nv_nano_timer_callback_typed_data,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
#else
hrtimer_init(&nv_nstimer->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
nv_nstimer->hr_timer.function = nv_nano_timer_callback_typed_data;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_hrtimer_setup
#else
#if defined(NV_TIMER_SETUP_PRESENT)
timer_setup(&nv_nstimer->jiffy_timer, nv_jiffy_timer_callback_typed_data, 0);
@@ -213,7 +218,7 @@ void NV_API_CALL nv_cancel_nano_timer(
#if NV_NANO_TIMER_USE_HRTIMER
hrtimer_cancel(&nv_nstimer->hr_timer);
#else
del_timer_sync(&nv_nstimer->jiffy_timer);
nv_timer_delete_sync(&nv_nstimer->jiffy_timer);
#endif
}

View File

@@ -3753,7 +3753,7 @@ int NV_API_CALL nv_stop_rc_timer(
nv_printf(NV_DBG_INFO, "NVRM: stopping rc timer\n");
nv->rc_timer_enabled = 0;
del_timer_sync(&nvl->rc_timer.kernel_timer);
nv_timer_delete_sync(&nvl->rc_timer.kernel_timer);
nv_printf(NV_DBG_INFO, "NVRM: rc timer stopped\n");
return 0;
@@ -3797,7 +3797,7 @@ void NV_API_CALL nv_stop_snapshot_timer(void)
NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags);
if (timer_active)
del_timer_sync(&nvl->snapshot_timer.kernel_timer);
nv_timer_delete_sync(&nvl->snapshot_timer.kernel_timer);
}
void NV_API_CALL nv_flush_snapshot_timer(void)

View File

@@ -186,7 +186,6 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_has_dynamic_attachment
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
@@ -231,6 +230,11 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
NV_CONFTEST_SYMBOL_COMPILE_TESTS += follow_pte_arg_vma
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pfnmap_start
NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_hrtimer_setup
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl___vma_start_write
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_iommu_dev_enable_feature
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_iommu_dev_disable_feature
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops