550.163.01

Bernhard Stoeckner
2025-04-17 17:48:53 +02:00
parent ca09591fbd
commit 23e9e76214
62 changed files with 780 additions and 292 deletions

View File

@@ -143,6 +143,11 @@ nvidia_vma_access(
         return -EINVAL;
     }

+    if (write && !(mmap_context->prot & NV_PROTECT_WRITEABLE))
+    {
+        return -EACCES;
+    }
+
     offset = mmap_context->mmap_start;

     if (nv->flags & NV_FLAG_CONTROL)
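
For context on the hunk above: nvidia_vma_access backs the VMA .access callback, which the kernel invokes when something like ptrace or /proc/pid/mem peeks or pokes a process mapping. The added check refuses write accesses when the mapping was established without write permission. The sketch below is not from the commit; it is a minimal, hypothetical .access handler (all demo_* names are invented) showing the same pattern against the stock vm_operations_struct interface.

/*
 * Hedged sketch, not part of the commit: a minimal .access handler that
 * mirrors the check added above.  All demo_* names are hypothetical.
 */
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/string.h>

#define DEMO_PROT_WRITEABLE 0x1

struct demo_mmap_context {
    unsigned long prot;   /* protection recorded when the mapping was made */
    void *backing;        /* kernel-side buffer backing the mapping        */
    size_t size;
};

static int demo_vma_access(struct vm_area_struct *vma, unsigned long addr,
                           void *buf, int len, int write)
{
    struct demo_mmap_context *ctx = vma->vm_private_data;
    unsigned long offset = addr - vma->vm_start;

    if (ctx == NULL || offset + len > ctx->size)
        return -EINVAL;

    /* Same idea as the new driver check: refuse writes to read-only mappings. */
    if (write && !(ctx->prot & DEMO_PROT_WRITEABLE))
        return -EACCES;

    if (write)
        memcpy(ctx->backing + offset, buf, len);
    else
        memcpy(buf, ctx->backing + offset, len);

    return len;    /* .access returns the number of bytes transferred */
}

static const struct vm_operations_struct demo_vm_ops = {
    .access = demo_vma_access,
};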

View File

@@ -1628,8 +1628,6 @@ static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
                                        nv_linux_file_private_t *nvlfp,
                                        struct inode *inode)
 {
-    down(&nvl->mmap_lock);
-
     /* Set up struct address_space for use with unmap_mapping_range() */
     address_space_init_once(&nvlfp->mapping);
     nvlfp->mapping.host = inode;
@@ -1638,10 +1636,20 @@ static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
     nvlfp->mapping.backing_dev_info = inode->i_mapping->backing_dev_info;
 #endif
     file->f_mapping = &nvlfp->mapping;
+}

-    /* Add nvlfp to list of open files in nvl for mapping revocation */
+/* Adds nvlfp to list of open files for mapping revocation */
+static void nv_add_open_file(nv_linux_state_t *nvl,
+                             nv_linux_file_private_t *nvlfp)
+{
+    nvlfp->nvptr = nvl;
+
+    /*
+     * nvl->open_files and other mapping revocation members in nv_linux_state_t
+     * are protected by nvl->mmap_lock instead of nvl->ldata_lock.
+     */
+    down(&nvl->mmap_lock);
     list_add(&nvlfp->entry, &nvl->open_files);
-
     up(&nvl->mmap_lock);
 }
@@ -1691,11 +1699,12 @@ static void nvidia_open_deferred(void *nvlfp_raw)
      */
     down(&nvl->ldata_lock);
     rc = nv_open_device_for_nvlfp(NV_STATE_PTR(nvl), nvlfp->sp, nvlfp);
+    up(&nvl->ldata_lock);

-    /* Set nvptr only upon success (where nvl->usage_count is incremented) */
+    /* Only add open file tracking where nvl->usage_count is incremented */
     if (rc == 0)
-        nvlfp->nvptr = nvl;
+        nv_add_open_file(nvl, nvlfp);

-    up(&nvl->ldata_lock);
-
     complete_all(&nvlfp->open_complete);
 }
@@ -1814,6 +1823,7 @@ nvidia_open(
     }

     nv = NV_STATE_PTR(nvl);
+    nv_init_mapping_revocation(nvl, file, nvlfp, inode);

     if (nv_try_lock_foreground_open(file, nvl) == 0)
     {
@@ -1824,11 +1834,11 @@ nvidia_open(
         rc = nv_open_device_for_nvlfp(nv, nvlfp->sp, nvlfp);
+        up(&nvl->ldata_lock);

-        /* Set nvptr only upon success (where nvl->usage_count is incremented) */
+        /* Only add open file tracking where nvl->usage_count is incremented */
         if (rc == 0)
-            nvlfp->nvptr = nvl;
+            nv_add_open_file(nvl, nvlfp);

-        up(&nvl->ldata_lock);
-
         complete_all(&nvlfp->open_complete);
     }
@@ -1883,10 +1893,6 @@ failed:
             NV_SET_FILE_PRIVATE(file, NULL);
         }
     }
-    else
-    {
-        nv_init_mapping_revocation(nvl, file, nvlfp, inode);
-    }

     return rc;
 }
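
Taken together, the nv.c hunks above move the open-file bookkeeping out of nv_init_mapping_revocation (which no longer takes nvl->mmap_lock) and into a new nv_add_open_file helper that runs only after a successful open, once nvl->ldata_lock has been dropped. A rough sketch of that locking shape follows; the demo_* names are invented and are not the driver's actual types.

#include <linux/semaphore.h>
#include <linux/list.h>

/* Hedged sketch only -- demo_* names are invented, not driver structures. */
struct demo_device {
    struct semaphore open_lock;    /* serializes device open/close          */
    struct semaphore files_lock;   /* protects open_files and nothing else  */
    struct list_head open_files;   /* walked when mappings must be revoked  */
};

struct demo_file {
    struct list_head entry;
    struct demo_device *dev;
};

/* Held only around the list update, never across the whole open path. */
static void demo_track_open_file(struct demo_device *dev, struct demo_file *fp)
{
    fp->dev = dev;

    down(&dev->files_lock);
    list_add(&fp->entry, &dev->open_files);
    up(&dev->files_lock);
}

static int demo_open(struct demo_device *dev, struct demo_file *fp)
{
    int rc;

    down(&dev->open_lock);
    rc = 0;                        /* stand-in for the real hardware open */
    up(&dev->open_lock);

    /* Register the file for revocation only if the open succeeded. */
    if (rc == 0)
        demo_track_open_file(dev, fp);

    return rc;
}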

View File

@@ -161,6 +161,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_attr_guest_sev_snp
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += hv_get_isolation_type
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += follow_pfn
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += ptep_get
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
 NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
@@ -230,6 +231,8 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_b
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto_akcipher_verify
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
+NV_CONFTEST_SYMBOL_COMPILE_TESTS += follow_pte_arg_vma
+NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pfnmap_start
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_pci_ats_supported
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes
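
The Kbuild hunks above register new conftest checks (ptep_get, follow_pte_arg_vma, and the follow_pfnmap_start export check) whose results gate the #if blocks in the os-mlock.c change further down. The snippet below only illustrates the general shape of such a compile probe; it is not the literal code generated by the driver's conftest.sh, and the function name is invented.

/*
 * Hedged illustration of a feature probe: if this compiles against the
 * target kernel's headers, the build can define something like
 * NV_PTEP_GET_PRESENT for the driver sources to test with #if defined(...).
 */
#include <linux/pgtable.h>

void conftest_ptep_get(pte_t *ptep)
{
    pte_t pte = ptep_get(ptep);
    (void)pte;
}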

View File

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,14 +32,27 @@
 #define NV_NUM_PIN_PAGES_PER_ITERATION 0x80000
 #endif

-static inline int nv_follow_pfn(struct vm_area_struct *vma,
-                                unsigned long address,
-                                unsigned long *pfn)
+static inline int nv_follow_flavors(struct vm_area_struct *vma,
+                                    unsigned long address,
+                                    unsigned long *pfn)
 {
-#if defined(NV_FOLLOW_PFN_PRESENT)
-    return follow_pfn(vma, address, pfn);
-#else
-#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
+#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start
+    struct follow_pfnmap_args args = {};
+    int rc;
+
+    args.address = address;
+    args.vma = vma;
+
+    rc = follow_pfnmap_start(&args);
+    if (rc)
+        return rc;
+
+    *pfn = args.pfn;
+    follow_pfnmap_end(&args);
+
+    return 0;
+#elif NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
     int status = 0;
     spinlock_t *ptl;
     pte_t *ptep;
@@ -47,17 +60,40 @@ static inline int nv_follow_pfn(struct vm_area_struct *vma,
     if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
         return status;

+    //
+    // The first argument of follow_pte() was changed from
+    // mm_struct to vm_area_struct in kernel 6.10.
+    //
+#if defined(NV_FOLLOW_PTE_ARG1_VMA)
+    status = follow_pte(vma, address, &ptep, &ptl);
+#else
     status = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+#endif
     if (status)
         return status;
+
+#if defined(NV_PTEP_GET_PRESENT)
     *pfn = pte_pfn(ptep_get(ptep));
+#else
+    *pfn = pte_pfn(READ_ONCE(*ptep));
+#endif

     // The lock is acquired inside follow_pte()
     pte_unmap_unlock(ptep, ptl);
     return 0;
-#else // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
+#else
     return -1;
-#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
+#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start
+}
+
+static inline int nv_follow_pfn(struct vm_area_struct *vma,
+                                unsigned long address,
+                                unsigned long *pfn)
+{
+#if defined(NV_FOLLOW_PFN_PRESENT)
+    return follow_pfn(vma, address, pfn);
+#else
+    return nv_follow_flavors(vma, address, pfn);
 #endif
 }
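
The refactor above leaves nv_follow_pfn as a thin wrapper: it calls the kernel's follow_pfn() when that symbol is still available, and otherwise falls back to nv_follow_flavors, which prefers follow_pfnmap_start()/follow_pfnmap_end() on newer kernels and then follow_pte() (with the 6.10 vma-argument variant and the ptep_get fallback). Below is a hedged sketch of how a caller sitting in the same file might consume it, resolving the PFNs of a user mapping under the mm's read lock; demo_resolve_pfns is an invented name, not a driver function.

#include <linux/mm.h>

/* Hedged sketch: assumes it lives next to nv_follow_pfn() in os-mlock.c. */
static int demo_resolve_pfns(struct vm_area_struct *vma,
                             unsigned long start,
                             unsigned long page_count,
                             unsigned long *pfn_array)
{
    unsigned long i;
    int rc = 0;

    mmap_read_lock(vma->vm_mm);
    for (i = 0; i < page_count; i++)
    {
        rc = nv_follow_pfn(vma, start + (i << PAGE_SHIFT), &pfn_array[i]);
        if (rc != 0)
            break;
    }
    mmap_read_unlock(vma->vm_mm);

    return rc;
}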