550.163.01

Bernhard Stoeckner
2025-04-17 17:48:53 +02:00
parent ca09591fbd
commit 23e9e76214
62 changed files with 780 additions and 292 deletions

View File

@@ -86,7 +86,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.144.03\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.163.01\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
@@ -186,6 +186,7 @@ NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags)
NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(EXTRA_CFLAGS) -fno-pie
NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign)
NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,)
NV_CONFTEST_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types,)
NV_CONFTEST_CFLAGS += -Wno-error
NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h

View File

@@ -694,6 +694,42 @@ nvPrevPow2_U64(const NvU64 x )
} \
}
//
// Bug 4851259: Newly added functions must be hidden from certain HS-signed
// ucode compilers to avoid signature mismatch.
//
#ifndef NVDEC_1_0
/*!
* Returns the position of the nth set bit in the given mask.
*
* Returns -1 if mask has fewer than (n + 1) bits set.
*
* n is 0-indexed and has valid values 0..31 inclusive, so the "zeroth" set bit
* is the lowest set bit.
*
* For example, if mask = 0x000000F0u and n = 1, the return value will be 5.
* For example, if mask = 0x000000F0u and n = 4, the return value will be -1.
*/
static NV_FORCEINLINE NvS32
nvGetNthSetBitIndex32(NvU32 mask, NvU32 n)
{
NvU32 seenSetBitsCount = 0;
NvS32 index;
FOR_EACH_INDEX_IN_MASK(32, index, mask)
{
if (seenSetBitsCount == n)
{
return index;
}
++seenSetBitsCount;
}
FOR_EACH_INDEX_IN_MASK_END;
return -1;
}
#endif // NVDEC_1_0
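For reference, a minimal standalone C sketch of the same semantics (hypothetical helper name; a plain bit-scan loop stands in for the driver's FOR_EACH_INDEX_IN_MASK macro) reproduces the two examples from the comment above:

#include <stdio.h>

/* Hypothetical stand-in for nvGetNthSetBitIndex32(): returns the index of the
 * nth (0-indexed) set bit in mask, or -1 if fewer than n + 1 bits are set. */
static int get_nth_set_bit_index32(unsigned int mask, unsigned int n)
{
    unsigned int seen = 0;
    int index;

    for (index = 0; index < 32; index++) {
        if (!(mask & (1u << index)))
            continue;           /* skip clear bits */
        if (seen == n)
            return index;       /* set bits are counted from the LSB */
        ++seen;
    }
    return -1;                  /* mask has fewer than n + 1 set bits */
}

int main(void)
{
    printf("%d\n", get_nth_set_bit_index32(0x000000F0u, 1)); /* prints 5  */
    printf("%d\n", get_nth_set_bit_index32(0x000000F0u, 4)); /* prints -1 */
    return 0;
}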
//
// Size to use when declaring variable-sized arrays
//

View File

@@ -5321,6 +5321,45 @@ compile_test() {
compile_check_conftest "$CODE" "NV_FOLLOW_PFN_PRESENT" "" "functions"
;;
follow_pte_arg_vma)
#
# Determine if the first argument of follow_pte is
# mm_struct or vm_area_struct.
#
# The first argument was changed from mm_struct to vm_area_struct by
# commit 29ae7d96d166 ("mm: pass VMA instead of MM to follow_pte()")
#
CODE="
#include <linux/mm.h>
typeof(follow_pte) conftest_follow_pte_has_vma_arg;
int conftest_follow_pte_has_vma_arg(struct vm_area_struct *vma,
unsigned long address,
pte_t **ptep,
spinlock_t **ptl) {
return 0;
}"
compile_check_conftest "$CODE" "NV_FOLLOW_PTE_ARG1_VMA" "" "types"
;;
ptep_get)
#
# Determine if ptep_get() is present.
#
# ptep_get() was added by commit 481e980a7c19
# ("mm: Allow arches to provide ptep_get()")
#
CODE="
#include <linux/mm.h>
void conftest_ptep_get(void) {
ptep_get();
}"
compile_check_conftest "$CODE" "NV_PTEP_GET_PRESENT" "" "functions"
;;
drm_plane_atomic_check_has_atomic_state_arg)
#
# Determine if drm_plane_helper_funcs::atomic_check takes 'state'
@@ -6269,6 +6308,32 @@ compile_test() {
compile_check_conftest "$CODE" "NV_NUM_REGISTERED_FB_PRESENT" "" "types"
;;
acpi_video_register_backlight)
#
# Determine if acpi_video_register_backlight() function is present
#
# acpi_video_register_backlight was added by commit 3dbc80a3e4c55c
# (ACPI: video: Make backlight class device registration a separate
# step (v2)) for v6.0 (2022-09-02).
# Note: the include directive for <linux/types.h> in this conftest is
# necessary in order to support kernels between commit 0b9f7d93ca61
# ("ACPI / i915: ignore firmware requests for backlight change") for
# v3.16 (2014-07-07) and commit 3bd6bce369f5 ("ACPI / video: Port
# to new backlight interface selection API") for v4.2 (2015-07-16).
# Kernels within this range use the 'bool' type and the related
# 'false' value in <acpi/video.h> without first including the
# definitions of that type and value.
#
CODE="
#include <linux/types.h>
#include <acpi/video.h>
void conftest_acpi_video_register_backlight(void) {
acpi_video_register_backlight(0);
}"
compile_check_conftest "$CODE" "NV_ACPI_VIDEO_REGISTER_BACKLIGHT" "" "functions"
;;
acpi_video_backlight_use_native)
#
# Determine if acpi_video_backlight_use_native() function is present
@@ -6652,13 +6717,18 @@ compile_test() {
#
# Determine whether drm_client_setup is present.
#
# Added by commit d07fdf922592 ("drm/fbdev-ttm:
# Convert to client-setup") in v6.13.
# Added by commit d07fdf922592 ("drm/fbdev-ttm: Convert to
# client-setup") in v6.13 in drm/drm_client_setup.h, but then moved
# to drm/clients/drm_client_setup.h by commit b86711c6d6e2
# ("drm/client: Move public client header to clients/ subdirectory")
# in linux-next.
#
CODE="
#include <drm/drm_fb_helper.h>
#if defined(NV_DRM_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/drm_client_setup.h>
#elif defined(NV_DRM_CLIENTS_DRM_CLIENT_SETUP_H_PRESENT)
#include <drm/clients/drm_client_setup.h>
#endif
void conftest_drm_client_setup(void) {
drm_client_setup();
@@ -7038,6 +7108,58 @@ compile_test() {
compile_check_conftest "$CODE" "NV_FOLIO_TEST_SWAPCACHE_PRESENT" "" "functions"
;;
drm_driver_has_date)
#
# Determine if the 'drm_driver' structure has a 'date' field.
#
# Removed by commit cb2e1c2136f7 ("drm: remove driver date from
# struct drm_driver and all drivers") in linux-next, expected in
# v6.14.
#
CODE="
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
int conftest_drm_driver_has_date(void) {
return offsetof(struct drm_driver, date);
}"
compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DATE" "" "types"
;;
drm_connector_helper_funcs_mode_valid_has_const_mode_arg)
#
# Determine if the 'mode' pointer argument is const in
# drm_connector_helper_funcs::mode_valid.
#
# The 'mode' pointer argument in
# drm_connector_helper_funcs::mode_valid was made const by commit
# 26d6fd81916e ("drm/connector: make mode_valid take a const struct
# drm_display_mode") in linux-next, expected in v6.15.
#
CODE="
#if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT)
#include <drm/drm_atomic_helper.h>
#endif
static int conftest_drm_connector_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode) {
return 0;
}
const struct drm_connector_helper_funcs conftest_drm_connector_helper_funcs = {
.mode_valid = conftest_drm_connector_mode_valid,
};"
compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG" "" "types"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit. Please
# avoid specifying -rc kernels, and only use SHAs that actually exist

View File

@@ -65,9 +65,13 @@
#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
(defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))
// XXX remove dependency on DRM_TTM_HELPER by implementing nvidia-drm's own
// .fbdev_probe callback that uses NVKMS kapi
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_CLIENT_AVAILABLE
#endif
#endif
/*
* We can support color management if either drm_helper_crtc_enable_color_mgmt()

View File

@@ -314,7 +314,11 @@ static int nv_drm_connector_get_modes(struct drm_connector *connector)
}
static int nv_drm_connector_mode_valid(struct drm_connector *connector,
#if defined(NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG)
const struct drm_display_mode *mode)
#else
struct drm_display_mode *mode)
#endif
{
struct drm_device *dev = connector->dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);

View File

@@ -1730,14 +1730,18 @@ static struct drm_driver nv_drm_driver = {
.name = "nvidia-drm",
.desc = "NVIDIA DRM driver",
#if defined(NV_DRM_DRIVER_HAS_DATE)
.date = "20160202",
#endif
#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
.device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
.legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
#endif
#if defined(DRM_FBDEV_TTM_DRIVER_OPS)
// XXX implement nvidia-drm's own .fbdev_probe callback that uses NVKMS kapi directly
#if defined(NV_DRM_FBDEV_AVAILABLE) && defined(DRM_FBDEV_TTM_DRIVER_OPS)
DRM_FBDEV_TTM_DRIVER_OPS,
#endif
};

View File

@@ -134,4 +134,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffe
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations_fop_unsigned_offset_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_helper_funcs_mode_valid_has_const_mode_arg

View File

@@ -996,6 +996,11 @@ nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
if (!acpi_video_backlight_use_native()) {
#if defined(NV_ACPI_VIDEO_REGISTER_BACKLIGHT)
nvkms_log(NVKMS_LOG_LEVEL_INFO, NVKMS_LOG_PREFIX,
"ACPI reported no NVIDIA native backlight available; attempting to use ACPI backlight.");
acpi_video_register_backlight();
#endif
return NULL;
}
#endif

View File

@@ -102,3 +102,4 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_backlight_use_native
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_register_backlight

View File

@@ -730,7 +730,12 @@ void uvm_conf_computing_disable_key_rotation(uvm_gpu_t *gpu)
bool uvm_conf_computing_is_key_rotation_enabled(uvm_gpu_t *gpu)
{
return gpu->channel_manager->conf_computing.key_rotation_enabled;
UVM_ASSERT(gpu);
// If the channel_manager is not set, we're in channel manager destroy
// path after the pointer was NULL-ed. Chances are that other key rotation
// infrastructure is not available either. Disallow the key rotation.
return gpu->channel_manager && gpu->channel_manager->conf_computing.key_rotation_enabled;
}
bool uvm_conf_computing_is_key_rotation_enabled_in_pool(uvm_channel_pool_t *pool)

View File

@@ -226,7 +226,7 @@ static inline const struct cpumask *uvm_cpumask_of_node(int node)
#define __GFP_NORETRY 0
#endif
#define NV_UVM_GFP_FLAGS (GFP_KERNEL)
#define NV_UVM_GFP_FLAGS (GFP_KERNEL | __GFP_NOMEMALLOC)
#ifndef NV_ALIGN_DOWN
#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))

View File

@@ -130,27 +130,12 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
NV_STATUS status = NV_OK;
NV_STATUS tracker_status;
uvm_prot_t prot = UVM_PROT_READ_WRITE_ATOMIC;
// Get the mask of unmapped pages because it will change after the
// first map operation
uvm_va_block_unmapped_pages_get(va_block, region, &va_block_context->caller_page_mask);
if (uvm_va_block_is_hmm(va_block) && !UVM_ID_IS_CPU(dest_id)) {
// Do not map pages that are already resident on the CPU. This is in
// order to avoid breaking system-wide atomic operations on HMM. HMM's
// implementation of system-side atomic operations involves restricting
// mappings to one processor (CPU or a GPU) at a time. If we were to
// grant a GPU a mapping to system memory, this gets into trouble
// because, on the CPU side, Linux can silently upgrade PTE permissions
// (move from read-only, to read-write, without any MMU notifiers
// firing), thus breaking the model by allowing simultaneous read-write
// access from two separate processors. To avoid that, just don't map
// such pages at all, when migrating.
uvm_page_mask_andnot(&va_block_context->caller_page_mask,
&va_block_context->caller_page_mask,
uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU, NUMA_NO_NODE));
}
// Only map those pages that are not mapped anywhere else (likely due
// to a first touch or a migration). We pass
// UvmEventMapRemoteCauseInvalid since the destination processor of a
@@ -166,6 +151,31 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
if (status != NV_OK)
goto out;
if (uvm_va_block_is_hmm(va_block) && UVM_ID_IS_CPU(dest_id)) {
uvm_processor_id_t id;
// Do not atomically map pages that are resident on the CPU. This is in
// order to avoid breaking system-wide atomic operations on HMM. HMM's
// implementation of system-side atomic operations involves restricting
// mappings to one processor (CPU or a GPU) at a time. If we were to
// grant a GPU a mapping to system memory, this gets into trouble
// because, on the CPU side, Linux can silently upgrade PTE permissions
// (move from read-only, to read-write, without any MMU notifiers
// firing), thus breaking the model by allowing simultaneous read-write
// access from two separate processors. To avoid that, don't remote map
// such pages atomically, after migrating.
// Also note that HMM sets CPU mapping for resident pages so the mask
// of pages to be mapped needs to be recomputed without including the
// CPU mapping.
prot = UVM_PROT_READ_WRITE;
uvm_page_mask_region_fill(&va_block_context->caller_page_mask, region);
for_each_gpu_id_in_mask(id, &va_block->mapped) {
uvm_page_mask_andnot(&va_block_context->caller_page_mask,
&va_block_context->caller_page_mask,
uvm_va_block_map_mask_get(va_block, id));
}
}
// Add mappings for AccessedBy processors
//
// No mappings within this call will operate on dest_id, so we don't
@@ -176,7 +186,7 @@ static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block,
dest_id,
region,
&va_block_context->caller_page_mask,
UVM_PROT_READ_WRITE_ATOMIC,
prot,
NULL);
out:

View File

@@ -1768,7 +1768,7 @@ static NV_STATUS block_alloc_cpu_chunk(uvm_va_block_t *block,
if (status == NV_OK)
break;
if (flags & UVM_CPU_CHUNK_ALLOC_FLAGS_STRICT) {
if ((flags & UVM_CPU_CHUNK_ALLOC_FLAGS_STRICT) && (num_possible_nodes() > 1)) {
flags &= ~UVM_CPU_CHUNK_ALLOC_FLAGS_STRICT;
numa_fallback = true;
status = block_alloc_cpu_chunk_inject_error(block, alloc_size, flags, NUMA_NO_NODE, chunk);

View File

@@ -143,6 +143,11 @@ nvidia_vma_access(
return -EINVAL;
}
if (write && !(mmap_context->prot & NV_PROTECT_WRITEABLE))
{
return -EACCES;
}
offset = mmap_context->mmap_start;
if (nv->flags & NV_FLAG_CONTROL)

View File

@@ -1628,8 +1628,6 @@ static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
nv_linux_file_private_t *nvlfp,
struct inode *inode)
{
down(&nvl->mmap_lock);
/* Set up struct address_space for use with unmap_mapping_range() */
address_space_init_once(&nvlfp->mapping);
nvlfp->mapping.host = inode;
@@ -1638,10 +1636,20 @@ static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
nvlfp->mapping.backing_dev_info = inode->i_mapping->backing_dev_info;
#endif
file->f_mapping = &nvlfp->mapping;
}
/* Add nvlfp to list of open files in nvl for mapping revocation */
/* Adds nvlfp to list of open files for mapping revocation */
static void nv_add_open_file(nv_linux_state_t *nvl,
nv_linux_file_private_t *nvlfp)
{
nvlfp->nvptr = nvl;
/*
* nvl->open_files and other mapping revocation members in nv_linux_state_t
* are protected by nvl->mmap_lock instead of nvl->ldata_lock.
*/
down(&nvl->mmap_lock);
list_add(&nvlfp->entry, &nvl->open_files);
up(&nvl->mmap_lock);
}
@@ -1691,11 +1699,12 @@ static void nvidia_open_deferred(void *nvlfp_raw)
*/
down(&nvl->ldata_lock);
rc = nv_open_device_for_nvlfp(NV_STATE_PTR(nvl), nvlfp->sp, nvlfp);
up(&nvl->ldata_lock);
/* Set nvptr only upon success (where nvl->usage_count is incremented) */
/* Only add open file tracking where nvl->usage_count is incremented */
if (rc == 0)
nvlfp->nvptr = nvl;
nv_add_open_file(nvl, nvlfp);
up(&nvl->ldata_lock);
complete_all(&nvlfp->open_complete);
}
@@ -1814,6 +1823,7 @@ nvidia_open(
}
nv = NV_STATE_PTR(nvl);
nv_init_mapping_revocation(nvl, file, nvlfp, inode);
if (nv_try_lock_foreground_open(file, nvl) == 0)
{
@@ -1824,11 +1834,11 @@ nvidia_open(
rc = nv_open_device_for_nvlfp(nv, nvlfp->sp, nvlfp);
up(&nvl->ldata_lock);
/* Set nvptr only upon success (where nvl->usage_count is incremented) */
/* Only add open file tracking where nvl->usage_count is incremented */
if (rc == 0)
nvlfp->nvptr = nvl;
nv_add_open_file(nvl, nvlfp);
up(&nvl->ldata_lock);
complete_all(&nvlfp->open_complete);
}
@@ -1883,10 +1893,6 @@ failed:
NV_SET_FILE_PRIVATE(file, NULL);
}
}
else
{
nv_init_mapping_revocation(nvl, file, nvlfp, inode);
}
return rc;
}

View File

@@ -161,6 +161,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_attr_guest_sev_snp
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hv_get_isolation_type
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ptep_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
@@ -230,6 +231,8 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_b
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto_akcipher_verify
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
NV_CONFTEST_SYMBOL_COMPILE_TESTS += follow_pte_arg_vma
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pfnmap_start
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_pci_ats_supported
NV_CONFTEST_SYMBOL_COMPILE_TESTS += ecc_digits_from_bytes

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,14 +32,27 @@
#define NV_NUM_PIN_PAGES_PER_ITERATION 0x80000
#endif
static inline int nv_follow_pfn(struct vm_area_struct *vma,
unsigned long address,
unsigned long *pfn)
static inline int nv_follow_flavors(struct vm_area_struct *vma,
unsigned long address,
unsigned long *pfn)
{
#if defined(NV_FOLLOW_PFN_PRESENT)
return follow_pfn(vma, address, pfn);
#else
#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start
struct follow_pfnmap_args args = {};
int rc;
args.address = address;
args.vma = vma;
rc = follow_pfnmap_start(&args);
if (rc)
return rc;
*pfn = args.pfn;
follow_pfnmap_end(&args);
return 0;
#elif NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
int status = 0;
spinlock_t *ptl;
pte_t *ptep;
@@ -47,17 +60,40 @@ static inline int nv_follow_pfn(struct vm_area_struct *vma,
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return status;
//
// The first argument of follow_pte() was changed from
// mm_struct to vm_area_struct in kernel 6.10.
//
#if defined(NV_FOLLOW_PTE_ARG1_VMA)
status = follow_pte(vma, address, &ptep, &ptl);
#else
status = follow_pte(vma->vm_mm, address, &ptep, &ptl);
#endif
if (status)
return status;
#if defined(NV_PTEP_GET_PRESENT)
*pfn = pte_pfn(ptep_get(ptep));
#else
*pfn = pte_pfn(READ_ONCE(*ptep));
#endif
// The lock is acquired inside follow_pte()
pte_unmap_unlock(ptep, ptl);
return 0;
#else // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
#else
return -1;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pfnmap_start
}
static inline int nv_follow_pfn(struct vm_area_struct *vma,
unsigned long address,
unsigned long *pfn)
{
#if defined(NV_FOLLOW_PFN_PRESENT)
return follow_pfn(vma, address, pfn);
#else
return nv_follow_flavors(vma, address, pfn);
#endif
}