535.309.01

This commit is contained in:
Bernhard Stoeckner
2026-04-28 20:23:00 +02:00
parent ef65a13097
commit 9756a4df56
42 changed files with 551 additions and 250 deletions

View File

@@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 535.288.01.
version 535.309.01.
## How to Build
@@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
535.288.01 driver release. This can be achieved by installing
535.309.01 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@@ -180,7 +180,7 @@ software applications.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 535.288.01 release,
(see the table below). However, in the 535.309.01 release,
GeForce and Workstation support is still considered alpha-quality.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
@@ -188,7 +188,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/535.288.01/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/535.309.01/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI

View File

@@ -79,7 +79,7 @@ ccflags-y += -I$(src)/common/inc
ccflags-y += -I$(src)
ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
ccflags-y += -D__KERNEL__ -DMODULE -DNVRM
ccflags-y += -DNV_VERSION_STRING=\"535.288.01\"
ccflags-y += -DNV_VERSION_STRING=\"535.309.01\"
ifneq ($(SYSSRCHOST1X),)
ccflags-y += -I$(SYSSRCHOST1X)
@@ -163,6 +163,7 @@ NV_CONFTEST_CFLAGS += $(filter -std=%,$(KBUILD_CFLAGS))
NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign)
NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,)
NV_CONFTEST_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types,)
NV_CONFTEST_CFLAGS += $(call cc-option,-fms-extensions,)
NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h
NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/functions.h
@@ -228,6 +229,7 @@ $(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT)
NV_HEADER_PRESENCE_TESTS = \
asm/system.h \
drm/drmP.h \
drm/drm_print.h \
drm/drm_auth.h \
drm/drm_gem.h \
drm/drm_crtc.h \

View File

@@ -1730,6 +1730,27 @@ static inline void nv_nvlfp_put_sp(nv_linux_file_private_t *nvlfp, nvidia_entry_
#define NV_ATOMIC_DEC(data) atomic_dec(&(data))
#define NV_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data))
/*
 * Store-release / load-acquire wrappers: use the kernel's primitives when
 * available, otherwise fall back to a full memory barrier paired with a
 * plain ACCESS_ONCE() access.
 *
 * Fix: the smp_store_release() branch previously carried a trailing
 * semicolon in the macro body.  Callers supply their own ';', so the old
 * definition expanded to two statements, which breaks constructs such as
 * `if (cond) nv_smp_store_release(p, v); else ...` and is inconsistent
 * with the do{}while(0) fallback branch below.
 */
#if defined(smp_store_release)
#define nv_smp_store_release(p, v) smp_store_release(p, v)
#else
#define nv_smp_store_release(p, v) \
do { \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
} while (0)
#endif
#if defined(smp_load_acquire)
#define nv_smp_load_acquire(p) smp_load_acquire(p)
#else
#define nv_smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
smp_mb(); \
___p1; \
})
#endif
static inline struct kmem_cache *nv_kmem_cache_create(const char *name, unsigned int size,
unsigned int align)
{

View File

@@ -299,14 +299,43 @@ static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
* Commit 45ad9f5290dc updated vma_start_write() to call __vma_start_write().
*/
void nv_vma_start_write(struct vm_area_struct *);
/*
 * Set the given bits in the VMA's flags word.  The VMA is write-locked
 * first via nv_vma_start_write().  When the kernel provides
 * vma_flags_set_word() (NV_VMA_FLAGS_SET_WORD_PRESENT), use it; otherwise
 * OR the bits into the private __vm_flags field directly.
 */
static inline void nv_vma_flags_set_word(struct vm_area_struct *vma, unsigned long flags)
{
nv_vma_start_write(vma);
#if defined(NV_VMA_FLAGS_SET_WORD_PRESENT)
vma_flags_set_word(&vma->flags, flags);
#else
ACCESS_PRIVATE(vma, __vm_flags) |= flags;
#endif
}
/*
 * Clear the given bits in the VMA's flags word.  Mirror of
 * nv_vma_flags_set_word(): write-lock the VMA, then clear either through
 * vma_flags_clear_word() or directly in the private __vm_flags field.
 */
static inline void nv_vma_flags_clear_word(struct vm_area_struct *vma, unsigned long flags)
{
nv_vma_start_write(vma);
#if defined(NV_VMA_FLAGS_SET_WORD_PRESENT)
vma_flags_clear_word(&vma->flags, flags);
#else
ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
#endif
}
/*
 * Wrapper for __is_vma_write_locked() across kernel variants.  Newer
 * kernels take the mm_lock_seq out-parameter directly; on older kernels
 * the sequence number is fetched separately via __vma_raw_mm_seqnum()
 * before calling the single-argument form.  Returns the (int) result of
 * __is_vma_write_locked(); *mm_lock_seq receives the mm lock sequence.
 */
static inline int nv_is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
{
#if defined(NV_IS_VMA_WRITE_LOCKED_HAS_MM_LOCK_SEQ_ARG)
return __is_vma_write_locked(vma, mm_lock_seq);
#else
*mm_lock_seq = __vma_raw_mm_seqnum(vma);
return __is_vma_write_locked(vma);
#endif
}
#endif // !NV_CAN_CALL_VMA_START_WRITE
static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
#if !NV_CAN_CALL_VMA_START_WRITE
nv_vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) |= flags;
#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
nv_vma_flags_set_word(vma, flags);
#elif defined(NV_VM_FLAGS_SET_PRESENT)
vm_flags_set(vma, flags);
#else
vma->vm_flags |= flags;
@@ -316,9 +345,8 @@ static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
#if !NV_CAN_CALL_VMA_START_WRITE
nv_vma_start_write(vma);
ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
#elif defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
nv_vma_flags_clear_word(vma, flags);
#elif defined(NV_VM_FLAGS_SET_PRESENT)
vm_flags_clear(vma, flags);
#else
vma->vm_flags &= ~flags;

View File

@@ -36,6 +36,19 @@
#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000)
#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000)
/*
 * Report whether the caller is executing in hard-IRQ context.
 *
 * in_hardirq() was added in v5.11-rc1 (2020-12-15) to replace in_irq().
 * Fall back to in_irq() for older kernels that don't have in_hardirq().
 */
static inline NvBool nv_in_hardirq(void)
{
#if defined(in_hardirq)
return in_hardirq();
#else
return in_irq();
#endif
}
#if !defined(NV_TIMESPEC64_PRESENT)
struct timespec64 {
__s64 tv_sec;
@@ -142,7 +155,7 @@ static inline NV_STATUS nv_sleep_us(unsigned int us)
ktime_get_raw_ts64(&tm1);
#endif
if (in_irq() && (us > NV_MAX_ISR_DELAY_US))
if (nv_in_hardirq() && (us > NV_MAX_ISR_DELAY_US))
return NV_ERR_GENERIC;
mdelay_safe_msec = us / 1000;
@@ -187,7 +200,7 @@ static inline NV_STATUS nv_sleep_ms(unsigned int ms)
tm_start = tm_aux;
#endif
if (in_irq() && (ms > NV_MAX_ISR_DELAY_MS))
if (nv_in_hardirq() && (ms > NV_MAX_ISR_DELAY_MS))
{
return NV_ERR_GENERIC;
}

View File

@@ -305,7 +305,7 @@ typedef struct nv_alloc_mapping_context_s {
NvU64 access_size;
NvU64 remap_prot_extra;
NvU32 prot;
NvBool valid;
NvU32 valid;
NvU32 caching;
} nv_alloc_mapping_context_t;

View File

@@ -447,6 +447,7 @@ compile_test() {
#endif
#if defined(NV_ASM_PAGE_H_PRESENT)
#include <asm/page.h>
#include <linux/percpu.h>
#endif
#include <asm/set_memory.h>
#else
@@ -472,6 +473,7 @@ compile_test() {
#endif
#if defined(NV_ASM_PAGE_H_PRESENT)
#include <asm/page.h>
#include <linux/percpu.h>
#endif
#include <asm/set_memory.h>
#else
@@ -532,6 +534,7 @@ compile_test() {
#endif
#if defined(NV_ASM_PAGE_H_PRESENT)
#include <asm/page.h>
#include <linux/percpu.h>
#endif
#include <asm/set_memory.h>
#else
@@ -562,6 +565,7 @@ compile_test() {
#endif
#if defined(NV_ASM_PAGE_H_PRESENT)
#include <asm/page.h>
#include <linux/percpu.h>
#endif
#include <asm/set_memory.h>
#else
@@ -1225,6 +1229,63 @@ compile_test() {
compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_DETACH_IOAS" "" "types"
;;
vfio_device_ops_has_get_region_info_caps)
#
# Determine if 'struct vfio_device_ops' has 'get_region_info_caps'
# callback.
#
# Added by commit 775f726a742a ("vfio: Add get_region_info_caps op")
# in v6.19
#
CODE="
#include <linux/pci.h>
#include <linux/vfio.h>
int conftest_vfio_device_ops_has_get_region_info_caps(void) {
return offsetof(struct vfio_device_ops, get_region_info_caps);
}"
compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_GET_REGION_INFO_CAPS" "" "types"
;;
irq_bypass_producer_has_token)
#
# Determine if 'struct irq_bypass_producer' has 'token' field
#
# Added by commit 2b521d86ee80 ("irqbypass: Take ownership of
# producer/consumer token tracking") in v6.17
#
CODE="
#include <linux/irqbypass.h>
int conftest_irq_bypass_producer_has_token(void) {
return offsetof(struct irq_bypass_producer, token);
}"
compile_check_conftest "$CODE" "NV_IRQ_BYPASS_PRODUCER_HAS_TOKEN" "" "types"
;;
irq_bypass_register_producer_has_eventfd_and_irq_args)
#
# Determine if irq_bypass_register_producer() function has
# additional 'eventfd' and 'irq' arguments.
#
# Added by commits 2b521d86ee80 ("irqbypass: Take ownership of
# producer/consumer token tracking") and 23b54381cee2
# ("irqbypass: Require producers to pass in Linux IRQ number
# during registration") in v6.17
#
CODE="
#include <linux/irqbypass.h>
#include <linux/eventfd.h>
void conftest_irq_bypass_register_producer_has_eventfd_and_irq_args(void) {
struct irq_bypass_producer *prod = NULL;
struct eventfd_ctx *eventfd = NULL;
int irq = 0;
irq_bypass_register_producer(prod, eventfd, irq);
}"
compile_check_conftest "$CODE" "NV_IRQ_BYPASS_REGISTER_PRODUCER_HAS_EVENTFD_AND_IRQ_ARGS" "" "types"
;;
pci_irq_vector_helpers)
#
# Determine if pci_alloc_irq_vectors(), pci_free_irq_vectors()
@@ -3179,24 +3240,6 @@ compile_test() {
fi
;;
enable_apicv)
#
# Determine if enable_apicv boolean is exported by kernel.
#
# Added by commit fdf513e37a3bd ("KVM: x86: Use common 'enable_apicv'
# variable for both APICv and AVIC")
#
CODE="
$CONFTEST_PREAMBLE
#include <asm/kvm_host.h>
bool is_enable_apicv_present() {
return enable_apicv;
}"
compile_check_conftest "$CODE" "NV_ENABLE_APICV_PRESENT" "" "types"
;;
pci_driver_has_driver_managed_dma)
#
# Determine if "struct pci_driver" has .driver_managed_dma member.
@@ -3676,6 +3719,35 @@ compile_test() {
compile_check_conftest "$CODE" "NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT" "" "functions"
;;
dma_map_ops_has_map_phys)
#
# Determine if .map_phys exists in struct dma_map_ops.
#
# Commit 14cb413af00c ("dma-mapping: remove unused mapping resource callbacks")
# removed .map_resource operation and replaced it with .map_phys.
#
echo "$CONFTEST_PREAMBLE
#include <linux/dma-map-ops.h>
int conftest_dma_map_ops_has_map_phys(void) {
return offsetof(struct dma_map_ops, map_phys);
}
int conftest_dma_map_ops_has_unmap_phys(void) {
return offsetof(struct dma_map_ops, unmap_phys);
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
echo "#define NV_DMA_MAP_OPS_HAS_MAP_PHYS" | append_conftest "types"
rm -f conftest$$.o
return
else
echo "#undef NV_DMA_MAP_OPS_HAS_MAP_PHYS" | append_conftest "types"
return
fi
;;
timer_setup)
#
# Determine if the function timer_setup() is present.
@@ -6299,6 +6371,53 @@ compile_test() {
compile_check_conftest "$CODE" "NV_HANDLE_MM_FAULT_HAS_PT_REGS_ARG" "" "types"
;;
zone_device_page_init_has_pgmap_and_order_args)
#
# Determine if the zone_device_page_init() has two additional
# arguments
#
# This change was introduced by d245f9b4ab80
# ("mm/zone_device: support large zone device private folios")
#
# It was further amended in 9387a71ec62c
# (mm/zone_device: reinitialize large zone device private folios)
#
# both commits are in linux-next, expected in v6.19.
#
CODE="
#include <linux/memremap.h>
void init_page(void) {
struct page *page;
struct dev_pagemap *pgmap;
zone_device_page_init(page, pgmap, 0);
}"
compile_check_conftest "$CODE" "NV_ZONE_DEVICE_PAGE_INIT_HAS_PGMAP_AND_ORDER_ARGS" "" "types"
;;
dev_pagemap_ops_has_folio_free)
#
# Determine if the zone device now uses a folio_free() as the callback
# function instead of page_free()
#
# This change was introduced by 3a5a06554566
# (mm/zone_device: rename page_free callback to folio_free)
#
# in linux-next, expected in v6.19.
#
CODE="
#include <linux/memremap.h>
void test_folio_free(struct folio *folio) {
}
void set_folio_free_ops(void) {
struct dev_pagemap_ops ops;
ops.folio_free = test_folio_free;
}"
compile_check_conftest "$CODE" "NV_PAGEMAP_OPS_HAS_FOLIO_FREE" "" "types"
;;
pci_rebar_get_possible_sizes)
#
# Determine if the pci_rebar_get_possible_sizes() function is present.
@@ -6315,6 +6434,27 @@ compile_test() {
compile_check_conftest "$CODE" "NV_PCI_REBAR_GET_POSSIBLE_SIZES_PRESENT" "" "functions"
;;
pci_resize_resource_has_exclude_bars_arg)
#
# Determine if pci_resize_resource() has exclude_bars argument.
#
# exclude_bars argument was added to pci_resize_resource by commit
# 337b1b566db0 (11/14/2025) ("PCI: Fix restoring BARs on BAR resize rollback path")
# in linux-next.
#
CODE="
#include <linux/pci.h>
typeof(pci_resize_resource) conftest_pci_resize_resource_has_exclude_bars_arg;
int __must_check conftest_pci_resize_resource_has_exclude_bars_arg(struct pci_dev *dev,
int i, int size,
int exclude_bars) {
return 0;
}"
compile_check_conftest "$CODE" "NV_PCI_RESIZE_RESOURCE_HAS_EXCLUDE_BARS_ARG" "" "types"
;;
wait_for_random_bytes)
#
# Determine if the wait_for_random_bytes() function is present.
@@ -6374,23 +6514,39 @@ compile_test() {
compile_check_conftest "$CODE" "NV_IOMMU_SVA_BIND_DEVICE_HAS_DRVDATA_ARG" "" "types"
;;
vm_area_struct_has_const_vm_flags)
vm_flags_set)
#
# Determine if the 'vm_area_struct' structure has
# const 'vm_flags'.
# Determine if the vm_flags_set() function is present. The
# presence of this function indicates that the vm_flags_clear()
# function is also present.
#
# A union of '__vm_flags' and 'const vm_flags' was added
# by commit bc292ab00f6c ("mm: introduce vma->vm_flags
# wrapper functions") in mm-stable branch (2023-02-09)
# of the akpm/mm maintainer tree.
# The functions vm_flags_set()/ vm_flags_clear() were added by
# commit bc292ab00f6c ("mm: introduce vma->vm_flags wrapper
# functions") in v6.3-rc1 (2023-02-09).
#
CODE="
#include <linux/mm_types.h>
int conftest_vm_area_struct_has_const_vm_flags(void) {
return offsetof(struct vm_area_struct, __vm_flags);
#include <linux/mm.h>
void conftest_vm_flags_set(void) {
vm_flags_set();
}"
compile_check_conftest "$CODE" "NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS" "" "types"
compile_check_conftest "$CODE" "NV_VM_FLAGS_SET_PRESENT" "" "functions"
;;
vma_flags_set_word)
#
# Determine if the vma_flags_set_word() function is present.
#
# Added by commit c3f7c506e8f1 ("mm: introduce VMA flags bitmap type")
# in v6.19-rc1.
#
CODE="
#include <linux/mm.h>
void conftest_vma_flags_set_word(void) {
vma_flags_set_word();
}"
compile_check_conftest "$CODE" "NV_VMA_FLAGS_SET_WORD_PRESENT" "" "functions"
;;
drm_driver_has_dumb_destroy)
@@ -6797,6 +6953,26 @@ compile_test() {
compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG" "" "types"
;;
is_vma_write_locked_has_mm_lock_seq_arg)
#
# Determine if __is_vma_write_locked() takes only a single
# 'struct vm_area_struct *' argument.
#
# Commit 22f7639f2f03 ("mm/vma: improve and document
# __is_vma_write_locked()") removed the 'unsigned int *mm_lock_seq'
# parameter in v7.0-rc1.
#
CODE="
#include <linux/mm.h>
#include <linux/mmap_lock.h>
int conftest_is_vma_write_locked_has_mm_lock_seq_arg(struct vm_area_struct *vma) {
unsigned int mm_lock_seq;
return __is_vma_write_locked(vma, &mm_lock_seq);
}"
compile_check_conftest "$CODE" "NV_IS_VMA_WRITE_LOCKED_HAS_MM_LOCK_SEQ_ARG" "" "types"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit.
#

View File

@@ -93,7 +93,8 @@ static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal(fence);
#else
return dma_fence_signal(fence);
dma_fence_signal(fence);
return 0;
#endif
}

View File

@@ -497,7 +497,7 @@ static int __nv_drm_cursor_atomic_check(struct drm_plane *plane,
WARN_ON(nv_plane->layer_idx != NVKMS_KAPI_LAYER_INVALID_IDX);
nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
nv_drm_for_each_new_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state);
struct NvKmsKapiHeadRequestedConfig *head_req_config =
&nv_crtc_state->req_config;
@@ -543,7 +543,7 @@ static int nv_drm_plane_atomic_check(struct drm_plane *plane,
WARN_ON(nv_plane->layer_idx == NVKMS_KAPI_LAYER_INVALID_IDX);
nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
nv_drm_for_each_new_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state);
struct NvKmsKapiHeadRequestedConfig *head_req_config =
&nv_crtc_state->req_config;
@@ -692,9 +692,11 @@ static inline void __nv_drm_plane_atomic_destroy_state(
#endif
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(state);
drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
{
struct nv_drm_plane_state *nv_drm_plane_state =
to_nv_drm_plane_state(state);
drm_property_blob_put(nv_drm_plane_state->hdr_output_metadata);
}
#endif
}
@@ -906,7 +908,7 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
req_config->flags.displaysChanged = NV_TRUE;
nv_drm_for_each_connector_in_state(crtc_state->state,
nv_drm_for_each_new_connector_in_state(crtc_state->state,
connector, connector_state, j) {
if (connector_state->crtc != crtc) {
continue;

View File

@@ -75,7 +75,7 @@ static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb)
* drm_atomic_helper_disable_all() is copied from
* linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use
* nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs,
* use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to loop
* use nv_drm_for_each_new_*_in_state instead of for_each_connector_in_state to loop
* over all modeset object states, and use drm_atomic_state_free() if
* drm_atomic_state_put() is not available.
*
@@ -162,13 +162,13 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
}
#endif
nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
nv_drm_for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret < 0)
goto free;
}
nv_drm_for_each_plane_in_state(state, plane, plane_state, i) {
nv_drm_for_each_new_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret < 0)
goto free;

View File

@@ -256,6 +256,14 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
#endif
#if !defined(for_each_new_connector_in_state)
#define nv_drm_for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \
nv_drm_for_each_connector_in_state(__state, connector, new_connector_state, __i)
#else
#define nv_drm_for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \
for_each_new_connector_in_state(__state, connector, new_connector_state, __i)
#endif
/**
* nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update
* @__state: &struct drm_atomic_state pointer
@@ -281,6 +289,31 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
for_each_crtc_in_state(__state, crtc, crtc_state, __i)
#endif
#if !defined(for_each_new_crtc_in_state)
#define nv_drm_for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \
nv_drm_for_each_crtc_in_state(__state, crtc, new_crtc_state, __i)
#else
#define nv_drm_for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \
for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i)
#endif
#if !defined(for_each_old_crtc_in_state)
#define nv_drm_for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \
nv_drm_for_each_crtc_in_state(__state, crtc, old_crtc_state, __i)
#else
#define nv_drm_for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \
for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i)
#endif
#if !defined(for_each_oldnew_crtc_in_state)
#define nv_drm_for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
nv_drm_for_each_crtc_in_state(__state, crtc, old_crtc_state, __i) \
(new_crtc_state) = (old_crtc_state);
#else
#define nv_drm_for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i)
#endif
/**
* nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
@@ -306,6 +339,22 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
for_each_plane_in_state(__state, plane, plane_state, __i)
#endif
#if !defined(for_each_new_plane_in_state)
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
#else
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
#endif
#if !defined(for_each_old_plane_in_state)
#define nv_drm_for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \
nv_drm_for_each_plane_in_state(__state, plane, old_plane_state, __i)
#else
#define nv_drm_for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \
for_each_old_plane_in_state(__state, plane, old_plane_state, __i)
#endif
static inline struct drm_connector *
nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
uint32_t id)

View File

@@ -183,3 +183,4 @@ module_exit(nv_linux_drm_exit);
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
MODULE_DESCRIPTION("NVIDIA DRM kernel module");

View File

@@ -103,8 +103,11 @@ static bool __will_generate_flip_event(struct drm_crtc *crtc,
return false;
}
/* Find out whether primary & overlay flip done events will be generated. */
nv_drm_for_each_plane_in_state(old_crtc_state->state,
/*
* Find out whether primary & overlay flip done events will be generated.
* Only called after drm_atomic_helper_swap_state, so we use old state.
*/
nv_drm_for_each_old_plane_in_state(old_crtc_state->state,
plane, old_plane_state, i) {
if (old_plane_state->crtc != crtc) {
continue;
@@ -172,7 +175,8 @@ static int __nv_drm_get_syncpt_data(
head_reply_config = &reply_config->headReplyConfig[nv_crtc->head];
nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
/* Use old state because this is only called after drm_atomic_helper_swap_state */
nv_drm_for_each_old_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
struct nv_drm_plane *nv_plane = to_nv_plane(plane);
if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) {
@@ -233,21 +237,14 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
&(to_nv_atomic_state(state)->config);
struct NvKmsKapiModeSetReplyConfig reply_config = { };
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
int ret;
memset(requested_config, 0, sizeof(*requested_config));
/* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* When committing a state, the new state is already stored in
* crtc->state. When checking a proposed state, the proposed state is
* stored in crtc_state.
*/
struct drm_crtc_state *new_crtc_state =
commit ? crtc->state : crtc_state;
nv_drm_for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
requested_config->headRequestedConfig[nv_crtc->head] =
@@ -256,7 +253,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
requested_config->headsMask |= 1 << nv_crtc->head;
if (commit) {
struct drm_crtc_state *old_crtc_state = crtc_state;
struct nv_drm_crtc_state *nv_new_crtc_state =
to_nv_crtc_state(new_crtc_state);
@@ -296,10 +292,11 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
}
if (commit && nv_dev->supportsSyncpts) {
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
/* commit is true so we check old state */
nv_drm_for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
/*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */
ret = __nv_drm_get_syncpt_data(
nv_dev, crtc, crtc_state, requested_config, &reply_config);
nv_dev, crtc, old_crtc_state, requested_config, &reply_config);
if (ret != 0) {
return ret;
}
@@ -394,9 +391,8 @@ int nv_drm_atomic_commit(struct drm_device *dev,
* updates to complete.
*/
if (nonblock) {
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
nv_drm_for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
/*
* Here you aren't required to hold nv_drm_crtc::flip_list_lock
* because:
@@ -475,7 +471,7 @@ int nv_drm_atomic_commit(struct drm_device *dev,
goto done;
}
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
nv_drm_for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
struct nv_drm_crtc_state *nv_new_crtc_state =
to_nv_crtc_state(crtc->state);

View File

@@ -31,6 +31,10 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_PRINT_H_PRESENT)
#include <drm/drm_print.h>
#endif
#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
#include <drm/drm_device.h>
#endif

View File

@@ -80,6 +80,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_flags_set
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
@@ -132,7 +133,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed

View File

@@ -1793,3 +1793,4 @@ module_exit(nvkms_exit);
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
MODULE_DESCRIPTION("NVIDIA modeset kernel module");

View File

@@ -88,6 +88,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += iommu_sva_bind_device_has_drvdata_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_fault_to_errno
NV_CONFTEST_FUNCTION_COMPILE_TESTS += folio_test_swapcache
NV_CONFTEST_FUNCTION_COMPILE_TESTS += page_pgmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_flags_set
NV_CONFTEST_TYPE_COMPILE_TESTS += backing_dev_info
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_context_t
@@ -107,9 +108,10 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_device_range
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_mm_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += dev_pagemap_ops_has_folio_free
NV_CONFTEST_TYPE_COMPILE_TESTS += zone_device_page_init_has_pgmap_and_order_args
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
NV_CONFTEST_TYPE_COMPILE_TESTS += mpol_preferred_many_present

View File

@@ -1203,3 +1203,4 @@ module_exit(uvm_exit_entry);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
MODULE_DESCRIPTION("NVIDIA Unified Virtual Memory kernel module");

View File

@@ -71,6 +71,19 @@ module_param(uvm_disable_hmm, bool, 0444);
#include "uvm_va_policy.h"
#include "uvm_tools.h"
//
// Initialize a ZONE_DEVICE page across kernel variants.  Newer kernels
// take the owning pgmap and an allocation order; older kernels take only
// the page.  Order 0 is passed unconditionally for now; when actual
// large-order support is added, this function will need to be revisited.
//
static __always_inline void nv_zone_device_page_init(struct page *page)
{
#if defined(NV_ZONE_DEVICE_PAGE_INIT_HAS_PGMAP_AND_ORDER_ARGS)
zone_device_page_init(page, page_pgmap(page), 0);
#else
zone_device_page_init(page);
#endif
}
// The function nv_PageSwapCache() wraps the check for page swap cache flag in
// order to support a wide variety of kernel versions.
// The function PageSwapCache() is removed after 32f51ead3d77 ("mm: remove
@@ -1455,6 +1468,31 @@ uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
return uvm_va_block_region_from_start_end(va_block, start, end);
}
// Compute the permissions processor_id may map the page with, derived from
// how the CPU currently has the page mapped (va_block->cpu.pte_bits).
// Returns UVM_PROT_NONE when the CPU has no mapping of the block, or no
// read permission on this page.
uvm_prot_t uvm_hmm_compute_mapping_prot(uvm_va_block_t *va_block,
uvm_processor_id_t processor_id,
uvm_page_index_t page_index)
{
// No CPU mapping of the block at all: nothing can be granted.
if (!uvm_processor_mask_test(&va_block->mapped, UVM_ID_CPU))
return UVM_PROT_NONE;
if (uvm_page_mask_test(&va_block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE], page_index)) {
if (uvm_processor_mask_test(&va_block->hmm.va_space->has_native_atomics[uvm_id_value(UVM_ID_CPU)],
processor_id))
// If the CPU has write access it also has atomic access, so it's
// fine for any GPU with HW support to do atomic accesses.
return UVM_PROT_READ_WRITE_ATOMIC;
else
// Otherwise the GPU needs to fault on atomic access to ensure the
// CPU is unmapped.
return UVM_PROT_READ_WRITE;
}
if (uvm_page_mask_test(&va_block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ], page_index))
return UVM_PROT_READ_ONLY;
return UVM_PROT_NONE;
}
uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
struct vm_area_struct *vma,
NvU64 addr)
@@ -1868,7 +1906,7 @@ static void fill_dst_pfn(uvm_va_block_t *va_block,
hmm_mark_gpu_chunk_referenced(va_block, gpu, gpu_chunk);
UVM_ASSERT(!page_count(dpage));
zone_device_page_init(dpage);
nv_zone_device_page_init(dpage);
dpage->zone_device_data = va_block->hmm.va_space;
dst_pfns[page_index] = migrate_pfn(pfn);

View File

@@ -261,6 +261,14 @@ typedef struct
const uvm_va_policy_t *policy,
NvU64 address);
// Return the actual permissions allowed when mapping a page within a
// va_block on the given processor_id. This may differ from the logical
// permission if for example the kernel has the CPU pages mapped read-only
// to do copy-on-write.
uvm_prot_t uvm_hmm_compute_mapping_prot(uvm_va_block_t *va_block,
uvm_processor_id_t processor_id,
uvm_page_index_t page_index);
// Return the logical protection allowed of a HMM va_block for the page at
// the given address within the vma which must be valid. This is usually
// obtained from uvm_hmm_va_block_find_create()).
@@ -555,6 +563,13 @@ typedef struct
return (uvm_va_block_region_t){};
}
// Stub for builds without HMM support: always reports no permissions.
static uvm_prot_t uvm_hmm_compute_mapping_prot(uvm_va_block_t *va_block,
uvm_processor_id_t processor_id,
uvm_page_index_t page_index)
{
return UVM_PROT_NONE;
}
static uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
struct vm_area_struct *vma,
NvU64 addr)

View File

@@ -3474,6 +3474,11 @@ static void devmem_page_free(struct page *page)
&pmm->root_chunks.va_block_lazy_free_q_item);
}
// folio_free dev_pagemap callback: forwards the folio's head page to the
// existing page-based free path (devmem_page_free) for kernels whose
// dev_pagemap_ops use folio_free instead of page_free.
static void devmem_folio_free(struct folio *folio)
{
devmem_page_free(&folio->page);
}
// This is called by HMM when the CPU faults on a ZONE_DEVICE private entry.
static vm_fault_t devmem_fault(struct vm_fault *vmf)
{
@@ -3492,7 +3497,11 @@ static vm_fault_t devmem_fault_entry(struct vm_fault *vmf)
static const struct dev_pagemap_ops uvm_pmm_devmem_ops =
{
#if defined(NV_PAGEMAP_OPS_HAS_FOLIO_FREE)
.folio_free = devmem_folio_free,
#else
.page_free = devmem_page_free,
#endif
.migrate_to_ram = devmem_fault_entry,
};

View File

@@ -9833,6 +9833,13 @@ uvm_prot_t uvm_va_block_page_compute_highest_permission(uvm_va_block_t *va_block
uvm_processor_mask_t resident_processors;
NvU32 resident_processors_count;
// TODO: Bug 5841902
// There are several calls to uvm_va_block_is_hmm() which need to be removed
if (uvm_va_block_is_hmm(va_block))
return uvm_hmm_compute_mapping_prot(va_block,
processor_id,
page_index);
if (uvm_processor_mask_test(block_get_uvm_lite_gpus(va_block), processor_id))
return UVM_PROT_READ_WRITE_ATOMIC;

View File

@@ -796,7 +796,11 @@ static NvBool nv_dma_use_map_resource
#endif
}
#if defined(NV_DMA_MAP_OPS_HAS_MAP_PHYS)
return (ops->map_phys != NULL);
#else
return (ops->map_resource != NULL);
#endif
#else
return NV_FALSE;
#endif

View File

@@ -126,7 +126,7 @@ nvidia_vma_access(
nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr);
NvU32 pageIndex, pageOffset;
void *kernel_mapping;
const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
NvU64 offset;
pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT);
@@ -137,7 +137,7 @@ nvidia_vma_access(
return -EINVAL;
}
if (!mmap_context->valid)
if (!nv_smp_load_acquire(&mmap_context->valid))
{
nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n");
return -EINVAL;
@@ -486,7 +486,7 @@ int nvidia_mmap_helper(
{
NvU32 prot = 0;
int ret;
const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status;
@@ -497,7 +497,7 @@ int nvidia_mmap_helper(
* If mmap context is not valid on this file descriptor, this mapping wasn't
* previously validated with the RM so it must be rejected.
*/
if (!mmap_context->valid)
if (!nv_smp_load_acquire(&mmap_context->valid))
{
nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n");
return -EINVAL;
@@ -806,15 +806,22 @@ void NV_API_CALL nv_set_safe_to_mmap_locked(
}
#if !NV_CAN_CALL_VMA_START_WRITE
#if defined(VM_REFCNT_EXCLUDE_READERS_FLAG)
#define NV_VMA_LOCK_OFFSET VM_REFCNT_EXCLUDE_READERS_FLAG
#else
#define NV_VMA_LOCK_OFFSET VMA_LOCK_OFFSET
#endif
static NvBool nv_vma_enter_locked(struct vm_area_struct *vma, NvBool detaching)
{
NvU32 tgt_refcnt = VMA_LOCK_OFFSET;
NvU32 tgt_refcnt = NV_VMA_LOCK_OFFSET;
NvBool interrupted = NV_FALSE;
if (!detaching)
{
tgt_refcnt++;
}
if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt))
if (!refcount_add_not_zero(NV_VMA_LOCK_OFFSET, &vma->vm_refcnt))
{
return NV_FALSE;
}
@@ -844,7 +851,7 @@ static NvBool nv_vma_enter_locked(struct vm_area_struct *vma, NvBool detaching)
if (interrupted)
{
// Clean up on error: release refcount and dep_map
refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
refcount_sub_and_test(NV_VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
return NV_FALSE;
}
@@ -860,7 +867,7 @@ void nv_vma_start_write(struct vm_area_struct *vma)
{
NvU32 mm_lock_seq;
NvBool locked;
if (__is_vma_write_locked(vma, &mm_lock_seq))
if (nv_is_vma_write_locked(vma, &mm_lock_seq))
return;
locked = nv_vma_enter_locked(vma, NV_FALSE);
@@ -869,7 +876,7 @@ void nv_vma_start_write(struct vm_area_struct *vma)
if (locked)
{
NvBool detached;
detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
detached = refcount_sub_and_test(NV_VMA_LOCK_OFFSET, &vma->vm_refcnt);
rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
WARN_ON_ONCE(detached);
}

View File

@@ -220,7 +220,11 @@ static int nv_resize_pcie_bars(struct pci_dev *pci_dev) {
resize:
/* Attempt to resize BAR1 to the largest supported size */
#if defined(NV_PCI_RESIZE_RESOURCE_HAS_EXCLUDE_BARS_ARG)
r = pci_resize_resource(pci_dev, NV_GPU_BAR1, requested_size, 0);
#else
r = pci_resize_resource(pci_dev, NV_GPU_BAR1, requested_size);
#endif
if (r) {
if (r == -ENOSPC)

View File

@@ -51,7 +51,7 @@ NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
nvamc = &nvlfp->mmap_context;
if (nvamc->valid)
if (nv_smp_load_acquire(&nvamc->valid))
{
status = NV_ERR_STATE_IN_USE;
goto done;
@@ -61,6 +61,10 @@ NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
{
nvamc->alloc = pAllocPriv;
nvamc->page_index = pageIndex;
{
nv_alloc_t *at = (nv_alloc_t *) nvamc->alloc;
atomic64_inc(&at->usage_count);
}
}
else
{
@@ -83,8 +87,8 @@ NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
}
nvamc->prot = prot;
nvamc->valid = NV_TRUE;
nvamc->caching = nvuap->caching;
nv_smp_store_release(&nvamc->valid, NV_TRUE);
done:
nv_put_file_private(priv);

View File

@@ -106,6 +106,8 @@
#define RM_THRESHOLD_UNAHNDLED_IRQ_COUNT 99900
#define RM_UNHANDLED_TIMEOUT_US 100000
MODULE_DESCRIPTION("NVIDIA core GPU kernel module");
const NvBool nv_is_rm_firmware_supported_os = NV_TRUE;
// Deprecated, use NV_REG_ENABLE_GPU_FIRMWARE instead
@@ -2724,6 +2726,12 @@ nvidia_ctl_close(
rm_cleanup_file_private(sp, nv, &nvlfp->nvfp);
if (nvlfp->mmap_context.alloc != NULL && nvlfp->mmap_context.valid)
{
nv_alloc_t *at = nvlfp->mmap_context.alloc;
nv_alloc_release(nvlfp, at);
}
if (nvlfp->free_list != NULL)
{
at = nvlfp->free_list;

View File

@@ -181,6 +181,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_to_irq
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_ops_has_map_phys
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_export_args
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic
@@ -193,6 +194,8 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_FUNCTION_COMPILE_TESTS += crypto_tfm_ctx_aligned
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vma_flags_set_word
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vm_flags_set
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
@@ -255,10 +258,11 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_has_trapno_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += foll_longterm_present
NV_CONFTEST_TYPE_COMPILE_TESTS += bus_type_has_iommu_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_resize_resource_has_exclude_bars_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += is_vma_write_locked_has_mm_lock_seq_arg
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build

View File

@@ -329,7 +329,7 @@ NvBool NV_API_CALL os_semaphore_may_sleep(void)
NvBool NV_API_CALL os_is_isr(void)
{
return (in_irq());
return (nv_in_hardirq());
}
// return TRUE if the caller is the super-user

View File

@@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r539_62
#define NV_BUILD_BRANCH r539_72
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r539_62
#define NV_PUBLIC_BRANCH r539_72
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r539_62-1069"
#define NV_BUILD_CHANGELIST_NUM (36903621)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r539_72-1194"
#define NV_BUILD_CHANGELIST_NUM (37693931)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r535/r539_62-1069"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36903621)
#define NV_BUILD_NAME "rel/gpu_drv/r535/r539_72-1194"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (37693931)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r539_62-1"
#define NV_BUILD_CHANGELIST_NUM (36902724)
#define NV_BUILD_BRANCH_VERSION "r539_72-2"
#define NV_BUILD_CHANGELIST_NUM (37693931)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "539.63"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (36902724)
#define NV_BUILD_NAME "539.74"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (37693931)
#define NV_BUILD_BRANCH_BASE_VERSION R535
#endif
// End buildmeister python edited section

View File

@@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "535.288.01"
#define NV_VERSION_STRING "535.309.01"
#else

View File

@@ -3,7 +3,7 @@
#define NV_COMPANY_NAME_STRING_SHORT "NVIDIA"
#define NV_COMPANY_NAME_STRING_FULL "NVIDIA Corporation"
#define NV_COMPANY_NAME_STRING NV_COMPANY_NAME_STRING_FULL
#define NV_COPYRIGHT_YEAR "2025"
#define NV_COPYRIGHT_YEAR "2026"
#define NV_COPYRIGHT "(C) " NV_COPYRIGHT_YEAR " NVIDIA Corporation. All rights reserved." // Please do not use the non-ascii copyright symbol for (C).
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \

View File

@@ -305,7 +305,7 @@ typedef struct nv_alloc_mapping_context_s {
NvU64 access_size;
NvU64 remap_prot_extra;
NvU32 prot;
NvBool valid;
NvU32 valid;
NvU32 caching;
} nv_alloc_mapping_context_t;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -346,7 +346,7 @@ RmLogGpuCrash(OBJGPU *pGpu)
"NVRM: A GPU crash dump has been created. If possible, please run\n"
"NVRM: nvidia-bug-report.sh as root to collect this data before\n"
"NVRM: the NVIDIA kernel module is unloaded.\n");
if (hypervisorIsVgxHyper())
if (!IS_GSP_CLIENT(pGpu) && hypervisorIsVgxHyper())
{
nv_printf(NV_DBG_ERRORS, "NVRM: Dumping nvlogs buffers\n");
nvlogDumpToKernelLog(NV_FALSE);

View File

@@ -785,21 +785,6 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
#endif
},
{ /* [39] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaGetPteInfo_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x801801u,
/*paramSize=*/ sizeof(NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdDmaGetPteInfo"
#endif
},
{ /* [40] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -814,7 +799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaFlush"
#endif
},
{ /* [41] */
{ /* [40] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -829,7 +814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaAdvSchedGetVaCaps"
#endif
},
{ /* [42] */
{ /* [41] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -844,22 +829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPdeInfo"
#endif
},
{ /* [43] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaSetPteInfo_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x80180au,
/*paramSize=*/ sizeof(NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdDmaSetPteInfo"
#endif
},
{ /* [44] */
{ /* [42] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -874,7 +844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaInvalidateTLB"
#endif
},
{ /* [45] */
{ /* [43] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -889,7 +859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetCaps"
#endif
},
{ /* [46] */
{ /* [44] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -904,7 +874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetVASpaceSize"
#endif
},
{ /* [47] */
{ /* [45] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -919,7 +889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUpdatePde2"
#endif
},
{ /* [48] */
{ /* [46] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -934,7 +904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaEnablePrivilegedRange"
#endif
},
{ /* [49] */
{ /* [47] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c0000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -949,7 +919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetDefaultVASpace"
#endif
},
{ /* [50] */
{ /* [48] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -964,7 +934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPageDirectory"
#endif
},
{ /* [51] */
{ /* [49] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -979,7 +949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUnsetPageDirectory"
#endif
},
{ /* [52] */
{ /* [50] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -994,7 +964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdMsencGetCaps"
#endif
},
{ /* [53] */
{ /* [51] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1009,7 +979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBspGetCapsV2"
#endif
},
{ /* [54] */
{ /* [52] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1024,7 +994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
#endif
},
{ /* [55] */
{ /* [53] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1039,7 +1009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
#endif
},
{ /* [56] */
{ /* [54] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1054,7 +1024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdNvjpgGetCapsV2"
#endif
},
{ /* [57] */
{ /* [55] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1069,7 +1039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfCudaLimitDisable"
#endif
},
{ /* [58] */
{ /* [56] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1084,7 +1054,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount"
#endif
},
{ /* [59] */
{ /* [57] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@@ -1104,7 +1074,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
const struct NVOC_EXPORT_INFO __nvoc_export_info_Device =
{
/*numEntries=*/ 60,
/*numEntries=*/ 58,
/*pExportEntries=*/ __nvoc_exported_method_def_Device
};
@@ -1161,10 +1131,6 @@ static void __nvoc_init_funcTable_Device_1(Device *pThis) {
pThis->__deviceCtrlCmdBifGetPciePowerControlMask__ = &deviceCtrlCmdBifGetPciePowerControlMask_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__deviceCtrlCmdDmaGetPteInfo__ = &deviceCtrlCmdDmaGetPteInfo_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__deviceCtrlCmdDmaUpdatePde2__ = &deviceCtrlCmdDmaUpdatePde2_IMPL;
#endif
@@ -1189,10 +1155,6 @@ static void __nvoc_init_funcTable_Device_1(Device *pThis) {
pThis->__deviceCtrlCmdDmaGetPdeInfo__ = &deviceCtrlCmdDmaGetPdeInfo_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__deviceCtrlCmdDmaSetPteInfo__ = &deviceCtrlCmdDmaSetPteInfo_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__deviceCtrlCmdDmaInvalidateTLB__ = &deviceCtrlCmdDmaInvalidateTLB_IMPL;
#endif

View File

@@ -87,14 +87,12 @@ struct Device {
NV_STATUS (*__deviceCtrlCmdBifAspmFeatureSupported__)(struct Device *, NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifAspmCyaUpdate__)(struct Device *, NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifGetPciePowerControlMask__)(struct Device *, NV0080_CTRL_CMD_BIF_GET_PCIE_POWER_CONTROL_MASK_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaGetPteInfo__)(struct Device *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaUpdatePde2__)(struct Device *, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaSetPageDirectory__)(struct Device *, NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaUnsetPageDirectory__)(struct Device *, NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaFlush__)(struct Device *, NV0080_CTRL_DMA_FLUSH_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaAdvSchedGetVaCaps__)(struct Device *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaGetPdeInfo__)(struct Device *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaSetPteInfo__)(struct Device *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaInvalidateTLB__)(struct Device *, NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaGetCaps__)(struct Device *, NV0080_CTRL_DMA_GET_CAPS_PARAMS *);
NV_STATUS (*__deviceCtrlCmdDmaSetVASpaceSize__)(struct Device *, NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *);
@@ -221,14 +219,12 @@ NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT
#define deviceCtrlCmdBifAspmFeatureSupported(pDevice, pBifAspmParams) deviceCtrlCmdBifAspmFeatureSupported_DISPATCH(pDevice, pBifAspmParams)
#define deviceCtrlCmdBifAspmCyaUpdate(pDevice, pBifAspmCyaParams) deviceCtrlCmdBifAspmCyaUpdate_DISPATCH(pDevice, pBifAspmCyaParams)
#define deviceCtrlCmdBifGetPciePowerControlMask(pDevice, pBifPciePowerControlParams) deviceCtrlCmdBifGetPciePowerControlMask_DISPATCH(pDevice, pBifPciePowerControlParams)
#define deviceCtrlCmdDmaGetPteInfo(pDevice, pParams) deviceCtrlCmdDmaGetPteInfo_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaUpdatePde2(pDevice, pParams) deviceCtrlCmdDmaUpdatePde2_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaSetPageDirectory(pDevice, pParams) deviceCtrlCmdDmaSetPageDirectory_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaUnsetPageDirectory(pDevice, pParams) deviceCtrlCmdDmaUnsetPageDirectory_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaFlush(pDevice, flushParams) deviceCtrlCmdDmaFlush_DISPATCH(pDevice, flushParams)
#define deviceCtrlCmdDmaAdvSchedGetVaCaps(pDevice, pParams) deviceCtrlCmdDmaAdvSchedGetVaCaps_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaGetPdeInfo(pDevice, pParams) deviceCtrlCmdDmaGetPdeInfo_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaSetPteInfo(pDevice, pParams) deviceCtrlCmdDmaSetPteInfo_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaInvalidateTLB(pDevice, pParams) deviceCtrlCmdDmaInvalidateTLB_DISPATCH(pDevice, pParams)
#define deviceCtrlCmdDmaGetCaps(pDevice, pDmaCapsParams) deviceCtrlCmdDmaGetCaps_DISPATCH(pDevice, pDmaCapsParams)
#define deviceCtrlCmdDmaSetVASpaceSize(pDevice, pParams) deviceCtrlCmdDmaSetVASpaceSize_DISPATCH(pDevice, pParams)
@@ -336,12 +332,6 @@ static inline NV_STATUS deviceCtrlCmdBifGetPciePowerControlMask_DISPATCH(struct
return pDevice->__deviceCtrlCmdBifGetPciePowerControlMask__(pDevice, pBifPciePowerControlParams);
}
NV_STATUS deviceCtrlCmdDmaGetPteInfo_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdDmaGetPteInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdDmaGetPteInfo__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdDmaUpdatePde2_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdDmaUpdatePde2_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams) {
@@ -378,12 +368,6 @@ static inline NV_STATUS deviceCtrlCmdDmaGetPdeInfo_DISPATCH(struct Device *pDevi
return pDevice->__deviceCtrlCmdDmaGetPdeInfo__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdDmaSetPteInfo_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdDmaSetPteInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) {
return pDevice->__deviceCtrlCmdDmaSetPteInfo__(pDevice, pParams);
}
NV_STATUS deviceCtrlCmdDmaInvalidateTLB_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams);
static inline NV_STATUS deviceCtrlCmdDmaInvalidateTLB_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams) {

View File

@@ -245,40 +245,6 @@ dmaFreeMap_IMPL
return status;
}
//
// deviceCtrlCmdDmaGetPteInfo_IMPL
//
// Lock Requirements:
// Assert that API lock and GPUs lock held on entry
//
NV_STATUS
deviceCtrlCmdDmaGetPteInfo_IMPL
(
Device *pDevice,
NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams
)
{
OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
OBJVASPACE *pVAS = NULL;
NV_STATUS status = NV_OK;
// Fetch the legacy control parameters from the TLS call context; the
// control call's object handle is needed to resolve the target VA space.
CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams;
// Per the lock requirements above, both the RM API lock and the GPU lock
// must already be held by the caller; bail out with an assert otherwise.
LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());
// Resolve pParams->hVASpace for this client/object pair. NOTE(review): the
// helper name suggests a 0 handle selects the device default VA space --
// confirm. On failure this macro returns early with the error status.
NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), pRmCtrlParams->hObject,
pParams->hVASpace, &pVAS));
// Query the PTE info into pParams (last argument selects no override block).
status = vaspaceGetPteInfo(pVAS, pGpu, pParams, NULL);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "vaspaceGetPteInfo failed\n");
}
return status;
}
//
// deviceCtrlCmdDmaUpdatePde2_IMPL
//
@@ -591,39 +557,6 @@ deviceCtrlCmdDmaUnsetPageDirectory_IMPL
return status;
}
//
// deviceCtrlCmdDmaSetPteInfo_IMPL
//
// Lock Requirements:
// Assert that API lock and GPUs lock held on entry
//
NV_STATUS
deviceCtrlCmdDmaSetPteInfo_IMPL
(
    Device *pDevice,
    NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams
)
{
    OBJGPU     *pGpu   = GPU_RES_GET_GPU(pDevice);
    OBJVASPACE *pVAS   = NULL;
    NV_STATUS   status = NV_OK;

    // Per the lock requirements above, both the RM API lock and the GPU lock
    // must already be held by the caller; bail out with an assert otherwise.
    LOCK_ASSERT_AND_RETURN(rmapiLockIsOwner() && rmGpuLockIsOwner());

    // Resolve pParams->hVASpace against this device. NOTE(review): the helper
    // name suggests a 0 handle selects the device default VA space -- confirm.
    // On failure this macro returns early with the error status.
    NV_CHECK_OK_OR_RETURN(LEVEL_WARNING,
        vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice),
                                          pParams->hVASpace, &pVAS));

    // Apply the caller-supplied PTE info to the VA space.
    status = vaspaceSetPteInfo(pVAS, pGpu, pParams);
    if (status != NV_OK)
    {
        // Fix: the original message named vaspaceGetPteInfo, but the failing
        // call on the line above is vaspaceSetPteInfo (copy-paste error).
        NV_PRINTF(LEVEL_ERROR, "vaspaceSetPteInfo failed\n");
        NV_ASSERT(0);
    }

    return status;
}
//
// deviceCtrlCmdDmaFlush_IMPL
//

View File

@@ -362,7 +362,7 @@ fabricvaspaceAllocNonContiguous_IMPL
{
NV_STATUS status = NV_OK;
NvU64 freeSize = 0;
NvU32 pageCount = (size / pageSize);
NvU32 pageCount;
NvU64 addr;
NvU32 idx;
NvBool bDefaultAllocMode;
@@ -379,6 +379,21 @@ fabricvaspaceAllocNonContiguous_IMPL
NV_ASSERT_OR_RETURN(NV_IS_ALIGNED64(align, pageSize), NV_ERR_INVALID_ARGUMENT);
NV_ASSERT_OR_RETURN(NV_IS_ALIGNED64(size, pageSize), NV_ERR_INVALID_ARGUMENT);
{
//
// Calculate page count and check for integer truncation.
// size / pageSize could exceed NvU32 max, causing undersized allocation.
//
NvU64 pageCount64 = size / pageSize;
if (pageCount64 > NV_U32_MAX)
{
NV_PRINTF(LEVEL_ERROR,
"Page count 0x%llx exceeds NvU32 max\n", pageCount64);
return NV_ERR_INVALID_ARGUMENT;
}
pageCount = (NvU32)pageCount64;
}
// Check if heap can satisfy the request.
NV_ASSERT_OK_OR_RETURN(fabricvaspaceGetFreeHeap(pFabricVAS, &freeSize));
if (freeSize < size)

View File

@@ -761,6 +761,14 @@ continue_alloc_object:
pHwResource = NULL;
}
if (src_hHwResHandle != 0 && pHwResource != NULL)
{
/* Increment the refCount for the HW resource, which is not allocated inside
 * memlistConstruct_IMPL. This prevents the HwResource from being freed while
 * the source HW resource handle is still active.
 */
pMemory->pHwResource->refCount += 1;
}
memdescSetPteKind(pMemory->pMemDesc, pAllocParams->format);
memdescSetHwResId(pMemory->pMemDesc, hwResId);

View File

@@ -311,9 +311,8 @@ void vgpuDevWriteReg032(
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
if(!pGpu ||
!pHypervisor || !pHypervisor->bDetected || !pHypervisor->bIsHVMGuest ||
!GPU_GET_KERNEL_BIF(pGpu))
if (!pGpu || !GPU_GET_KERNEL_BIF(pGpu) ||
(!IS_VIRTUAL(pGpu) && !(pHypervisor && pHypervisor->bDetected && pHypervisor->bIsHVMGuest)))
{
*vgpuHandled = NV_FALSE;
return;
@@ -382,7 +381,6 @@ NvU32 vgpuDevReadReg032(
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
if (!pGpu || !GPU_GET_KERNEL_BIF(pGpu) ||
(!IS_VIRTUAL(pGpu) && !(pHypervisor && pHypervisor->bDetected && pHypervisor->bIsHVMGuest)))
{

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -22,6 +22,7 @@
*/
#define NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED
#include "nvlog_inc.h"
#include "resserv/resserv.h"
#include "resserv/rs_server.h"
@@ -1416,7 +1417,10 @@ serverCopyResource
status = clientGetResourceRef(pClientSrc, pParams->hResourceSrc, &pResourceRefSrc);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_NOTICE, "Failed to find handle 0x%x under client 0x%x\n", pParams->hResourceSrc, pParams->hClientSrc);
goto done;
}
if (pResourceRefSrc->bInvalidated)
{
@@ -1439,7 +1443,7 @@ serverCopyResource
status = serverUpdateLockFlagsForCopy(pServer, pParams);
if (status != NV_OK)
return status;
goto done;
status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags);
if (status != NV_OK)

View File

@@ -1,4 +1,4 @@
NVIDIA_VERSION = 535.288.01
NVIDIA_VERSION = 535.309.01
# This file.
VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))