580.94.13

This commit is contained in:
russellcnv
2025-12-18 14:38:12 -08:00
parent 58a0f49bed
commit a3af2867b7
94 changed files with 77091 additions and 74546 deletions

View File

@@ -79,7 +79,7 @@ ccflags-y += -I$(src)/common/inc
ccflags-y += -I$(src)
ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
ccflags-y += -D__KERNEL__ -DMODULE -DNVRM
ccflags-y += -DNV_VERSION_STRING=\"580.94.11\"
ccflags-y += -DNV_VERSION_STRING=\"580.94.13\"
# Include and link Tegra out-of-tree modules.
ifneq ($(wildcard /usr/src/nvidia/nvidia-oot),)
@@ -187,6 +187,7 @@ NV_CONFTEST_CFLAGS += $(filter -std=%,$(KBUILD_CFLAGS))
NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign)
NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,)
NV_CONFTEST_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types,)
NV_CONFTEST_CFLAGS += $(call cc-option,-fms-extensions,)
NV_CONFTEST_CFLAGS += -Wno-error
NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -59,8 +59,9 @@
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_DEFAULT_ON_WS_SERVER 0x00000020
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012

View File

@@ -1294,7 +1294,8 @@ struct nv_pci_tegra_devfreq_dev;
typedef struct nv_linux_state_s {
nv_state_t nv_state;
atomic_t usage_count;
atomic_t usage_count;
NvU32 suspend_count;
struct device *dev;

View File

@@ -32,18 +32,6 @@
#include <linux/semaphore.h>
#include <linux/sched/signal.h> /* signal_pending */
#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
typedef raw_spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock) DEFINE_RAW_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) raw_spin_unlock_irq(lock)
#define NV_SPIN_LOCK_IRQSAVE(lock,flags) raw_spin_lock_irqsave(lock,flags)
#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) raw_spin_unlock_irqrestore(lock,flags)
#define NV_SPIN_LOCK(lock) raw_spin_lock(lock)
#define NV_SPIN_UNLOCK(lock) raw_spin_unlock(lock)
#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock)
#else
typedef spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock) DEFINE_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock)
@@ -54,7 +42,6 @@ typedef spinlock_t nv_spinlock_t;
#define NV_SPIN_LOCK(lock) spin_lock(lock)
#define NV_SPIN_UNLOCK(lock) spin_unlock(lock)
#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock)
#endif
#define NV_INIT_MUTEX(mutex) sema_init(mutex, 1)

View File

@@ -36,6 +36,19 @@
#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000)
#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000)
/*
 * Hard-IRQ context check that works across kernel versions.
 *
 * Kernels since v5.11-rc1 provide in_hardirq() as the preferred
 * replacement for in_irq(); older kernels only have in_irq().
 * Both are macros, so feature presence can be probed with defined().
 */
static inline NvBool nv_in_hardirq(void)
{
#if !defined(in_hardirq)
    /* Pre-v5.11 kernel: in_hardirq() is unavailable, use in_irq(). */
    return in_irq();
#else
    return in_hardirq();
#endif
}
#if !defined(NV_KTIME_GET_RAW_TS64_PRESENT)
static inline void ktime_get_raw_ts64(struct timespec64 *ts64)
{
@@ -82,7 +95,7 @@ static inline NV_STATUS nv_sleep_us(unsigned int us)
ktime_get_raw_ts64(&tm1);
#endif
if (in_irq() && (us > NV_MAX_ISR_DELAY_US))
if (nv_in_hardirq() && (us > NV_MAX_ISR_DELAY_US))
return NV_ERR_GENERIC;
mdelay_safe_msec = us / 1000;
@@ -127,7 +140,7 @@ static inline NV_STATUS nv_sleep_ms(unsigned int ms)
tm_start = tm_aux;
#endif
if (in_irq() && (ms > NV_MAX_ISR_DELAY_MS))
if (nv_in_hardirq() && (ms > NV_MAX_ISR_DELAY_MS))
{
return NV_ERR_GENERIC;
}

View File

@@ -1330,6 +1330,27 @@ compile_test() {
compile_check_conftest "$CODE" "NV_EVENTFD_SIGNAL_HAS_COUNTER_ARG" "" "types"
;;
get_dev_pagemap_has_pgmap_arg)
#
# Determine if the get_dev_pagemap() function has an additional
# 'pgmap' argument.
#
# This argument was removed by commit dd57f5feb19a
# (mm/memremap: remove unused get_dev_pagemap() parameter)
# in linux-next, expected in v6.18.
#
CODE="
#include <linux/memremap.h>
struct dev_pagemap *get_dev_pagemap_has_pgmap_arg(void) {
struct dev_pagemap *pgmap;
get_dev_pagemap(0, pgmap);
}"
compile_check_conftest "$CODE" "NV_GET_DEV_PAGEMAP_HAS_PGMAP_ARG" "" "types"
;;
drm_available)
# Determine if the DRM subsystem is usable
CODE="

View File

@@ -1559,7 +1559,7 @@ static int __nv_drm_cursor_atomic_check(struct drm_plane *plane,
WARN_ON(nv_plane->layer_idx != NVKMS_KAPI_LAYER_INVALID_IDX);
nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
for_each_new_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state);
struct NvKmsKapiHeadRequestedConfig *head_req_config =
&nv_crtc_state->req_config;
@@ -1605,7 +1605,7 @@ static int nv_drm_plane_atomic_check(struct drm_plane *plane,
WARN_ON(nv_plane->layer_idx == NVKMS_KAPI_LAYER_INVALID_IDX);
nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
for_each_new_crtc_in_state(plane_state->state, crtc, crtc_state, i) {
struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state);
struct NvKmsKapiHeadRequestedConfig *head_req_config =
&nv_crtc_state->req_config;
@@ -2435,7 +2435,7 @@ static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc,
req_config->flags.displaysChanged = NV_TRUE;
nv_drm_for_each_connector_in_state(crtc_state->state,
for_each_new_connector_in_state(crtc_state->state,
connector, connector_state, j) {
if (connector_state->crtc != crtc) {
continue;

View File

@@ -1717,6 +1717,11 @@ static long nv_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return retcode;
}
/*
 * No-op implementation of the drm_driver .load hook.
 *
 * Always succeeds (returns 0); the 'dev' and 'flags' arguments are
 * ignored. NOTE(review): presumably installed as a non-NULL .load
 * callback to satisfy DRM cores that invoke it unconditionally during
 * drm_dev_register() — confirm against the registration site.
 */
static int nv_drm_load_noop(struct drm_device *dev, unsigned long flags)
{
return 0;
}
static const struct file_operations nv_drm_fops = {
.owner = THIS_MODULE,
@@ -1899,6 +1904,8 @@ static struct drm_driver nv_drm_driver = {
.gem_prime_res_obj = nv_drm_gem_prime_res_obj,
#endif
.load = nv_drm_load_noop,
.postclose = nv_drm_postclose,
.open = nv_drm_open,

View File

@@ -54,7 +54,7 @@
* drm_atomic_helper_disable_all() is copied from
* linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use
* nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs,
* use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to loop
* use for_each_new_*_in_state instead of for_each_connector_in_state to loop
* over all modeset object states, and use drm_atomic_state_free() if
* drm_atomic_state_put() is not available.
*
@@ -139,13 +139,13 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
plane_state->rotation = DRM_MODE_ROTATE_0;
}
nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret < 0)
goto free;
}
nv_drm_for_each_plane_in_state(state, plane, plane_state, i) {
for_each_new_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret < 0)
goto free;

View File

@@ -138,154 +138,6 @@ nv_drm_prime_pages_to_sg(struct drm_device *dev,
int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
/*
* for_each_connector_in_state(), for_each_crtc_in_state() and
* for_each_plane_in_state() were added by kernel commit
* df63b9994eaf942afcdb946d27a28661d7dfbf2a which was Signed-off-by:
* Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* for_each_connector_in_state(), for_each_crtc_in_state() and
* for_each_plane_in_state() were copied from
* include/drm/drm_atomic.h @
* 21a01abbe32a3cbeb903378a24e504bfd9fe0648
* which has the following copyright and license information:
*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2014 Intel Corp.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
/**
* nv_drm_for_each_connector_in_state - iterate over all connectors in an
* atomic update
* @__state: &struct drm_atomic_state pointer
* @connector: &struct drm_connector iteration cursor
* @connector_state: &struct drm_connector_state iteration cursor
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all connectors in an atomic update. Note that before the
* software state is committed (by calling drm_atomic_helper_swap_state(), this
* points to the new state, while afterwards it points to the old state. Due to
* this tricky confusion this macro is deprecated.
*/
#if !defined(for_each_connector_in_state)
#define nv_drm_for_each_connector_in_state(__state, \
connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_connector && \
((connector) = (__state)->connectors[__i].ptr, \
(connector_state) = (__state)->connectors[__i].state, 1); \
(__i)++) \
for_each_if (connector)
#else
#define nv_drm_for_each_connector_in_state(__state, \
connector, connector_state, __i) \
for_each_connector_in_state(__state, connector, connector_state, __i)
#endif
/**
* nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update
* @__state: &struct drm_atomic_state pointer
* @crtc: &struct drm_crtc iteration cursor
* @crtc_state: &struct drm_crtc_state iteration cursor
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all CRTCs in an atomic update. Note that before the
* software state is committed (by calling drm_atomic_helper_swap_state(), this
* points to the new state, while afterwards it points to the old state. Due to
* this tricky confusion this macro is deprecated.
*/
#if !defined(for_each_crtc_in_state)
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->dev->mode_config.num_crtc && \
((crtc) = (__state)->crtcs[__i].ptr, \
(crtc_state) = (__state)->crtcs[__i].state, 1); \
(__i)++) \
for_each_if (crtc_state)
#else
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for_each_crtc_in_state(__state, crtc, crtc_state, __i)
#endif
/**
* nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
* @plane_state: &struct drm_plane_state iteration cursor
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all planes in an atomic update. Note that before the
* software state is committed (by calling drm_atomic_helper_swap_state(), this
* points to the new state, while afterwards it points to the old state. Due to
* this tricky confusion this macro is deprecated.
*/
#if !defined(for_each_plane_in_state)
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->dev->mode_config.num_total_plane && \
((plane) = (__state)->planes[__i].ptr, \
(plane_state) = (__state)->planes[__i].state, 1); \
(__i)++) \
for_each_if (plane_state)
#else
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
for_each_plane_in_state(__state, plane, plane_state, __i)
#endif
/*
* for_each_new_plane_in_state() was added by kernel commit
* 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by:
* Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* This commit also added the old_state and new_state pointers to
* __drm_planes_state. Because of this, the best that can be done on kernel
* versions without this macro is for_each_plane_in_state.
*/
/**
* nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
* @new_plane_state: &struct drm_plane_state iteration cursor for the new state
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all planes in an atomic update, tracking only the new
* state. This is useful in enable functions, where we need the new state the
* hardware should be in when the atomic commit operation has completed.
*/
#if !defined(for_each_new_plane_in_state)
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
#else
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
#endif
#include <drm/drm_auth.h>
#include <drm/drm_file.h>

View File

@@ -108,8 +108,11 @@ static bool __will_generate_flip_event(struct drm_crtc *crtc,
return false;
}
/* Find out whether primary & overlay flip done events will be generated. */
nv_drm_for_each_plane_in_state(old_crtc_state->state,
/*
* Find out whether primary & overlay flip done events will be generated.
* Only called after drm_atomic_helper_swap_state, so we use old state.
*/
for_each_old_plane_in_state(old_crtc_state->state,
plane, old_plane_state, i) {
if (old_plane_state->crtc != crtc) {
continue;
@@ -193,7 +196,7 @@ static int __nv_drm_convert_in_fences(
return 0;
}
nv_drm_for_each_new_plane_in_state(state, plane, plane_state, i) {
for_each_new_plane_in_state(state, plane, plane_state, i) {
if ((plane->type == DRM_PLANE_TYPE_CURSOR) ||
(plane_state->crtc != crtc) ||
(plane_state->fence == NULL)) {
@@ -334,7 +337,8 @@ static int __nv_drm_get_syncpt_data(
head_reply_config = &reply_config->headReplyConfig[nv_crtc->head];
nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
/* Use old state because this is only called after drm_atomic_helper_swap_state */
for_each_old_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
struct nv_drm_plane *nv_plane = to_nv_plane(plane);
if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) {
@@ -395,7 +399,7 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
&(to_nv_atomic_state(state)->config);
struct NvKmsKapiModeSetReplyConfig reply_config = { };
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
int ret;
@@ -429,18 +433,10 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
memset(requested_config, 0, sizeof(*requested_config));
/* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* When committing a state, the new state is already stored in
* crtc->state. When checking a proposed state, the proposed state is
* stored in crtc_state.
*/
struct drm_crtc_state *new_crtc_state =
commit ? crtc->state : crtc_state;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
if (commit) {
struct drm_crtc_state *old_crtc_state = crtc_state;
struct nv_drm_crtc_state *nv_new_crtc_state =
to_nv_crtc_state(new_crtc_state);
@@ -497,10 +493,11 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
}
if (commit && nv_dev->supportsSyncpts) {
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
/* commit is true so we check old state */
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
/*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */
ret = __nv_drm_get_syncpt_data(
nv_dev, crtc, crtc_state, requested_config, &reply_config);
nv_dev, crtc, old_crtc_state, requested_config, &reply_config);
if (ret != 0) {
return ret;
}
@@ -523,7 +520,7 @@ int nv_drm_atomic_check(struct drm_device *dev,
struct drm_crtc_state *crtc_state;
int i;
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
/*
* if the color management changed on the crtc, we need to update the
* crtc's plane's CSC matrices, so add the crtc's planes to the commit
@@ -619,7 +616,7 @@ int nv_drm_atomic_commit(struct drm_device *dev,
* Our system already implements such a queue, but due to
* bug 4054608, it is currently not used.
*/
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
/*
@@ -726,7 +723,7 @@ int nv_drm_atomic_commit(struct drm_device *dev,
goto done;
}
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
struct nv_drm_crtc_state *nv_new_crtc_state =
to_nv_crtc_state(crtc->state);

View File

@@ -30,6 +30,7 @@
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#include <drm/drm_print.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

View File

@@ -61,6 +61,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_arch_invalidate_secondary_tlb
NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_device_range
NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += get_dev_pagemap_has_pgmap_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
NV_CONFTEST_TYPE_COMPILE_TESTS += mpol_preferred_many_present

View File

@@ -132,6 +132,12 @@
#define NV_UVM_GFP_FLAGS (GFP_KERNEL | __GFP_NOMEMALLOC)
#if defined(NV_GET_DEV_PAGEMAP_HAS_PGMAP_ARG)
#define NV_GET_DEV_PAGEMAP(pfn) get_dev_pagemap(pfn, NULL)
#else
#define NV_GET_DEV_PAGEMAP get_dev_pagemap
#endif
/* Return a nanosecond-precise value */
static inline NvU64 NV_GETTIME(void)
{

View File

@@ -43,8 +43,6 @@
#ifdef UVM_MIGRATE_VMA_SUPPORTED
static struct kmem_cache *g_uvm_migrate_vma_state_cache __read_mostly;
static const gfp_t g_migrate_vma_gfp_flags = NV_UVM_GFP_FLAGS | GFP_HIGHUSER_MOVABLE | __GFP_THISNODE;
static uvm_sgt_t *uvm_select_sgt(uvm_processor_id_t src_id, int src_nid, migrate_vma_state_t *state)
@@ -1497,7 +1495,7 @@ NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args)
uvm_migrate_args->dst_node_id = uvm_gpu_numa_node(gpu);
}
state = nv_kmem_cache_zalloc(g_uvm_migrate_vma_state_cache, NV_UVM_GFP_FLAGS);
state = uvm_kvmalloc_zero(sizeof(migrate_vma_state_t));
if (!state)
return NV_ERR_NO_MEMORY;
@@ -1519,22 +1517,17 @@ NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args)
out:
uvm_kvfree(state->dma.sgt_cpu);
uvm_kvfree(state->cpu_page_mask);
kmem_cache_free(g_uvm_migrate_vma_state_cache, state);
uvm_kvfree(state);
return status;
}
NV_STATUS uvm_migrate_pageable_init(void)
{
g_uvm_migrate_vma_state_cache = NV_KMEM_CACHE_CREATE("migrate_vma_state_t", migrate_vma_state_t);
if (!g_uvm_migrate_vma_state_cache)
return NV_ERR_NO_MEMORY;
return NV_OK;
}
void uvm_migrate_pageable_exit(void)
{
kmem_cache_destroy_safe(&g_uvm_migrate_vma_state_cache);
}
#endif

View File

@@ -360,7 +360,7 @@ static NV_STATUS alloc_device_p2p_mem(uvm_gpu_t *gpu,
// a reference to them, so take one now if using DEVICE_COHERENT pages.
if (gpu->parent->cdmm_enabled) {
get_page(page);
get_dev_pagemap(page_to_pfn(page), NULL);
NV_GET_DEV_PAGEMAP(page_to_pfn(page));
}
#else
// CDMM P2PDMA will never be enabled for this case

View File

@@ -468,9 +468,28 @@ nv_dma_buf_dup_mem_handles(
return NV_OK;
failed:
nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv);
if (!priv->acquire_release_all_gpu_lock_on_dup)
{
//
// Undup requires taking all-GPUs lock.
// So if single GPU lock was taken,
// release it first so all-GPUs lock can be taken in
// nv_dma_buf_undup_mem_handles().
//
nv_dma_buf_release_gpu_lock(sp, priv);
nv_dma_buf_release_gpu_lock(sp, priv);
nv_dma_buf_undup_mem_handles(sp, params->index, count, priv);
}
else
{
//
// Here, all-GPUs lock is already taken, so undup the handles under
// the unlocked version of the function and then release the locks.
//
nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv);
nv_dma_buf_release_gpu_lock(sp, priv);
}
unlock_api_lock:
rm_release_api_lock(sp);

View File

@@ -1310,11 +1310,6 @@ nv_pci_probe
nv_printf(NV_DBG_SETUP, "NVRM: probing 0x%x 0x%x, class 0x%x\n",
pci_dev->vendor, pci_dev->device, pci_dev->class);
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return -1;
}
#ifdef NV_PCI_SRIOV_SUPPORT
if (pci_dev->is_virtfn)
{
@@ -1330,21 +1325,25 @@ nv_pci_probe
"since IOMMU is not present on the system.\n",
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
goto failed;
return -1;
}
nv_kmem_cache_free_stack(sp);
return 0;
#else
nv_printf(NV_DBG_ERRORS, "NVRM: Ignoring probe for VF %04x:%02x:%02x.%x ",
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
goto failed;
return -1;
#endif /* NV_VGPU_KVM_BUILD */
}
#endif /* NV_PCI_SRIOV_SUPPORT */
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return -1;
}
if (!rm_is_supported_pci_device(
(pci_dev->class >> 16) & 0xFF,
(pci_dev->class >> 8) & 0xFF,

View File

@@ -1675,6 +1675,7 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
nv_assert_not_in_gpu_exclusion_list(sp, nv);
NV_ATOMIC_INC(nvl->usage_count);
return 0;
}

View File

@@ -371,7 +371,7 @@ NvBool NV_API_CALL os_semaphore_may_sleep(void)
NvBool NV_API_CALL os_is_isr(void)
{
return (in_irq());
return (nv_in_hardirq());
}
// return TRUE if the caller is the super-user