535.288.01

This commit is contained in:
Bernhard Stoeckner
2026-01-13 18:04:57 +01:00
parent 66ab8e8596
commit ef65a13097
15 changed files with 60 additions and 69 deletions

View File

@@ -72,7 +72,7 @@ nvidia_vma_open(struct vm_area_struct *vma)
if (at != NULL)
{
NV_ATOMIC_INC(at->usage_count);
atomic64_inc(&at->usage_count);
NV_PRINT_AT(NV_DBG_MEMINFO, at);
}
@@ -404,7 +404,7 @@ static int nvidia_mmap_sysmem(
int ret = 0;
unsigned long start = 0;
NV_ATOMIC_INC(at->usage_count);
atomic64_inc(&at->usage_count);
start = vma->vm_start;
for (j = page_index; j < (page_index + pages); j++)
@@ -436,7 +436,7 @@ static int nvidia_mmap_sysmem(
if (ret)
{
NV_ATOMIC_DEC(at->usage_count);
atomic64_dec(&at->usage_count);
return -EAGAIN;
}
start += PAGE_SIZE;

View File

@@ -798,7 +798,7 @@ nv_pci_remove(struct pci_dev *pci_dev)
 * For eGPU, falling off the bus while clients are still active is a valid
 * scenario. Hence we skip the sanity check for eGPU.
*/
if ((NV_ATOMIC_READ(nvl->usage_count) != 0) && !(nv->is_external_gpu))
if ((atomic64_read(&nvl->usage_count) != 0) && !(nv->is_external_gpu))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Attempting to remove device %04x:%02x:%02x.%x with non-zero usage count!\n",
@@ -809,7 +809,7 @@ nv_pci_remove(struct pci_dev *pci_dev)
* We can't return from this function without corrupting state, so we wait for
* the usage count to go to zero.
*/
while (NV_ATOMIC_READ(nvl->usage_count) != 0)
while (atomic64_read(&nvl->usage_count) != 0)
{
/*
@@ -865,7 +865,7 @@ nv_pci_remove(struct pci_dev *pci_dev)
#endif
/* Update the frontend data structures */
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
if (atomic64_read(&nvl->usage_count) == 0)
{
nvidia_frontend_remove_device((void *)&nv_fops, nvl);
}
@@ -890,7 +890,7 @@ nv_pci_remove(struct pci_dev *pci_dev)
nv_unregister_ibmnpu_devices(nv);
nv_destroy_ibmnpu_info(nv);
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
if (atomic64_read(&nvl->usage_count) == 0)
{
nv_lock_destroy_locks(sp, nv);
}
@@ -906,7 +906,7 @@ nv_pci_remove(struct pci_dev *pci_dev)
num_nv_devices--;
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
if (atomic64_read(&nvl->usage_count) == 0)
{
NV_PCI_DISABLE_DEVICE(pci_dev);
NV_KFREE(nvl, sizeof(nv_linux_state_t));

View File

@@ -889,7 +889,7 @@ nv_procfs_close_unbind_lock(
down(&nvl->ldata_lock);
if ((value == 1) && !(nv->flags & NV_FLAG_UNBIND_LOCK))
{
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
if (atomic64_read(&nvl->usage_count) == 0)
rm_unbind_lock(sp, nv);
if (nv->flags & NV_FLAG_UNBIND_LOCK)

View File

@@ -167,7 +167,7 @@ NvBool nv_get_rsync_relaxed_ordering_mode(
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
/* shouldn't be called without opening a device */
WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);
WARN_ON(atomic64_read(&nvl->usage_count) == 0);
/*
* g_rsync_info.relaxed_ordering_mode can be safely accessed outside of
@@ -185,7 +185,7 @@ void nv_wait_for_rsync(
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
/* shouldn't be called without opening a device */
WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);
WARN_ON(atomic64_read(&nvl->usage_count) == 0);
/*
* g_rsync_info.relaxed_ordering_mode can be safely accessed outside of

View File

@@ -311,7 +311,7 @@ nv_alloc_t *nvos_create_alloc(
}
memset(at->page_table, 0, pt_size);
NV_ATOMIC_SET(at->usage_count, 0);
atomic64_set(&at->usage_count, 0);
for (i = 0; i < at->num_pages; i++)
{
@@ -341,7 +341,7 @@ int nvos_free_alloc(
if (at == NULL)
return -1;
if (NV_ATOMIC_READ(at->usage_count))
if (atomic64_read(&at->usage_count))
return 1;
for (i = 0; i < at->num_pages; i++)
@@ -1455,13 +1455,10 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
return -ENODEV;
}
if (unlikely(NV_ATOMIC_READ(nvl->usage_count) >= NV_S32_MAX))
return -EMFILE;
if ( ! (nv->flags & NV_FLAG_OPEN))
{
/* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */
if (NV_ATOMIC_READ(nvl->usage_count) != 0)
if (atomic64_read(&nvl->usage_count) != 0)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Minor device %u is referenced without being open!\n",
@@ -1481,7 +1478,8 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
return -EBUSY;
}
NV_ATOMIC_INC(nvl->usage_count);
atomic64_inc(&nvl->usage_count);
return 0;
}
@@ -1780,7 +1778,7 @@ static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
if (atomic64_read(&nvl->usage_count) == 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: Attempting to close unopened minor device %u!\n",
@@ -1789,7 +1787,7 @@ static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp)
return;
}
if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
if (atomic64_dec_and_test(&nvl->usage_count))
nv_stop_device(nv, sp);
}
@@ -1820,7 +1818,7 @@ nvidia_close_callback(
nv_close_device(nv, sp);
bRemove = (!NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) &&
(NV_ATOMIC_READ(nvl->usage_count) == 0) &&
(atomic64_read(&nvl->usage_count) == 0) &&
rm_get_device_remove_flag(sp, nv->gpu_id);
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
@@ -1844,7 +1842,7 @@ nvidia_close_callback(
* any cleanup related to linux layer locks and nv linux state struct.
* nvidia_pci_remove when scheduled will do necessary cleanup.
*/
if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed)
if ((atomic64_read(&nvl->usage_count) == 0) && nv->removed)
{
nvidia_frontend_remove_device((void *)&nv_fops, nvl);
nv_lock_destroy_locks(sp, nv);
@@ -2309,7 +2307,7 @@ nvidia_ioctl(
* Only the current client should have an open file
* descriptor for the device, to allow safe offlining.
*/
if (NV_ATOMIC_READ(nvl->usage_count) > 1)
if (atomic64_read(&nvl->usage_count) > 1)
{
status = -EBUSY;
goto unlock;
@@ -2687,12 +2685,12 @@ nvidia_ctl_open(
/* save the nv away in file->private_data */
nvlfp->nvptr = nvl;
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
if (atomic64_read(&nvl->usage_count) == 0)
{
nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL);
}
NV_ATOMIC_INC(nvl->usage_count);
atomic64_inc(&nvl->usage_count);
up(&nvl->ldata_lock);
return 0;
@@ -2718,7 +2716,7 @@ nvidia_ctl_close(
nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n");
down(&nvl->ldata_lock);
if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
if (atomic64_dec_and_test(&nvl->usage_count))
{
nv->flags &= ~NV_FLAG_OPEN;
}
@@ -2887,7 +2885,7 @@ nv_alias_pages(
at->guest_id = guest_id;
*priv_data = at;
NV_ATOMIC_INC(at->usage_count);
atomic64_inc(&at->usage_count);
NV_PRINT_AT(NV_DBG_MEMINFO, at);
@@ -3462,7 +3460,7 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
}
*priv_data = at;
NV_ATOMIC_INC(at->usage_count);
atomic64_inc(&at->usage_count);
NV_PRINT_AT(NV_DBG_MEMINFO, at);
@@ -3498,7 +3496,7 @@ NV_STATUS NV_API_CALL nv_free_pages(
* This is described in greater detail in the comments above the
* nvidia_vma_(open|release)() callbacks in nv-mmap.c.
*/
if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count))
if (!atomic64_dec_and_test(&at->usage_count))
return NV_OK;
if (!at->flags.guest)
@@ -3526,7 +3524,7 @@ NvBool nv_lock_init_locks
NV_INIT_MUTEX(&nvl->ldata_lock);
NV_INIT_MUTEX(&nvl->mmap_lock);
NV_ATOMIC_SET(nvl->usage_count, 0);
atomic64_set(&nvl->usage_count, 0);
if (!rm_init_event_locks(sp, nv))
return NV_FALSE;