580.94.16

author russellcnv
date 2026-01-22 14:46:29 -08:00
parent a3af2867b7
commit c273d84a8b
68 changed files with 160855 additions and 160151 deletions
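Note on the change below: the hunks convert the driver's per-device and per-allocation usage_count handling from the NV_ATOMIC_* wrapper macros to the kernel's 64-bit atomic64_t helpers (atomic64_set, atomic64_read, atomic64_inc, atomic64_dec_and_test), and the wider counter is presumably why the old `>= NV_S32_MAX` / -EMFILE guard is dropped in the same nv_open_device hunk. The last two hunks separately plumb a new size_guest_phys out-parameter through nv_get_device_memory_config, filled from nvl->coherent_link_info.gpu_mem_size. For readers unfamiliar with the inc/dec-and-test reference-count pattern, here is a minimal userspace sketch using C11 stdatomic as a stand-in for the kernel's atomic64_t API; demo_device_t, demo_open, and demo_close are illustrative names, not part of the driver:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the driver's usage_count fields:
     * a 64-bit atomic reference count on a device-like object. */
    typedef struct {
        atomic_int_least64_t usage_count;   /* the kernel code uses atomic64_t */
    } demo_device_t;

    static void demo_open(demo_device_t *dev)
    {
        /* Corresponds to atomic64_inc(&nvl->usage_count) on the open path. */
        atomic_fetch_add(&dev->usage_count, 1);
    }

    static bool demo_close(demo_device_t *dev)
    {
        /* Corresponds to atomic64_dec_and_test(): true only when this
         * decrement drops the count to zero, i.e. the last reference. */
        return atomic_fetch_sub(&dev->usage_count, 1) == 1;
    }

    int main(void)
    {
        demo_device_t dev;
        atomic_init(&dev.usage_count, 0);   /* like atomic64_set(&..., 0) */

        demo_open(&dev);                    /* first open  -> count 1 */
        demo_open(&dev);                    /* second open -> count 2 */
        printf("last ref dropped: %d\n", demo_close(&dev));  /* prints 0 */
        printf("last ref dropped: %d\n", demo_close(&dev));  /* prints 1 */
        return 0;
    }

The dec-and-test shape matters because only the caller that observes the transition to zero may tear the object down (nv_stop_device, nvos_free_alloc), exactly as the callers in the diff do.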

@@ -413,7 +413,7 @@ nv_alloc_t *nvos_create_alloc(
         return NULL;
     }
 
-    NV_ATOMIC_SET(at->usage_count, 0);
+    atomic64_set(&at->usage_count, 0);
 
     at->pid = os_get_current_process();
     at->dev = dev;
@@ -428,7 +428,7 @@ int nvos_free_alloc(
     if (at == NULL)
         return -1;
 
-    if (NV_ATOMIC_READ(at->usage_count))
+    if (atomic64_read(&at->usage_count))
         return 1;
 
     kvfree(at->page_table);
@@ -1646,13 +1646,10 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
         return -ENODEV;
     }
 
-    if (unlikely(NV_ATOMIC_READ(nvl->usage_count) >= NV_S32_MAX))
-        return -EMFILE;
-
     if ( ! (nv->flags & NV_FLAG_OPEN))
     {
         /* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */
-        if (NV_ATOMIC_READ(nvl->usage_count) != 0)
+        if (atomic64_read(&nvl->usage_count) != 0)
         {
             NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                           "Minor device %u is referenced without being open!\n",
@@ -1674,7 +1671,7 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
     nv_assert_not_in_gpu_exclusion_list(sp, nv);
 
-    NV_ATOMIC_INC(nvl->usage_count);
+    atomic64_inc(&nvl->usage_count);
 
     return 0;
 }
@@ -2102,7 +2099,7 @@ static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp)
 {
     nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
 
-    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    if (atomic64_read(&nvl->usage_count) == 0)
     {
         nv_printf(NV_DBG_ERRORS,
                   "NVRM: Attempting to close unopened minor device %u!\n",
@@ -2111,7 +2108,7 @@ static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp)
         return;
     }
 
-    if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
+    if (atomic64_dec_and_test(&nvl->usage_count))
         nv_stop_device(nv, sp);
 }
@@ -2156,7 +2153,7 @@ nvidia_close_callback(
     nv_close_device(nv, sp);
 
     bRemove = (!NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) &&
-              (NV_ATOMIC_READ(nvl->usage_count) == 0) &&
+              (atomic64_read(&nvl->usage_count) == 0) &&
               rm_get_device_remove_flag(sp, nv->gpu_id);
 
     nv_free_file_private(nvlfp);
@@ -2175,7 +2172,7 @@ nvidia_close_callback(
      * any cleanup related to linux layer locks and nv linux state struct.
      * nvidia_pci_remove when scheduled will do necessary cleanup.
      */
-    if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed)
+    if ((atomic64_read(&nvl->usage_count) == 0) && nv->removed)
    {
         nv_lock_destroy_locks(sp, nv);
         NV_KFREE(nvl, sizeof(nv_linux_state_t));
@@ -2694,7 +2691,7 @@ nvidia_ioctl(
              * Only the current client should have an open file
              * descriptor for the device, to allow safe offlining.
              */
-            if (NV_ATOMIC_READ(nvl->usage_count) > 1)
+            if (atomic64_read(&nvl->usage_count) > 1)
             {
                 status = -EBUSY;
                 goto unlock;
@@ -3083,12 +3080,12 @@ nvidia_ctl_open(
     /* save the nv away in file->private_data */
     nvlfp->nvptr = nvl;
 
-    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    if (atomic64_read(&nvl->usage_count) == 0)
     {
         nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL);
     }
 
-    NV_ATOMIC_INC(nvl->usage_count);
+    atomic64_inc(&nvl->usage_count);
 
     up(&nvl->ldata_lock);
 
     return 0;
@@ -3113,7 +3110,7 @@ nvidia_ctl_close(
     nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n");
 
     down(&nvl->ldata_lock);
-    if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
+    if (atomic64_dec_and_test(&nvl->usage_count))
     {
         nv->flags &= ~NV_FLAG_OPEN;
     }
@@ -3276,7 +3273,7 @@ nv_alias_pages(
     at->guest_id = guest_id;
     *priv_data = at;
-    NV_ATOMIC_INC(at->usage_count);
+    atomic64_inc(&at->usage_count);
 
     NV_PRINT_AT(NV_DBG_MEMINFO, at);
@@ -3589,7 +3586,7 @@ NV_STATUS NV_API_CALL nv_register_sgt(
     at->order = get_order(at->num_pages * PAGE_SIZE);
 
-    NV_ATOMIC_INC(at->usage_count);
+    atomic64_inc(&at->usage_count);
 
     *priv_data = at;
@@ -3620,7 +3617,7 @@ void NV_API_CALL nv_unregister_sgt(
         *import_priv = at->import_priv;
     }
 
-    if (NV_ATOMIC_DEC_AND_TEST(at->usage_count))
+    if (atomic64_dec_and_test(&at->usage_count))
     {
         nvos_free_alloc(at);
     }
@@ -3893,7 +3890,7 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
     }
 
     *priv_data = at;
-    NV_ATOMIC_INC(at->usage_count);
+    atomic64_inc(&at->usage_count);
 
     NV_PRINT_AT(NV_DBG_MEMINFO, at);
@@ -3929,7 +3926,7 @@ NV_STATUS NV_API_CALL nv_free_pages(
      * This is described in greater detail in the comments above the
      * nvidia_vma_(open|release)() callbacks in nv-mmap.c.
      */
-    if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count))
+    if (!atomic64_dec_and_test(&at->usage_count))
         return NV_OK;
 
     if (!at->flags.guest && !at->import_sgt)
@@ -3958,7 +3955,7 @@ NvBool nv_lock_init_locks
     NV_INIT_MUTEX(&nvl->mmap_lock);
     NV_INIT_MUTEX(&nvl->open_q_lock);
 
-    NV_ATOMIC_SET(nvl->usage_count, 0);
+    atomic64_set(&nvl->usage_count, 0);
 
     if (!rm_init_event_locks(sp, nv))
         return NV_FALSE;
@@ -5342,6 +5339,7 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
     nv_state_t *nv,
     NvU64 *compr_addr_sys_phys,
     NvU64 *addr_guest_phys,
+    NvU64 *size_guest_phys,
     NvU64 *rsvd_phys,
     NvU32 *addr_width,
     NvS32 *node_id
@@ -5365,6 +5363,10 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
     {
         *addr_guest_phys = nvl->coherent_link_info.gpu_mem_pa;
     }
 
+    if (size_guest_phys)
+    {
+        *size_guest_phys = nvl->coherent_link_info.gpu_mem_size;
+    }
 
     if (rsvd_phys)
     {
         *rsvd_phys = nvl->coherent_link_info.rsvd_mem_pa;