Mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
590.48.01
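Every hunk in this commit follows the same pattern: call sites that manipulate a usage_count field through the driver's NV_ATOMIC_* wrappers are switched to the Linux kernel's native 64-bit atomic API (atomic64_set, atomic64_read, atomic64_inc, atomic64_dec_and_test from linux/atomic.h), which operates on a pointer to an atomic64_t. The nv_open_device() hunk additionally drops the explicit ">= NV_S32_MAX" / -EMFILE guard, which is why it shrinks from 13 lines to 10. The sketch below is only an illustration of that API in the shape these hunks use; the struct and function names are hypothetical and not taken from the driver.

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical holder for a 64-bit usage counter, standing in for the
 * nvl->usage_count / at->usage_count fields touched in the hunks below. */
struct demo_state {
    atomic64_t usage_count;
};

static void demo_init(struct demo_state *st)
{
    atomic64_set(&st->usage_count, 0);      /* was NV_ATOMIC_SET() */
}

static void demo_get(struct demo_state *st)
{
    atomic64_inc(&st->usage_count);         /* was NV_ATOMIC_INC() */
}

static bool demo_put(struct demo_state *st)
{
    /* Returns true only for the decrement that reaches zero, i.e. the
     * last reference; was NV_ATOMIC_DEC_AND_TEST(). */
    return atomic64_dec_and_test(&st->usage_count);
}

static s64 demo_refs(struct demo_state *st)
{
    return atomic64_read(&st->usage_count); /* was NV_ATOMIC_READ() */
}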
@@ -419,7 +419,7 @@ nv_alloc_t *nvos_create_alloc(
         return NULL;
     }
 
-    NV_ATOMIC_SET(at->usage_count, 0);
+    atomic64_set(&at->usage_count, 0);
     at->pid = os_get_current_process();
     at->dev = dev;
 
@@ -434,7 +434,7 @@ int nvos_free_alloc(
     if (at == NULL)
         return -1;
 
-    if (NV_ATOMIC_READ(at->usage_count))
+    if (atomic64_read(&at->usage_count))
         return 1;
 
     kvfree(at->page_table);
@@ -1656,13 +1656,10 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
         return -ENODEV;
     }
 
-    if (unlikely(NV_ATOMIC_READ(nvl->usage_count) >= NV_S32_MAX))
-        return -EMFILE;
-
     if ( ! (nv->flags & NV_FLAG_OPEN))
     {
         /* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */
-        if (NV_ATOMIC_READ(nvl->usage_count) != 0)
+        if (atomic64_read(&nvl->usage_count) != 0)
         {
             NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                 "Minor device %u is referenced without being open!\n",
@@ -1684,7 +1681,7 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
 
     nv_assert_not_in_gpu_exclusion_list(sp, nv);
 
-    NV_ATOMIC_INC(nvl->usage_count);
+    atomic64_inc(&nvl->usage_count);
 
     return 0;
 }
@@ -2100,7 +2097,7 @@ static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp)
 {
     nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
 
-    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    if (atomic64_read(&nvl->usage_count) == 0)
     {
         nv_printf(NV_DBG_ERRORS,
             "NVRM: Attempting to close unopened minor device %u!\n",
@@ -2109,7 +2106,7 @@ static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp)
         return;
     }
 
-    if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
+    if (atomic64_dec_and_test(&nvl->usage_count))
         nv_stop_device(nv, sp);
 }
 
@@ -2154,7 +2151,7 @@ nvidia_close_callback(
     nv_close_device(nv, sp);
 
     bRemove = (!NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) &&
-              (NV_ATOMIC_READ(nvl->usage_count) == 0) &&
+              (atomic64_read(&nvl->usage_count) == 0) &&
              rm_get_device_remove_flag(sp, nv->gpu_id);
 
     nv_free_file_private(nvlfp);
@@ -2173,7 +2170,7 @@ nvidia_close_callback(
      * any cleanup related to linux layer locks and nv linux state struct.
      * nvidia_pci_remove when scheduled will do necessary cleanup.
      */
-    if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed)
+    if ((atomic64_read(&nvl->usage_count) == 0) && nv->removed)
     {
         nv_lock_destroy_locks(sp, nv);
         NV_KFREE(nvl, sizeof(nv_linux_state_t));
@@ -2693,7 +2690,7 @@ nvidia_ioctl(
             * Only the current client should have an open file
             * descriptor for the device, to allow safe offlining.
             */
-            if (NV_ATOMIC_READ(nvl->usage_count) > 1)
+            if (atomic64_read(&nvl->usage_count) > 1)
            {
                status = -EBUSY;
                goto unlock;
@@ -3082,12 +3079,12 @@ nvidia_ctl_open(
     /* save the nv away in file->private_data */
     nvlfp->nvptr = nvl;
 
-    if (NV_ATOMIC_READ(nvl->usage_count) == 0)
+    if (atomic64_read(&nvl->usage_count) == 0)
     {
         nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL);
     }
 
-    NV_ATOMIC_INC(nvl->usage_count);
+    atomic64_inc(&nvl->usage_count);
     up(&nvl->ldata_lock);
 
     return 0;
@@ -3112,7 +3109,7 @@ nvidia_ctl_close(
     nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n");
 
     down(&nvl->ldata_lock);
-    if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count))
+    if (atomic64_dec_and_test(&nvl->usage_count))
     {
         nv->flags &= ~NV_FLAG_OPEN;
     }
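The two hunks above pair a first-open / last-close idiom with the new counter: nvidia_ctl_open() sets NV_FLAG_OPEN while the count is still zero and then increments, and nvidia_ctl_close() clears the flag only when atomic64_dec_and_test() reports the count has returned to zero, both under the nvl->ldata_lock semaphore. A minimal sketch of that idiom, with hypothetical names everywhere except the atomic64_* and semaphore calls (the semaphore is assumed to be sema_init()'d elsewhere):

#include <linux/atomic.h>
#include <linux/semaphore.h>

#define DEMO_FLAG_OPEN 0x0001

/* Hypothetical control-device state; lock plays the role of ldata_lock. */
struct demo_ctl {
    struct semaphore lock;
    atomic64_t usage_count;
    unsigned int flags;
};

static int demo_ctl_open(struct demo_ctl *c)
{
    down(&c->lock);
    if (atomic64_read(&c->usage_count) == 0)
        c->flags |= DEMO_FLAG_OPEN;       /* first opener brings the device up */
    atomic64_inc(&c->usage_count);
    up(&c->lock);
    return 0;
}

static void demo_ctl_close(struct demo_ctl *c)
{
    down(&c->lock);
    if (atomic64_dec_and_test(&c->usage_count))
        c->flags &= ~DEMO_FLAG_OPEN;      /* last closer tears the device down */
    up(&c->lock);
}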
@@ -3275,7 +3272,7 @@ nv_alias_pages(
 
     at->guest_id = guest_id;
     *priv_data = at;
-    NV_ATOMIC_INC(at->usage_count);
+    atomic64_inc(&at->usage_count);
 
     NV_PRINT_AT(NV_DBG_MEMINFO, at);
 
@@ -3588,7 +3585,7 @@ NV_STATUS NV_API_CALL nv_register_sgt(
 
     at->order = get_order(at->num_pages * PAGE_SIZE);
 
-    NV_ATOMIC_INC(at->usage_count);
+    atomic64_inc(&at->usage_count);
 
     *priv_data = at;
 
@@ -3619,7 +3616,7 @@ void NV_API_CALL nv_unregister_sgt(
         *import_priv = at->import_priv;
     }
 
-    if (NV_ATOMIC_DEC_AND_TEST(at->usage_count))
+    if (atomic64_dec_and_test(&at->usage_count))
     {
         nvos_free_alloc(at);
     }
@@ -3892,7 +3889,7 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
     }
 
     *priv_data = at;
-    NV_ATOMIC_INC(at->usage_count);
+    atomic64_inc(&at->usage_count);
 
     NV_PRINT_AT(NV_DBG_MEMINFO, at);
 
@@ -3928,7 +3925,7 @@ NV_STATUS NV_API_CALL nv_free_pages(
      * This is described in greater detail in the comments above the
      * nvidia_vma_(open|release)() callbacks in nv-mmap.c.
      */
-    if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count))
+    if (!atomic64_dec_and_test(&at->usage_count))
         return NV_OK;
 
     if (!at->flags.guest && !at->import_sgt)
@@ -3957,7 +3954,7 @@ NvBool nv_lock_init_locks
     NV_INIT_MUTEX(&nvl->mmap_lock);
     NV_INIT_MUTEX(&nvl->open_q_lock);
 
-    NV_ATOMIC_SET(nvl->usage_count, 0);
+    atomic64_set(&nvl->usage_count, 0);
 
     if (!rm_init_event_locks(sp, nv))
         return NV_FALSE;