545.23.06

Andy Ritger
2023-10-17 09:25:29 -07:00
parent f59818b751
commit b5bf85a8e3
917 changed files with 132480 additions and 110015 deletions

View File

@@ -50,10 +50,11 @@ extern "C" {
#define NVSWITCH_NSEC_PER_SEC 1000000000ULL
#define NVSWITCH_DBG_LEVEL_MMIO 0x0
#define NVSWITCH_DBG_LEVEL_INFO 0x1
#define NVSWITCH_DBG_LEVEL_SETUP 0x2
#define NVSWITCH_DBG_LEVEL_WARN 0x3
#define NVSWITCH_DBG_LEVEL_ERROR 0x4
#define NVSWITCH_DBG_LEVEL_NOISY 0x1
#define NVSWITCH_DBG_LEVEL_INFO 0x2
#define NVSWITCH_DBG_LEVEL_SETUP 0x3
#define NVSWITCH_DBG_LEVEL_WARN 0x4
#define NVSWITCH_DBG_LEVEL_ERROR 0x5
#define NVSWITCH_LOG_BUFFER_SIZE 512
@@ -337,7 +338,7 @@ nvswitch_lib_service_interrupts
);
/*
* @Brief : Get depth of error logs
* @Brief : Get depth of error logs and port event log
*
* @Description :
*
@@ -345,6 +346,7 @@ nvswitch_lib_service_interrupts
*
* @param[out] fatal Count of fatal errors
* @param[out] nonfatal Count of non-fatal errors
* @param[out] portEvent Count of port events
*
* @returns NVL_SUCCESS if there were no errors and interrupts were handled
* -NVL_NOT_FOUND if bad arguments provided
@@ -353,7 +355,7 @@ NvlStatus
nvswitch_lib_get_log_count
(
nvswitch_device *device,
NvU32 *fatal, NvU32 *nonfatal
NvU32 *fatal, NvU32 *nonfatal, NvU32 *portEvent
);
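A hedged caller sketch for the widened interface (variable names illustrative, not from the commit): the new portEvent out-parameter rides alongside the existing fatal/non-fatal counts.

NvU32 fatal = 0, nonfatal = 0, portEvent = 0;
NvlStatus st = nvswitch_lib_get_log_count(device, &fatal, &nonfatal, &portEvent);
if (st != NVL_SUCCESS)
{
    /* -NVL_NOT_FOUND indicates bad arguments (see @returns above) */
}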
/*

View File

@@ -294,7 +294,7 @@ nvswitch_i2c_add_adapter
NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev),
PCI_FUNC(pci_dev->devfn));
if ((rc < 0) && (rc >= sizeof(adapter->name)))
if ((rc < 0) || (rc >= sizeof(adapter->name)))
{
goto cleanup;
}
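The corrected condition follows the C snprintf contract: a negative return signals an output error, while a return greater than or equal to the buffer size signals truncation. The old `&&` form could never be true (rc cannot be both negative and >= the buffer size), so truncated adapter names were silently accepted. A generic sketch of the corrected check (buffer and index are illustrative):

char name[48];
unsigned int idx = 0;
int rc = snprintf(name, sizeof(name), "nvswitch i2c %u", idx);
if ((rc < 0) || ((size_t)rc >= sizeof(name)))
{
    /* output error or truncated result: both must be treated as failure */
}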

View File

@@ -1865,11 +1865,10 @@ nvswitch_os_print
switch (log_level)
{
case NVSWITCH_DBG_LEVEL_MMIO:
case NVSWITCH_DBG_LEVEL_NOISY:
kern_level = KERN_DEBUG;
break;
case NVSWITCH_DBG_LEVEL_INFO:
kern_level = KERN_INFO;
break;
case NVSWITCH_DBG_LEVEL_SETUP:
kern_level = KERN_INFO;
break;
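A hedged usage sketch (nvswitch_os_print()'s full prototype is not shown in this hunk; the argument shape is assumed from the switch above): the inserted NOISY level shares KERN_DEBUG with MMIO, so verbose traces stay out of the default console log.

nvswitch_os_print(NVSWITCH_DBG_LEVEL_NOISY,
                  "nvswitch: verbose trace, link %d\n", link_id);  /* link_id illustrative */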

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -490,6 +490,7 @@ static struct file_operations g_nv_cap_drv_fops;
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
struct file *file;
int dup_fd;
struct inode *inode = NULL;
@@ -558,10 +559,14 @@ int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
err:
fput(file);
return -1;
#else
return -1;
#endif
}
void NV_API_CALL nv_cap_close_fd(int fd)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
if (fd == -1)
{
return;
@@ -599,6 +604,7 @@ void NV_API_CALL nv_cap_close_fd(int fd)
#endif
task_unlock(current);
#endif
}
static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name)
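The compile-time pattern these hunks introduce, sketched in isolation (function name hypothetical): when the kernel configuration provides no filesystem access, the fd-handling bodies compile out and the stubs report failure uniformly.

int example_fs_dependent_op(void)   /* hypothetical name */
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
    /* real body: fget()/fput(), inode checks, etc. */
    return 0;
#else
    /* filesystem access compiled out: fail uniformly */
    return -1;
#endif
}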

View File

@@ -107,7 +107,7 @@ nv_dma_buf_free_file_private(
if (priv->handles != NULL)
{
NV_KFREE(priv->handles, priv->total_objects * sizeof(priv->handles[0]));
os_free_mem(priv->handles);
priv->handles = NULL;
}
@@ -122,6 +122,8 @@ nv_dma_buf_alloc_file_private(
)
{
nv_dma_buf_file_private_t *priv = NULL;
NvU64 handles_size = num_handles * sizeof(priv->handles[0]);
NV_STATUS status;
NV_KZALLOC(priv, sizeof(nv_dma_buf_file_private_t));
if (priv == NULL)
@@ -131,11 +133,12 @@ nv_dma_buf_alloc_file_private(
mutex_init(&priv->lock);
NV_KZALLOC(priv->handles, num_handles * sizeof(priv->handles[0]));
if (priv->handles == NULL)
status = os_alloc_mem((void **) &priv->handles, handles_size);
if (status != NV_OK)
{
goto failed;
}
os_mem_set(priv->handles, 0, handles_size);
return priv;
@@ -1065,6 +1068,7 @@ nv_dma_buf_create(
status = rm_dma_buf_get_client_and_device(sp, priv->nv,
params->hClient,
params->handles[0],
&priv->h_client,
&priv->h_device,
&priv->h_subdevice,

View File

@@ -1,395 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-frontend.h"
MODULE_LICENSE("Dual MIT/GPL");
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
/*
* MODULE_IMPORT_NS() is added by commit id 8651ec01daeda
* ("module: add support for symbol namespaces") in 5.4
*/
#if defined(MODULE_IMPORT_NS)
/*
* DMA_BUF namespace is added by commit id 16b0314aa746
* ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16
*/
MODULE_IMPORT_NS(DMA_BUF);
#endif
static NvU32 nv_num_instances;
// lock required to protect table.
struct semaphore nv_module_table_lock;
// minor number table
nvidia_module_t *nv_minor_num_table[NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX + 1];
int nvidia_init_module(void);
void nvidia_exit_module(void);
/* EXPORTS to Linux Kernel */
int nvidia_frontend_open(struct inode *, struct file *);
int nvidia_frontend_close(struct inode *, struct file *);
unsigned int nvidia_frontend_poll(struct file *, poll_table *);
int nvidia_frontend_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
long nvidia_frontend_unlocked_ioctl(struct file *, unsigned int, unsigned long);
long nvidia_frontend_compat_ioctl(struct file *, unsigned int, unsigned long);
int nvidia_frontend_mmap(struct file *, struct vm_area_struct *);
/* character driver entry points */
static struct file_operations nv_frontend_fops = {
.owner = THIS_MODULE,
.poll = nvidia_frontend_poll,
.unlocked_ioctl = nvidia_frontend_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
.compat_ioctl = nvidia_frontend_compat_ioctl,
#endif
.mmap = nvidia_frontend_mmap,
.open = nvidia_frontend_open,
.release = nvidia_frontend_close,
};
/* Helper functions */
static int add_device(nvidia_module_t *module, nv_linux_state_t *device, NvBool all)
{
NvU32 i;
int rc = -1;
// look for a free minor number and assign a unique minor number to this device
for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
{
if (nv_minor_num_table[i] == NULL)
{
nv_minor_num_table[i] = module;
device->minor_num = i;
if (all == NV_TRUE)
{
device = device->next;
if (device == NULL)
{
rc = 0;
break;
}
}
else
{
rc = 0;
break;
}
}
}
return rc;
}
static int remove_device(nvidia_module_t *module, nv_linux_state_t *device)
{
int rc = -1;
// remove this device from minor_number table
if ((device != NULL) && (nv_minor_num_table[device->minor_num] != NULL))
{
nv_minor_num_table[device->minor_num] = NULL;
device->minor_num = 0;
rc = 0;
}
return rc;
}
/* Export functions */
int nvidia_register_module(nvidia_module_t *module)
{
int rc = 0;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
if (module->instance >= NV_MAX_MODULE_INSTANCES)
{
printk("NVRM: NVIDIA module instance %d registration failed.\n",
module->instance);
rc = -EINVAL;
goto done;
}
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
nv_minor_num_table[ctrl_minor_num] = module;
nv_num_instances++;
done:
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_register_module);
int nvidia_unregister_module(nvidia_module_t *module)
{
int rc = 0;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
if (nv_minor_num_table[ctrl_minor_num] == NULL)
{
printk("NVRM: NVIDIA module for %d instance does not exist\n",
module->instance);
rc = -1;
}
else
{
nv_minor_num_table[ctrl_minor_num] = NULL;
nv_num_instances--;
}
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_unregister_module);
int nvidia_frontend_add_device(nvidia_module_t *module, nv_linux_state_t * device)
{
int rc = -1;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
if (nv_minor_num_table[ctrl_minor_num] == NULL)
{
printk("NVRM: NVIDIA module for %d instance does not exist\n",
module->instance);
rc = -1;
}
else
{
rc = add_device(module, device, NV_FALSE);
}
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_frontend_add_device);
int nvidia_frontend_remove_device(nvidia_module_t *module, nv_linux_state_t * device)
{
int rc = 0;
NvU32 ctrl_minor_num;
down(&nv_module_table_lock);
ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
if (nv_minor_num_table[ctrl_minor_num] == NULL)
{
printk("NVRM: NVIDIA module for %d instance does not exist\n",
module->instance);
rc = -1;
}
else
{
rc = remove_device(module, device);
}
up(&nv_module_table_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_frontend_remove_device);
int nvidia_frontend_open(
struct inode *inode,
struct file *file
)
{
int rc = -ENODEV;
nvidia_module_t *module = NULL;
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
down(&nv_module_table_lock);
module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->open != NULL))
{
// Increment the reference count of module to ensure that module does
// not get unloaded if its corresponding device file is open, for
// example nvidiaN.ko should not get unloaded if /dev/nvidiaN is open.
if (!try_module_get(module->owner))
{
up(&nv_module_table_lock);
return -ENODEV;
}
rc = module->open(inode, file);
if (rc < 0)
{
module_put(module->owner);
}
}
up(&nv_module_table_lock);
return rc;
}
int nvidia_frontend_close(
struct inode *inode,
struct file *file
)
{
int rc = -ENODEV;
nvidia_module_t *module = NULL;
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->close != NULL))
{
rc = module->close(inode, file);
// Decrement the reference count of module.
module_put(module->owner);
}
return rc;
}
unsigned int nvidia_frontend_poll(
struct file *file,
poll_table *wait
)
{
unsigned int mask = 0;
struct inode *inode = NV_FILE_INODE(file);
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
nvidia_module_t *module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->poll != NULL))
mask = module->poll(file, wait);
return mask;
}
int nvidia_frontend_ioctl(
struct inode *inode,
struct file *file,
unsigned int cmd,
unsigned long i_arg)
{
int rc = -ENODEV;
nvidia_module_t *module = NULL;
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->ioctl != NULL))
rc = module->ioctl(inode, file, cmd, i_arg);
return rc;
}
long nvidia_frontend_unlocked_ioctl(
struct file *file,
unsigned int cmd,
unsigned long i_arg
)
{
return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
long nvidia_frontend_compat_ioctl(
struct file *file,
unsigned int cmd,
unsigned long i_arg
)
{
return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
int nvidia_frontend_mmap(
struct file *file,
struct vm_area_struct *vma
)
{
int rc = -ENODEV;
struct inode *inode = NV_FILE_INODE(file);
NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
nvidia_module_t *module = nv_minor_num_table[minor_num];
if ((module != NULL) && (module->mmap != NULL))
rc = module->mmap(file, vma);
return rc;
}
static int __init nvidia_frontend_init_module(void)
{
int status = 0;
// initialise nvidia module table;
nv_num_instances = 0;
memset(nv_minor_num_table, 0, sizeof(nv_minor_num_table));
NV_INIT_MUTEX(&nv_module_table_lock);
status = nvidia_init_module();
if (status < 0)
{
return status;
}
// register char device
status = register_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend", &nv_frontend_fops);
if (status < 0)
{
printk("NVRM: register_chrdev() failed!\n");
nvidia_exit_module();
}
return status;
}
static void __exit nvidia_frontend_exit_module(void)
{
/*
* If this is the last nvidia_module to be unregistered, cleanup and
* unregister char dev
*/
if (nv_num_instances == 1)
{
unregister_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend");
}
nvidia_exit_module();
}
module_init(nvidia_frontend_init_module);
module_exit(nvidia_frontend_exit_module);

View File

@@ -1,47 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_FRONTEND_H_
#define _NV_FRONTEND_H_
#include "nvtypes.h"
#include "nv-linux.h"
#include "nv-register-module.h"
#define NV_MAX_MODULE_INSTANCES 8
#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev)
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \
NV_MAX_MODULE_INSTANCES)
#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \
(x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN))
int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *);
int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *);
extern nvidia_module_t *nv_minor_num_table[];
#endif

View File

@@ -247,6 +247,11 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
return 0;
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
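A hedged usage sketch for the new wrapper: a queue initialized this way has no NUMA node preference (NV_KTHREAD_NO_NODE), matching the delegation above. The queue name is illustrative.

nv_kthread_q_t q;
if (nv_kthread_q_init(&q, "nv_example_q") != 0)
{
    /* worker thread creation failed */
}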

View File

@@ -648,6 +648,16 @@ int nvidia_mmap_helper(
ret = nvidia_mmap_peer_io(vma, at, page_index, pages);
BUG_ON(NV_VMA_PRIVATE(vma));
if (ret)
{
return ret;
}
NV_PRINT_AT(NV_DBG_MEMINFO, at);
nv_vm_flags_set(vma, VM_IO);
nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
}
else
{
@@ -661,17 +671,21 @@ int nvidia_mmap_helper(
NV_VMA_PRIVATE(vma) = at;
ret = nvidia_mmap_sysmem(vma, at, page_index, pages);
if (ret)
{
return ret;
}
NV_PRINT_AT(NV_DBG_MEMINFO, at);
//
// VM_MIXEDMAP will be set by vm_insert_page() in nvidia_mmap_sysmem().
// VM_SHARED is added to avoid any undesired copy-on-write effects.
//
nv_vm_flags_set(vma, VM_SHARED);
nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
}
if (ret)
{
return ret;
}
NV_PRINT_AT(NV_DBG_MEMINFO, at);
nv_vm_flags_set(vma, VM_IO | VM_LOCKED | VM_RESERVED);
nv_vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
}
if ((prot & NV_PROTECT_WRITEABLE) == 0)
@@ -706,11 +720,16 @@ int nvidia_mmap(
return -EINVAL;
}
sp = nv_nvlfp_get_sp(nvlfp, NV_FOPS_STACK_INDEX_MMAP);
status = nv_kmem_cache_alloc_stack(&sp);
if (status != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for mmap\n");
return status;
}
status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL);
nv_nvlfp_put_sp(nvlfp, NV_FOPS_STACK_INDEX_MMAP);
nv_kmem_cache_free_stack(sp);
return status;
}
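The per-call altstack pattern this hunk (and the matching ioctl hunk later in the commit) switches to, sketched under the assumption that nv_kmem_cache_alloc_stack()/nv_kmem_cache_free_stack() keep their existing semantics: the stack now lives only for the duration of the entry point instead of being cached per open file.

nvidia_stack_t *sp = NULL;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
    return -ENOMEM;   /* illustrative; the mmap path above returns the raw status */
}
/* ... body that needs the alternate stack ... */
nv_kmem_cache_free_stack(sp);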

View File

@@ -640,7 +640,6 @@ int nvidia_p2p_put_pages(
status = nv_p2p_put_pages(NV_P2P_PAGE_TABLE_TYPE_NON_PERSISTENT,
sp, p2p_token, va_space,
virtual_address, &page_table);
nv_kmem_cache_free_stack(sp);
return nvidia_p2p_map_status(status);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,7 +25,6 @@
#include "nv-pci-types.h"
#include "nv-pci.h"
#include "nv-ibmnpu.h"
#include "nv-frontend.h"
#include "nv-msi.h"
#include "nv-hypervisor.h"
@@ -80,19 +79,15 @@ static NvBool nv_treat_missing_irq_as_error(void)
#endif
}
static void nv_init_dynamic_power_management
static void nv_get_pci_sysfs_config
(
nvidia_stack_t *sp,
struct pci_dev *pci_dev
struct pci_dev *pci_dev,
nv_linux_state_t *nvl
)
{
nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
nv_state_t *nv = NV_STATE_PTR(nvl);
#if NV_FILESYSTEM_ACCESS_AVAILABLE
char filename[50];
int ret;
NvBool pr3_acpi_method_present = NV_FALSE;
nvl->sysfs_config_file = NULL;
ret = snprintf(filename, sizeof(filename),
"/sys/bus/pci/devices/%04x:%02x:%02x.0/config",
@@ -143,6 +138,22 @@ static void nv_init_dynamic_power_management
#endif
}
}
#endif
}
static void nv_init_dynamic_power_management
(
nvidia_stack_t *sp,
struct pci_dev *pci_dev
)
{
nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
nv_state_t *nv = NV_STATE_PTR(nvl);
NvBool pr3_acpi_method_present = NV_FALSE;
nvl->sysfs_config_file = NULL;
nv_get_pci_sysfs_config(pci_dev, nvl);
if (nv_get_hypervisor_type() != OS_HYPERVISOR_UNKNOWN)
{
@@ -287,12 +298,29 @@ nv_init_coherent_link_info
if (!NVCPU_IS_AARCH64)
return;
if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-base-pa", &pa) != 0)
goto failed;
if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-start", &pxm_start) != 0)
goto failed;
if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-pxm-count", &pxm_count) != 0)
goto failed;
if (device_property_read_u64(nvl->dev, "nvidia,gpu-mem-base-pa", &pa) != 0)
{
/*
* This implies that the DSD key for PXM start and count is present
* while the one for Physical Address (PA) is absent.
*/
if (nv_get_hypervisor_type() == OS_HYPERVISOR_UNKNOWN)
{
/* Fail for the baremetal case */
goto failed;
}
/*
* For the virtualization usecase on SHH, the coherent GPU memory
* PA is exposed as BAR1 to the VM and the nvidia,gpu-mem-base-pa
* is not present. Set the GPU memory PA to the BAR1 start address.
*/
pa = nv->fb->cpu_address;
}
NV_DEV_PRINTF(NV_DBG_INFO, nv, "DSD properties: \n");
NV_DEV_PRINTF(NV_DBG_INFO, nv, "\tGPU memory PA: 0x%lx \n", pa);
@@ -310,7 +338,7 @@ nv_init_coherent_link_info
}
}
if (NVreg_EnableUserNUMAManagement)
if (NVreg_EnableUserNUMAManagement && !os_is_vgx_hyper())
{
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
nvl->numa_info.use_auto_online = NV_TRUE;
@@ -689,13 +717,14 @@ next_bar:
*/
LOCK_NV_LINUX_DEVICES();
nv_linux_add_device_locked(nvl);
if (nv_linux_add_device_locked(nvl) != 0)
{
UNLOCK_NV_LINUX_DEVICES();
goto err_zero_dev;
}
UNLOCK_NV_LINUX_DEVICES();
if (nvidia_frontend_add_device((void *)&nv_fops, nvl) != 0)
goto err_remove_device;
pm_vt_switch_required(nvl->dev, NV_TRUE);
nv_init_dynamic_power_management(sp, pci_dev);
@@ -711,7 +740,6 @@ next_bar:
if (nvidia_vgpu_vfio_probe(nvl->pci_dev) != NV_OK)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to register device to vGPU VFIO module");
nvidia_frontend_remove_device((void *)&nv_fops, nvl);
goto err_vgpu_kvm;
}
#endif
@@ -741,7 +769,6 @@ err_vgpu_kvm:
nv_procfs_remove_gpu(nvl);
rm_cleanup_dynamic_power_management(sp, nv);
pm_vt_switch_unregister(nvl->dev);
err_remove_device:
LOCK_NV_LINUX_DEVICES();
nv_linux_remove_device_locked(nvl);
UNLOCK_NV_LINUX_DEVICES();
@@ -874,12 +901,6 @@ nv_pci_remove(struct pci_dev *pci_dev)
nvidia_vgpu_vfio_remove(pci_dev, NV_TRUE);
#endif
/* Update the frontend data structures */
if (NV_ATOMIC_READ(nvl->usage_count) == 0)
{
nvidia_frontend_remove_device((void *)&nv_fops, nvl);
}
if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) || (nv->flags & NV_FLAG_OPEN))
{
nv_acpi_unregister_notifier(nvl);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -977,13 +977,9 @@ numa_is_change_allowed(nv_numa_status_t current_state, nv_numa_status_t requeste
static NV_STATUS
numa_status_read(
nv_state_t *nv,
nv_stack_t *sp,
NvS32 *nid,
NvS32 *status,
NvU64 *numa_mem_addr,
NvU64 *numa_mem_size,
nv_offline_addresses_t *list
nv_state_t *nv,
nv_stack_t *sp,
nv_ioctl_numa_info_t *numa_info
)
{
NV_STATUS rm_status;
@@ -1000,24 +996,17 @@ numa_status_read(
{
if (nv_platform_supports_numa(nvl))
{
*nid = nvl->numa_info.node_id;
*status = nv_get_numa_status(nvl);
*numa_mem_addr = 0;
*numa_mem_size = 0;
memset(list, 0x0, sizeof(*list));
memset(numa_info, 0x0, sizeof(*numa_info));
numa_info->nid = nvl->numa_info.node_id;
numa_info->status = nv_get_numa_status(nvl);
}
rm_status = NV_ERR_NOT_READY;
goto done;
}
list->numEntries = ARRAY_SIZE(list->addresses);
rm_status = rm_get_gpu_numa_info(sp, nv,
nid, numa_mem_addr, numa_mem_size,
list->addresses, &list->numEntries);
if (rm_status == NV_OK && *nid == NUMA_NO_NODE)
rm_status = rm_get_gpu_numa_info(sp, nv, numa_info);
if (rm_status == NV_OK && numa_info->nid == NUMA_NO_NODE)
{
//
// RM returns NUMA_NO_NODE when running MIG instances because
@@ -1033,7 +1022,7 @@ numa_status_read(
//
rm_status = NV_ERR_NOT_SUPPORTED;
}
*status = nv_get_numa_status(nvl);
numa_info->status = nv_get_numa_status(nvl);
done:
up(&nvl->ldata_lock);
@@ -1049,18 +1038,15 @@ nv_procfs_read_offline_pages(
NvU32 i;
int retval = 0;
NV_STATUS rm_status;
nv_ioctl_numa_info_t numa_info;
nv_ioctl_numa_info_t numa_info = { 0 };
nv_procfs_private_t *nvpp = s->private;
nv_stack_t *sp = nvpp->sp;
nv_state_t *nv = nvpp->nv;
rm_status = numa_status_read(nv, sp,
&numa_info.nid,
&numa_info.status,
&numa_info.numa_mem_addr,
&numa_info.numa_mem_size,
&numa_info.offline_addresses);
numa_info.offline_addresses.numEntries =
ARRAY_SIZE(numa_info.offline_addresses.addresses);
rm_status = numa_status_read(nv, sp, &numa_info);
if (rm_status != NV_OK)
return -EIO;
@@ -1131,18 +1117,17 @@ nv_procfs_read_numa_status(
{
int retval = 0;
NV_STATUS rm_status;
nv_ioctl_numa_info_t numa_info;
nv_ioctl_numa_info_t numa_info = { 0 };
nv_procfs_private_t *nvpp = s->private;
nv_stack_t *sp = nvpp->sp;
nv_state_t *nv = nvpp->nv;
rm_status = numa_status_read(nv, sp,
&numa_info.nid,
&numa_info.status,
&numa_info.numa_mem_addr,
&numa_info.numa_mem_size,
&numa_info.offline_addresses);
/*
* Note: we leave numa_info.offline_addresses.numEntries as 0, so that
* the numa_status_read() callchain doesn't perform expensive page
* querying that we don't need here.
*/
rm_status = numa_status_read(nv, sp, &numa_info);
if ((rm_status != NV_OK) && (rm_status != NV_ERR_NOT_READY))
return -EIO;
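A hedged caller sketch of the consolidated interface: populate numEntries to request the offline page list (as the offline-pages reader above does), or leave it zero to skip the expensive page query (as the numa-status reader does).

nv_ioctl_numa_info_t numa_info = { 0 };
numa_info.offline_addresses.numEntries =
    ARRAY_SIZE(numa_info.offline_addresses.addresses);   /* or 0 to skip */
if (numa_status_read(nv, sp, &numa_info) != NV_OK)
    return -EIO;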

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -771,16 +771,11 @@
/*
* Option: OpenRmEnableUnsupportedGpus
*
* Open nvidia.ko support for features beyond what is used on Data Center GPUs
* is still fairly immature, so for now require users to opt into use of open
* nvidia.ko with a special registry key, if not on a Data Center GPU.
* This option to require opt in for use of Open RM on non-Data Center
* GPUs is deprecated and no longer required. The kernel module parameter
* is left here, though ignored, for backwards compatibility.
*/
#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS)
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE
/*
* Option: NVreg_DmaRemapPeerMmio
@@ -853,7 +848,7 @@ NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT);
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,7 +25,6 @@
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-frontend.h"
NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
nv_state_t *nv,

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -514,28 +514,37 @@ NV_STATUS nv_alloc_system_pages(
struct device *dev = at->dev;
dma_addr_t bus_addr;
// Order should be zero except for EGM allocations.
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int alloc_page_shift = BIT_IDX_32(alloc_page_size);
unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);
unsigned int sub_page_idx;
unsigned int sub_page_offset;
unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %u: %u pages\n", __FUNCTION__, at->num_pages);
"NVRM: VM: %u: %u order0 pages, %u order\n", __FUNCTION__, at->num_pages, at->order);
gfp_mask = nv_compute_gfp_mask(nv, at);
for (i = 0; i < at->num_pages; i++)
for (i = 0; i < alloc_num_pages; i++)
{
if (at->flags.unencrypted && (dev != NULL))
{
virt_addr = (unsigned long)dma_alloc_coherent(dev,
PAGE_SIZE,
alloc_page_size,
&bus_addr,
gfp_mask);
at->flags.coherent = NV_TRUE;
}
else if (at->flags.node)
{
NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, 0, gfp_mask);
NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, at->order, gfp_mask);
}
else
{
NV_GET_FREE_PAGES(virt_addr, 0, gfp_mask);
NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
}
if (virt_addr == 0)
@@ -547,49 +556,55 @@ NV_STATUS nv_alloc_system_pages(
}
#if !defined(__GFP_ZERO)
if (at->flags.zeroed)
memset((void *)virt_addr, 0, PAGE_SIZE);
memset((void *)virt_addr, 0, alloc_page_size);
#endif
phys_addr = nv_get_kern_phys_address(virt_addr);
if (phys_addr == 0)
sub_page_offset = 0;
for (sub_page_idx = 0; sub_page_idx < os_pages_in_page; sub_page_idx++)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: %s: failed to look up physical address\n",
__FUNCTION__);
NV_FREE_PAGES(virt_addr, 0);
status = NV_ERR_OPERATING_SYSTEM;
goto failed;
}
unsigned long sub_page_virt_addr = virt_addr + sub_page_offset;
phys_addr = nv_get_kern_phys_address(sub_page_virt_addr);
if (phys_addr == 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: %s: failed to look up physical address\n",
__FUNCTION__);
NV_FREE_PAGES(sub_page_virt_addr, at->order);
status = NV_ERR_OPERATING_SYSTEM;
goto failed;
}
#if defined(_PAGE_NX)
if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) &&
(phys_addr < 0x400000))
{
nv_printf(NV_DBG_SETUP,
"NVRM: VM: %s: discarding page @ 0x%llx\n",
__FUNCTION__, phys_addr);
--i;
continue;
}
if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) &&
(phys_addr < 0x400000))
{
nv_printf(NV_DBG_SETUP,
"NVRM: VM: %s: discarding page @ 0x%llx\n",
__FUNCTION__, phys_addr);
--i;
continue;
}
#endif
page_ptr = at->page_table[i];
page_ptr->phys_addr = phys_addr;
page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
page_ptr->virt_addr = virt_addr;
page_ptr = at->page_table[(i * os_pages_in_page) + sub_page_idx];
page_ptr->phys_addr = phys_addr;
page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
page_ptr->virt_addr = sub_page_virt_addr;
//
// Use unencrypted dma_addr returned by dma_alloc_coherent() as
// nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
//
if (at->flags.coherent)
page_ptr->dma_addr = bus_addr;
else if (dev)
page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
else
page_ptr->dma_addr = page_ptr->phys_addr;
//
// Use unencrypted dma_addr returned by dma_alloc_coherent() as
// nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
//
if (at->flags.coherent)
page_ptr->dma_addr = bus_addr;
else if (dev != NULL)
page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
else
page_ptr->dma_addr = page_ptr->phys_addr;
NV_MAYBE_RESERVE_PAGE(page_ptr);
NV_MAYBE_RESERVE_PAGE(page_ptr);
sub_page_offset += PAGE_SIZE;
}
}
if (at->cache_type != NV_MEMORY_CACHED)
@@ -602,16 +617,16 @@ failed:
{
for (j = 0; j < i; j++)
{
page_ptr = at->page_table[j];
page_ptr = at->page_table[j * os_pages_in_page];
NV_MAYBE_UNRESERVE_PAGE(page_ptr);
if (at->flags.coherent)
{
dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
page_ptr->dma_addr);
}
else
{
NV_FREE_PAGES(page_ptr->virt_addr, 0);
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
}
}
@@ -627,6 +642,12 @@ void nv_free_system_pages(
unsigned int i;
struct device *dev = at->dev;
// Order should be zero except for EGM allocations.
unsigned int alloc_page_size = PAGE_SIZE << at->order;
unsigned int alloc_page_shift = BIT_IDX_32(alloc_page_size);
unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);
unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);
@@ -650,14 +671,20 @@ void nv_free_system_pages(
}
NV_MAYBE_UNRESERVE_PAGE(page_ptr);
}
for (i = 0; i < at->num_pages; i += os_pages_in_page)
{
page_ptr = at->page_table[i];
if (at->flags.coherent)
{
dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
page_ptr->dma_addr);
}
else
{
NV_FREE_PAGES(page_ptr->virt_addr, 0);
NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
}
}
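Worked arithmetic for the order-aware bookkeeping above (values illustrative; EGM is the case the "Order should be zero except for EGM allocations" comments call out):

/*
 * PAGE_SIZE = 4 KiB, at->order = 9  =>  alloc_page_size  = 4 KiB << 9 = 2 MiB
 *                                       os_pages_in_page = 2 MiB / 4 KiB = 512
 * For at->num_pages = 1024 OS pages:
 *   alloc_num_pages = NV_CEIL(1024 * 4 KiB, 2 MiB) = 2 compound allocations
 * Allocation i fills page_table slots [i*512 .. i*512+511] through
 * at->page_table[(i * os_pages_in_page) + sub_page_idx].
 */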

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,6 +36,8 @@
#include "nv-reg.h"
#include "nv-msi.h"
#include "nv-pci-table.h"
#include "nv-chardev-numbers.h"
#include "nv-register-module.h" // TODO remove once NVKMS migrates
#if defined(NV_UVM_ENABLE)
#include "nv_uvm_interface.h"
@@ -48,7 +50,6 @@
#include "nvlink_proto.h"
#include "nvlink_caps.h"
#include "nv-frontend.h"
#include "nv-hypervisor.h"
#include "nv-ibmnpu.h"
#include "nv-rsync.h"
@@ -61,6 +62,7 @@
#endif
#include <linux/firmware.h>
#include <linux/cdev.h>
#include <sound/core.h> /* HDA struct snd_card */
@@ -102,6 +104,24 @@
#define RM_THRESHOLD_UNAHNDLED_IRQ_COUNT 99900
#define RM_UNHANDLED_TIMEOUT_US 100000
MODULE_LICENSE("Dual MIT/GPL");
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
/*
* MODULE_IMPORT_NS() is added by commit id 8651ec01daeda
* ("module: add support for symbol namespaces") in 5.4
*/
#if defined(MODULE_IMPORT_NS)
/*
* DMA_BUF namespace is added by commit id 16b0314aa746
* ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16
*/
MODULE_IMPORT_NS(DMA_BUF);
#endif // defined(MODULE_IMPORT_NS)
const NvBool nv_is_rm_firmware_supported_os = NV_TRUE;
// Deprecated, use NV_REG_ENABLE_GPU_FIRMWARE instead
@@ -114,17 +134,32 @@ NV_MODULE_STRING_PARAMETER(rm_firmware_active);
nv_cap_t *nvidia_caps_root = NULL;
/*
* our global state; one per device
* Global counts for tracking if all devices were initialized properly
*/
NvU32 num_nv_devices = 0;
NvU32 num_probed_nv_devices = 0;
nv_linux_state_t *nv_linux_devices;
/*
* And one for the control device
* Global list and table of per-device state
* note: both nv_linux_devices and nv_linux_minor_num_table
* are protected by nv_linux_devices_lock
*/
nv_linux_state_t *nv_linux_devices;
static nv_linux_state_t *nv_linux_minor_num_table[NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1];
// Global state for the control device
nv_linux_state_t nv_ctl_device = { { 0 } };
// cdev covering the region of regular (non-control) devices
static struct cdev nv_linux_devices_cdev;
// cdev covering the control device
static struct cdev nv_linux_control_device_cdev;
// lock for nvidia_register_module "extmod" emulation
// TODO remove once NVKMS migrates
static struct semaphore nv_extmod_lock;
extern NvU32 nv_dma_remap_peer_mmio;
nv_kthread_q_t nv_kthread_q;
@@ -199,17 +234,19 @@ static int nvidia_open (struct inode *, struct file *);
static int nvidia_close (struct inode *, struct file *);
static unsigned int nvidia_poll (struct file *, poll_table *);
static int nvidia_ioctl (struct inode *, struct file *, unsigned int, unsigned long);
static long nvidia_unlocked_ioctl (struct file *, unsigned int, unsigned long);
/* character device entry points*/
nvidia_module_t nv_fops = {
.owner = THIS_MODULE,
.module_name = MODULE_NAME,
.instance = MODULE_INSTANCE_NUMBER,
.open = nvidia_open,
.close = nvidia_close,
.ioctl = nvidia_ioctl,
.mmap = nvidia_mmap,
.poll = nvidia_poll,
static struct file_operations nvidia_fops = {
.owner = THIS_MODULE,
.poll = nvidia_poll,
.unlocked_ioctl = nvidia_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
.compat_ioctl = nvidia_unlocked_ioctl,
#endif
.mmap = nvidia_mmap,
.open = nvidia_open,
.release = nvidia_close,
};
#if defined(CONFIG_PM)
@@ -512,6 +549,7 @@ nv_module_state_init(nv_stack_t *sp)
}
nv_linux_devices = NULL;
memset(nv_linux_minor_num_table, 0, sizeof(nv_linux_minor_num_table));
NV_INIT_MUTEX(&nv_linux_devices_lock);
init_rwsem(&nv_system_pm_lock);
@@ -590,8 +628,6 @@ static void
nv_drivers_exit(void)
{
nv_pci_unregister_driver();
nvidia_unregister_module(&nv_fops);
}
static int __init
@@ -599,14 +635,6 @@ nv_drivers_init(void)
{
int rc;
rc = nvidia_register_module(&nv_fops);
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: failed to register character device.\n");
return rc;
}
rc = nv_pci_register_driver();
if (rc < 0)
{
@@ -616,11 +644,6 @@ nv_drivers_init(void)
}
exit:
if (rc < 0)
{
nvidia_unregister_module(&nv_fops);
}
return rc;
}
@@ -644,6 +667,8 @@ nv_module_init(nv_stack_t **sp)
{
int rc;
NV_INIT_MUTEX(&nv_extmod_lock); // TODO remove once NVKMS migrates
rc = nv_module_resources_init(sp);
if (rc < 0)
{
@@ -745,7 +770,48 @@ static void nv_caps_root_exit(void)
nvidia_caps_root = NULL;
}
int __init nvidia_init_module(void)
static int nv_register_chrdev(
unsigned int minor,
unsigned int count,
struct cdev *cdev,
const char *name,
struct file_operations *fops
)
{
int rc;
rc = register_chrdev_region(MKDEV(NV_MAJOR_DEVICE_NUMBER, minor),
count, name);
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: register_chrdev_region() failed for %s!\n", name);
return rc;
}
cdev_init(cdev, fops);
rc = cdev_add(cdev, MKDEV(NV_MAJOR_DEVICE_NUMBER, minor), count);
if (rc < 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: cdev_add() failed for %s!\n", name);
unregister_chrdev_region(MKDEV(NV_MAJOR_DEVICE_NUMBER, minor), count);
return rc;
}
return rc;
}
static void nv_unregister_chrdev(
unsigned int minor,
unsigned int count,
struct cdev *cdev
)
{
cdev_del(cdev);
unregister_chrdev_region(MKDEV(NV_MAJOR_DEVICE_NUMBER, minor), count);
}
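A minimal standalone sketch of the register_chrdev_region()/cdev_add() pattern the helper wraps, with the same unwind order on failure (all names here are illustrative, not from the commit):

#include <linux/cdev.h>
#include <linux/fs.h>

static struct cdev example_cdev;

static int example_register(unsigned int major, unsigned int minor,
                            unsigned int count,
                            const struct file_operations *fops)
{
    dev_t devt = MKDEV(major, minor);
    int rc = register_chrdev_region(devt, count, "example");
    if (rc < 0)
        return rc;
    cdev_init(&example_cdev, fops);
    rc = cdev_add(&example_cdev, devt, count);
    if (rc < 0)
        unregister_chrdev_region(devt, count);   /* undo the region reservation */
    return rc;
}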
static int __init nvidia_init_module(void)
{
int rc;
NvU32 count;
@@ -846,10 +912,39 @@ int __init nvidia_init_module(void)
}
#endif
/*
* Register char devices for both the region of regular devices
* as well as the control device.
*
* NOTE: THIS SHOULD BE DONE LAST.
*/
rc = nv_register_chrdev(0, NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1,
&nv_linux_devices_cdev, "nvidia", &nvidia_fops);
if (rc < 0)
{
goto no_chrdev_exit;
}
rc = nv_register_chrdev(NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE, 1,
&nv_linux_control_device_cdev, "nvidiactl", &nvidia_fops);
if (rc < 0)
{
goto partial_chrdev_exit;
}
__nv_init_sp = sp;
return 0;
partial_chrdev_exit:
nv_unregister_chrdev(0, NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1,
&nv_linux_devices_cdev);
no_chrdev_exit:
#if defined(NV_UVM_ENABLE)
nv_uvm_exit();
#endif
drivers_exit:
nv_drivers_exit();
@@ -865,10 +960,15 @@ procfs_exit:
return rc;
}
void nvidia_exit_module(void)
static void __exit nvidia_exit_module(void)
{
nvidia_stack_t *sp = __nv_init_sp;
nv_unregister_chrdev(NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE, 1,
&nv_linux_control_device_cdev);
nv_unregister_chrdev(0, NV_MINOR_DEVICE_NUMBER_REGULAR_MAX + 1,
&nv_linux_devices_cdev);
#if defined(NV_UVM_ENABLE)
nv_uvm_exit();
#endif
@@ -887,20 +987,11 @@ void nvidia_exit_module(void)
static void *nv_alloc_file_private(void)
{
nv_linux_file_private_t *nvlfp;
unsigned int i;
NV_KZALLOC(nvlfp, sizeof(nv_linux_file_private_t));
if (!nvlfp)
return NULL;
if (rm_is_altstack_in_use())
{
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
{
NV_INIT_MUTEX(&nvlfp->fops_sp_lock[i]);
}
}
init_waitqueue_head(&nvlfp->waitqueue);
NV_SPIN_LOCK_INIT(&nvlfp->fp_lock);
@@ -933,27 +1024,32 @@ static int nv_is_control_device(
struct inode *inode
)
{
return (minor((inode)->i_rdev) == NV_CONTROL_DEVICE_MINOR);
return (minor((inode)->i_rdev) == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE);
}
/*
* Search the global list of nv devices for the one with the given minor device
* number. If found, nvl is returned with nvl->ldata_lock taken.
* Find the nv device with the given minor device number in the minor number
* table. If found, nvl is returned with nvl->ldata_lock taken.
*/
static nv_linux_state_t *find_minor(NvU32 minor)
{
nv_linux_state_t *nvl;
if (minor > NV_MINOR_DEVICE_NUMBER_REGULAR_MAX)
return NULL;
LOCK_NV_LINUX_DEVICES();
nvl = nv_linux_devices;
while (nvl != NULL)
nvl = nv_linux_minor_num_table[minor];
if (nvl->minor_num == minor)
{
if (nvl->minor_num == minor)
{
down(&nvl->ldata_lock);
break;
}
nvl = nvl->next;
down(&nvl->ldata_lock);
}
else
{
// nv_linux_minor_num_table out of sync -- this shouldn't happen
WARN_ON(1);
nvl = NULL;
}
UNLOCK_NV_LINUX_DEVICES();
@@ -1521,8 +1617,6 @@ nvidia_open(
int rc = 0;
nv_linux_file_private_t *nvlfp = NULL;
nvidia_stack_t *sp = NULL;
unsigned int i;
unsigned int k;
nv_printf(NV_DBG_INFO, "NVRM: nvidia_open...\n");
@@ -1540,21 +1634,6 @@ nvidia_open(
return rc;
}
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
{
rc = nv_kmem_cache_alloc_stack(&nvlfp->fops_sp[i]);
if (rc != 0)
{
nv_kmem_cache_free_stack(sp);
for (k = 0; k < i; ++k)
{
nv_kmem_cache_free_stack(nvlfp->fops_sp[k]);
}
nv_free_file_private(nvlfp);
return rc;
}
}
NV_SET_FILE_PRIVATE(file, nvlfp);
nvlfp->sp = sp;
@@ -1610,11 +1689,6 @@ failed:
{
if (nvlfp != NULL)
{
nv_kmem_cache_free_stack(sp);
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
{
nv_kmem_cache_free_stack(nvlfp->fops_sp[i]);
}
nv_free_file_private(nvlfp);
NV_SET_FILE_PRIVATE(file, NULL);
}
@@ -1805,7 +1879,6 @@ nvidia_close_callback(
nv_linux_state_t *nvl = nvlfp->nvptr;
nv_state_t *nv = NV_STATE_PTR(nvl);
nvidia_stack_t *sp = nvlfp->sp;
unsigned int i;
NvBool bRemove = NV_FALSE;
rm_cleanup_file_private(sp, nv, &nvlfp->nvfp);
@@ -1821,11 +1894,6 @@ nvidia_close_callback(
(NV_ATOMIC_READ(nvl->usage_count) == 0) &&
rm_get_device_remove_flag(sp, nv->gpu_id);
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
{
nv_kmem_cache_free_stack(nvlfp->fops_sp[i]);
}
nv_free_file_private(nvlfp);
/*
@@ -1844,7 +1912,6 @@ nvidia_close_callback(
*/
if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed)
{
nvidia_frontend_remove_device((void *)&nv_fops, nvl);
nv_lock_destroy_locks(sp, nv);
NV_KFREE(nvl, sizeof(nv_linux_state_t));
}
@@ -2047,7 +2114,12 @@ nvidia_ioctl(
if (status < 0)
return status;
sp = nv_nvlfp_get_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);
status = nv_kmem_cache_alloc_stack(&sp);
if (status != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Unable to allocate altstack for ioctl\n");
goto done;
}
rmStatus = nv_check_gpu_state(nv);
if (rmStatus == NV_ERR_GPU_IS_LOST)
@@ -2233,15 +2305,7 @@ nvidia_ioctl(
goto done;
}
api->offline_addresses.numEntries =
ARRAY_SIZE(api->offline_addresses.addresses),
rmStatus = rm_get_gpu_numa_info(sp, nv,
&(api->nid),
&(api->numa_mem_addr),
&(api->numa_mem_size),
(api->offline_addresses.addresses),
&(api->offline_addresses.numEntries));
rmStatus = rm_get_gpu_numa_info(sp, nv, api);
if (rmStatus != NV_OK)
{
status = -EBUSY;
@@ -2359,7 +2423,7 @@ unlock:
}
done:
nv_nvlfp_put_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);
nv_kmem_cache_free_stack(sp);
up_read(&nv_system_pm_lock);
@@ -2379,6 +2443,15 @@ done:
return status;
}
long nvidia_unlocked_ioctl(
struct file *file,
unsigned int cmd,
unsigned long i_arg
)
{
return nvidia_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
irqreturn_t
nvidia_isr_msix(
int irq,
@@ -2693,7 +2766,6 @@ nvidia_ctl_close(
nv_state_t *nv = NV_STATE_PTR(nvl);
nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
nvidia_stack_t *sp = nvlfp->sp;
unsigned int i;
nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n");
@@ -2736,11 +2808,6 @@ nvidia_ctl_close(
nvlfp->num_attached_gpus = 0;
}
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
{
nv_kmem_cache_free_stack(nvlfp->fops_sp[i]);
}
nv_free_file_private(nvlfp);
NV_SET_FILE_PRIVATE(file, NULL);
@@ -3326,6 +3393,7 @@ NV_STATUS NV_API_CALL nv_free_kernel_mapping(
NV_STATUS NV_API_CALL nv_alloc_pages(
nv_state_t *nv,
NvU32 page_count,
NvU64 page_size,
NvBool contiguous,
NvU32 cache_type,
NvBool zeroed,
@@ -3413,9 +3481,14 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
}
if (at->flags.contig)
{
status = nv_alloc_contig_pages(nv, at);
}
else
{
at->order = get_order(page_size);
status = nv_alloc_system_pages(nv, at);
}
if (status != NV_OK)
goto failed;
@@ -3643,7 +3716,7 @@ nv_file_private_t* NV_API_CALL nv_get_file_private(
if (ctl)
{
if (MINOR(rdev) != NV_CONTROL_DEVICE_MINOR)
if (MINOR(rdev) != NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE)
goto fail;
}
else
@@ -3651,9 +3724,9 @@ nv_file_private_t* NV_API_CALL nv_get_file_private(
NvBool found = NV_FALSE;
int i;
for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
for (i = 0; i <= NV_MINOR_DEVICE_NUMBER_REGULAR_MAX; i++)
{
if ((nv_minor_num_table[i] != NULL) && (MINOR(rdev) == i))
if ((nv_linux_minor_num_table[i] != NULL) && (MINOR(rdev) == i))
{
found = NV_TRUE;
break;
@@ -4375,6 +4448,7 @@ nvidia_transition_dynamic_power(
nv_linux_state_t *nvl = pci_get_drvdata(pci_dev);
nv_state_t *nv = NV_STATE_PTR(nvl);
nvidia_stack_t *sp = NULL;
NvBool bTryAgain = NV_FALSE;
NV_STATUS status;
if ((nv->flags & (NV_FLAG_OPEN | NV_FLAG_PERSISTENT_SW_STATE)) == 0)
@@ -4387,10 +4461,19 @@ nvidia_transition_dynamic_power(
return -ENOMEM;
}
status = rm_transition_dynamic_power(sp, nv, enter);
status = rm_transition_dynamic_power(sp, nv, enter, &bTryAgain);
nv_kmem_cache_free_stack(sp);
if (bTryAgain)
{
/*
* Return -EAGAIN so that the kernel PM core does not treat this as a
* fatal error and will reschedule the callback in the future.
*/
return -EAGAIN;
}
return (status == NV_OK) ? 0 : -EIO;
}
@@ -4885,34 +4968,37 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
NvS32 *node_id
)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NV_STATUS status = NV_ERR_NOT_SUPPORTED;
#if defined(NVCPU_PPC64LE)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (!nv_platform_supports_numa(nvl))
{
return NV_ERR_NOT_SUPPORTED;
}
#if defined(NVCPU_PPC64LE)
nv_npu_numa_info_t *numa_info;
numa_info = &nvl->npu->numa_info;
if (node_id != NULL)
{
*node_id = nvl->numa_info.node_id;
}
if (compr_addr_sys_phys != NULL)
{
*compr_addr_sys_phys =
numa_info->compr_sys_phys_addr;
}
nv_npu_numa_info_t *numa_info;
if (addr_guest_phys != NULL)
{
*addr_guest_phys =
numa_info->guest_phys_addr;
numa_info = &nvl->npu->numa_info;
if (compr_addr_sys_phys != NULL)
{
*compr_addr_sys_phys =
numa_info->compr_sys_phys_addr;
}
if (addr_guest_phys != NULL)
{
*addr_guest_phys =
numa_info->guest_phys_addr;
}
}
if (addr_width != NULL)
@@ -4923,6 +5009,8 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
status = NV_OK;
#endif
#if defined(NVCPU_AARCH64)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (node_id != NULL)
{
*node_id = nvl->numa_info.node_id;
@@ -5014,6 +5102,7 @@ NV_STATUS NV_API_CALL nv_indicate_idle(
nv_state_t *nv
)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
#if defined(NV_PM_RUNTIME_AVAILABLE)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct device *dev = nvl->dev;
@@ -5052,6 +5141,9 @@ NV_STATUS NV_API_CALL nv_indicate_idle(
#else
return NV_ERR_NOT_SUPPORTED;
#endif
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
NV_STATUS NV_API_CALL nv_indicate_not_idle(
@@ -5098,8 +5190,27 @@ NvBool NV_API_CALL nv_dynamic_power_available(
}
/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
void nv_linux_add_device_locked(nv_linux_state_t *nvl)
int nv_linux_add_device_locked(nv_linux_state_t *nvl)
{
int rc = -1;
int i;
// look for a free minor number and assign a unique minor number to this device
for (i = 0; i <= NV_MINOR_DEVICE_NUMBER_REGULAR_MAX; i++)
{
if (nv_linux_minor_num_table[i] == NULL)
{
nv_linux_minor_num_table[i] = nvl;
nvl->minor_num = i;
rc = 0;
break;
}
}
// bail if no minor number is free
if (rc != 0)
return rc;
if (nv_linux_devices == NULL) {
nv_linux_devices = nvl;
}
@@ -5109,6 +5220,8 @@ void nv_linux_add_device_locked(nv_linux_state_t *nvl)
for (tnvl = nv_linux_devices; tnvl->next != NULL; tnvl = tnvl->next);
tnvl->next = nvl;
}
return rc;
}
/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */
@@ -5123,6 +5236,8 @@ void nv_linux_remove_device_locked(nv_linux_state_t *nvl)
for (tnvl = nv_linux_devices; tnvl->next != nvl; tnvl = tnvl->next);
tnvl->next = nvl->next;
}
nv_linux_minor_num_table[nvl->minor_num] = NULL;
}
void NV_API_CALL nv_control_soc_irqs(nv_state_t *nv, NvBool bEnable)
@@ -5641,3 +5756,91 @@ failed:
NV_DEV_PRINTF(NV_DBG_INFO, nv, "Cannot get EGM info\n");
return NV_ERR_NOT_SUPPORTED;
}
/*
* nvidia_register_module "extmod" emulation
*
* TODO remove once NVKMS migrates
*
* Emulate nv-frontend's behavior of enabling the use of minor number 254
* given module->instance == 1 via the file operations provided in the
* nvidia_module_t structure.
*
* This path is only used by NVKMS and will be removed once NVKMS migrates
* to export its own file_operations structure directly.
*/
static struct file_operations nv_extmod_fops;
static struct cdev nv_extmod_cdev;
static nvidia_module_t *nv_extmod;
static long nv_extmod_unlocked_ioctl(
struct file *file,
unsigned int cmd,
unsigned long i_arg
)
{
return nv_extmod->ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
int nvidia_register_module(nvidia_module_t *module)
{
int rc;
down(&nv_extmod_lock);
if ((nv_extmod != NULL) || (module == NULL) || (module->instance != 1))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: NVIDIA module (emulated) registration failed.\n");
up(&nv_extmod_lock);
return -EINVAL;
}
memset(&nv_extmod_fops, 0, sizeof(nv_extmod_fops));
nv_extmod_fops.owner = module->owner;
nv_extmod_fops.poll = module->poll;
nv_extmod_fops.unlocked_ioctl = nv_extmod_unlocked_ioctl;
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
nv_extmod_fops.compat_ioctl = nv_extmod_unlocked_ioctl;
#endif
nv_extmod_fops.mmap = module->mmap;
nv_extmod_fops.open = module->open;
nv_extmod_fops.release = module->close;
rc = nv_register_chrdev(NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE, 1,
&nv_extmod_cdev, "nvidia-modeset", &nv_extmod_fops);
if (rc < 0)
{
up(&nv_extmod_lock);
return rc;
}
nv_extmod = module;
up(&nv_extmod_lock);
return rc;
}
EXPORT_SYMBOL(nvidia_register_module);
int nvidia_unregister_module(nvidia_module_t *module)
{
down(&nv_extmod_lock);
if (nv_extmod == NULL)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: NVIDIA module (emulated) non-existent de-registration.\n");
up(&nv_extmod_lock);
return -EINVAL;
}
nv_unregister_chrdev(NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE, 1,
&nv_extmod_cdev);
nv_extmod = NULL;
up(&nv_extmod_lock);
return 0;
}
EXPORT_SYMBOL(nvidia_unregister_module);
module_init(nvidia_init_module);
module_exit(nvidia_exit_module);

View File

@@ -209,7 +209,7 @@ NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
memset(platformInfo, 0, sizeof(*platformInfo));
platformInfo->atsSupported = nv_ats_supported;
platformInfo->confComputingEnabled = os_cc_enabled;
platformInfo->sevEnabled = os_cc_enabled;
status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session);

View File

@@ -30,7 +30,6 @@ NVIDIA_SOURCES += nvidia/nv-report-err.c
NVIDIA_SOURCES += nvidia/nv-rsync.c
NVIDIA_SOURCES += nvidia/nv-msi.c
NVIDIA_SOURCES += nvidia/nv-caps.c
NVIDIA_SOURCES += nvidia/nv-frontend.c
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
NVIDIA_SOURCES += nvidia/libspdm_aead.c
NVIDIA_SOURCES += nvidia/libspdm_ecc.c

View File

@@ -639,3 +639,8 @@ int nvlink_is_admin(void)
{
return NV_IS_SUSER();
}
NvU64 nvlink_get_platform_time(void)
{
return nv_ktime_get_raw_ns();
}
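A hedged usage sketch for the new OS hook (variable names illustrative): nv_ktime_get_raw_ns() is a raw monotonic nanosecond clock, so deltas across the call are safe.

NvU64 t0 = nvlink_get_platform_time();
/* ... operation being timed ... */
NvU64 elapsed_ns = nvlink_get_platform_time() - t0;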

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@ void nvlink_assert(int expression);
void nvlink_sleep(unsigned int ms);
void nvlink_print(const char *, int, const char *, int, const char *, ...);
int nvlink_is_admin(void);
NvU64 nvlink_get_platform_time(void);
// Capability functions
NvlStatus nvlink_acquire_fabric_mgmt_cap(void *osPrivate, NvU64 capDescriptor);

View File

@@ -782,6 +782,8 @@ inline void NV_API_CALL out_string(const char *str)
printk("%s", str);
}
#define NV_PRINT_LOCAL_BUFF_LEN_MAX 530
/*
* nv_printf() prints to the kernel log for the driver.
* Returns the number of characters written.
@@ -790,11 +792,38 @@ int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...)
{
va_list arglist;
int chars_written = 0;
NvBool bForced = (NV_DBG_FORCE_LEVEL(debuglevel) == debuglevel);
debuglevel = debuglevel & 0xff;
if (debuglevel >= ((cur_debuglevel >> 4) & 0x3))
// This function is protected by the "_nv_dbg_lock" lock, so it is still
// thread-safe to store the print buffer in a static variable, thus
// avoiding a problem with kernel stack size.
static char buff[NV_PRINT_LOCAL_BUFF_LEN_MAX];
/*
* Print a message if:
* 1. Caller indicates that filtering should be skipped, or
* 2. debuglevel is at least cur_debuglevel for DBG_MODULE_OS (bits 4:5). Support for print
* modules has been removed with DBG_PRINTF, so this check should be cleaned up.
*/
if (bForced ||
(debuglevel >= ((cur_debuglevel >> 4) & 0x3)))
{
size_t length;
char *temp;
size_t loglevel_length = 0, format_length = strlen(printf_format);
size_t length = 0;
const char *loglevel = "";
switch (debuglevel)
{
case NV_DBG_INFO: loglevel = KERN_DEBUG; break;
case NV_DBG_SETUP: loglevel = KERN_NOTICE; break;
case NV_DBG_WARNINGS: loglevel = KERN_WARNING; break;
case NV_DBG_ERRORS: loglevel = KERN_ERR; break;
case NV_DBG_HW_ERRORS: loglevel = KERN_CRIT; break;
case NV_DBG_FATAL: loglevel = KERN_CRIT; break;
}
loglevel_length = strlen(loglevel);
// When printk is called to extend the output of the previous line
// (i.e. when the previous line did not end in \n), the printk call
@@ -814,24 +843,19 @@ int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...)
// string always contains only one \n (at the end) and NV_PRINTF_EX
// is deleted. But that is unlikely to ever happen.
length = strlen(printf_format);
length = loglevel_length + format_length + sizeof(KERN_CONT);
if ((length < 1) || (length > NV_PRINT_LOCAL_BUFF_LEN_MAX))
return 0;
temp = kmalloc(length + sizeof(KERN_CONT), GFP_ATOMIC);
if (!temp)
return 0;
// KERN_CONT changed in the 3.6 kernel, so we can't assume its
// composition or size.
memcpy(temp, KERN_CONT, sizeof(KERN_CONT) - 1);
memcpy(temp + sizeof(KERN_CONT) - 1, printf_format, length + 1);
memcpy(buff, KERN_CONT, sizeof(KERN_CONT) - 1);
memcpy(buff + sizeof(KERN_CONT) - 1, loglevel, loglevel_length);
memcpy(buff + sizeof(KERN_CONT) - 1 + loglevel_length, printf_format, format_length + 1);
va_start(arglist, printf_format);
chars_written = vprintk(temp, arglist);
chars_written = vprintk(buff, arglist);
va_end(arglist);
kfree(temp);
}
return chars_written;
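Illustrative layout of the static buffer after the three memcpy() calls (KERN_CONT and the KERN_* levels are the usual SOH-prefixed strings, e.g. "\001c" and "\0013"):

/*
 *   buff: [KERN_CONT][loglevel][printf_format][NUL]
 *
 * For NV_DBG_ERRORS and format "NVRM: %s failed\n":
 *   "\001c" "\0013" "NVRM: %s failed\n"
 * vprintk() consumes these headers to pick the severity and to append to
 * the previous line only when that line lacked a trailing newline.
 */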
@@ -1199,10 +1223,10 @@ NvBool NV_API_CALL os_is_efi_enabled(void)
void NV_API_CALL os_get_screen_info(
NvU64 *pPhysicalAddress,
NvU16 *pFbWidth,
NvU16 *pFbHeight,
NvU16 *pFbDepth,
NvU16 *pFbPitch,
NvU32 *pFbWidth,
NvU32 *pFbHeight,
NvU32 *pFbDepth,
NvU32 *pFbPitch,
NvU64 consoleBar1Address,
NvU64 consoleBar2Address
)
@@ -1807,6 +1831,7 @@ NV_STATUS NV_API_CALL os_open_temporary_file
void **ppFile
)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
#if defined(O_TMPFILE)
struct file *file;
const char *default_path = "/tmp";
@@ -1852,6 +1877,9 @@ NV_STATUS NV_API_CALL os_open_temporary_file
#else
return NV_ERR_NOT_SUPPORTED;
#endif
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
void NV_API_CALL os_close_file
@@ -1859,7 +1887,9 @@ void NV_API_CALL os_close_file
void *pFile
)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
filp_close(pFile, NULL);
#endif
}
#define NV_MAX_NUM_FILE_IO_RETRIES 10
@@ -1872,6 +1902,7 @@ NV_STATUS NV_API_CALL os_write_file
NvU64 offset
)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
loff_t f_pos = offset;
ssize_t num_written;
int num_retries = NV_MAX_NUM_FILE_IO_RETRIES;
@@ -1902,6 +1933,9 @@ retry:
}
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
NV_STATUS NV_API_CALL os_read_file
@@ -1912,6 +1946,7 @@ NV_STATUS NV_API_CALL os_read_file
NvU64 offset
)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
loff_t f_pos = offset;
ssize_t num_read;
int num_retries = NV_MAX_NUM_FILE_IO_RETRIES;
@@ -1942,6 +1977,9 @@ retry:
}
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
NV_STATUS NV_API_CALL os_open_readonly_file
@@ -1950,6 +1988,7 @@ NV_STATUS NV_API_CALL os_open_readonly_file
void **ppFile
)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
struct file *file;
/*
@@ -1971,6 +2010,9 @@ NV_STATUS NV_API_CALL os_open_readonly_file
*ppFile = (void *)file;
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
NV_STATUS NV_API_CALL os_open_and_read_file