Andy Ritger
2022-11-10 08:39:33 -08:00
parent 7c345b838b
commit 758b4ee818
1323 changed files with 262135 additions and 60754 deletions

View File

@@ -642,6 +642,12 @@ nvswitch_os_get_platform_time
void
);
NvU64
nvswitch_os_get_platform_time_epoch
(
void
);
#if (defined(_WIN32) || defined(_WIN64))
#define NVSWITCH_PRINT_ATTRIB(str, arg1)
#else

View File

@@ -239,9 +239,6 @@ static long nvswitch_ctl_unlocked_ioctl(struct file *file,
struct file_operations device_fops =
{
.owner = THIS_MODULE,
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
.ioctl = nvswitch_device_ioctl,
#endif
.unlocked_ioctl = nvswitch_device_unlocked_ioctl,
.open = nvswitch_device_open,
.release = nvswitch_device_release,
@@ -251,9 +248,6 @@ struct file_operations device_fops =
struct file_operations ctl_fops =
{
.owner = THIS_MODULE,
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
.ioctl = nvswitch_ctl_ioctl,
#endif
.unlocked_ioctl = nvswitch_ctl_unlocked_ioctl,
};
@@ -574,6 +568,8 @@ nvswitch_deinit_device
NVSWITCH_DEV *nvswitch_dev
)
{
nvswitch_deinit_i2c_adapters(nvswitch_dev);
nvswitch_lib_disable_interrupts(nvswitch_dev->lib_device);
nvswitch_shutdown_device_interrupt(nvswitch_dev);
@@ -1452,16 +1448,12 @@ nvswitch_remove
list_del(&nvswitch_dev->list_node);
nvswitch_deinit_i2c_adapters(nvswitch_dev);
WARN_ON(!list_empty(&nvswitch_dev->i2c_adapter_list));
pci_set_drvdata(pci_dev, NULL);
nvswitch_deinit_background_tasks(nvswitch_dev);
nvswitch_deinit_device(nvswitch_dev);
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, nvswitch_dev->bar0);
pci_release_regions(pci_dev);
@@ -1822,8 +1814,7 @@ nvswitch_exit
//
// Get current time in seconds.nanoseconds
// In this implementation, the time is from epoch time
// (midnight UTC of January 1, 1970)
// In this implementation, the time is monotonic time
//
NvU64
nvswitch_os_get_platform_time
@@ -1837,6 +1828,28 @@ nvswitch_os_get_platform_time
return (NvU64) timespec64_to_ns(&ts);
}
//
// Get current time in seconds.nanoseconds
// In this implementation, the time is from epoch time
// (midnight UTC of January 1, 1970).
// This implementation cannot be used for polling loops
// due to clock skew during system startup (bugs 3302382,
// 3297170, 3273847, 3277478, 200693329).
// Instead, nvswitch_os_get_platform_time() is used
// for polling loops.
//
NvU64
nvswitch_os_get_platform_time_epoch
(
void
)
{
struct timespec64 ts;
ktime_get_real_ts64(&ts);
return (NvU64) timespec64_to_ns(&ts);
}
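A minimal sketch (not part of the commit) of the polling pattern these comments prescribe: timeouts are measured with nvswitch_os_get_platform_time() because the monotonic clock cannot jump the way the epoch clock can during startup; device_ready() is a hypothetical readiness predicate.
static NvBool
nvswitch_example_poll_until_ready(NvU64 timeout_ns)
{
    NvU64 start = nvswitch_os_get_platform_time();   // monotonic base
    while (!device_ready())                          // hypothetical predicate
    {
        // Safe across NTP adjustments: monotonic time only moves forward
        if ((nvswitch_os_get_platform_time() - start) > timeout_ns)
            return NV_FALSE;                         // timed out
    }
    return NV_TRUE;
}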
void
nvswitch_os_print
(

View File

@@ -164,7 +164,7 @@ static void nv_acpi_powersource_hotplug_event(acpi_handle handle, u32 event_type
if (nv_acpi_get_powersource(&ac_plugged) != NV_OK)
return;
rm_system_event(pNvAcpiObject->sp, NV_SYSTEM_ACPI_BATTERY_POWER_EVENT, !ac_plugged);
rm_power_source_change_event(pNvAcpiObject->sp, !ac_plugged);
}
}
/*

View File

@@ -62,6 +62,10 @@ static nv_cap_table_entry_t g_nv_cap_mig_table[] =
{"/driver/nvidia/capabilities/mig/monitor"}
};
static nv_cap_table_entry_t g_nv_cap_sys_table[] =
{
};
#define NV_CAP_MIG_CI_ENTRIES(_gi) \
{_gi "/ci0/access"}, \
{_gi "/ci1/access"}, \
@@ -173,8 +177,6 @@ struct
#define NV_CAP_NAME_BUF_SIZE 128
static struct proc_dir_entry *nv_cap_procfs_dir;
static struct proc_dir_entry *nv_cap_procfs_nvlink_minors;
static struct proc_dir_entry *nv_cap_procfs_mig_minors;
static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v)
{
@@ -195,6 +197,25 @@ static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v)
return 0;
}
static int nv_procfs_read_sys_minors(struct seq_file *s, void *v)
{
int i, count;
char name[NV_CAP_NAME_BUF_SIZE];
count = NV_CAP_NUM_ENTRIES(g_nv_cap_sys_table);
for (i = 0; i < count; i++)
{
if (sscanf(g_nv_cap_sys_table[i].name,
"/driver/nvidia/capabilities/%s", name) == 1)
{
name[sizeof(name) - 1] = '\0';
seq_printf(s, "%s %d\n", name, g_nv_cap_sys_table[i].minor);
}
}
return 0;
}
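With g_nv_cap_sys_table still empty, sys-minors reads back empty for now; once an entry is added (hypothetically, "/driver/nvidia/capabilities/fabric-mgmt" with minor 1), reading /proc/driver/nvidia/capabilities/sys-minors would yield a "fabric-mgmt 1" line, matching the format of the existing nvlink-minors and mig-minors files.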
static int nv_procfs_read_mig_minors(struct seq_file *s, void *v)
{
int i, count, gpu;
@@ -230,6 +251,8 @@ NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(sys_minors, nv_system_pm_lock);
static void nv_cap_procfs_exit(void)
{
if (!nv_cap_procfs_dir)
@@ -237,32 +260,39 @@ static void nv_cap_procfs_exit(void)
return;
}
nv_procfs_unregister_all(nv_cap_procfs_dir, nv_cap_procfs_dir);
#if defined(CONFIG_PROC_FS)
proc_remove(nv_cap_procfs_dir);
#endif
nv_cap_procfs_dir = NULL;
}
int nv_cap_procfs_init(void)
{
static struct proc_dir_entry *file_entry;
nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL);
if (nv_cap_procfs_dir == NULL)
{
return -EACCES;
}
nv_cap_procfs_mig_minors = NV_CREATE_PROC_FILE("mig-minors",
nv_cap_procfs_dir,
mig_minors,
NULL);
if (nv_cap_procfs_mig_minors == NULL)
file_entry = NV_CREATE_PROC_FILE("mig-minors", nv_cap_procfs_dir,
mig_minors, NULL);
if (file_entry == NULL)
{
goto cleanup;
}
nv_cap_procfs_nvlink_minors = NV_CREATE_PROC_FILE("nvlink-minors",
nv_cap_procfs_dir,
nvlink_minors,
NULL);
if (nv_cap_procfs_nvlink_minors == NULL)
file_entry = NV_CREATE_PROC_FILE("nvlink-minors", nv_cap_procfs_dir,
nvlink_minors, NULL);
if (file_entry == NULL)
{
goto cleanup;
}
file_entry = NV_CREATE_PROC_FILE("sys-minors", nv_cap_procfs_dir,
sys_minors, NULL);
if (file_entry == NULL)
{
goto cleanup;
}
@@ -320,6 +350,7 @@ static void nv_cap_tables_init(void)
nv_cap_table_init(g_nv_cap_nvlink_table);
nv_cap_table_init(g_nv_cap_mig_table);
nv_cap_table_init(g_nv_cap_mig_gpu_table);
nv_cap_table_init(g_nv_cap_sys_table);
}
static ssize_t nv_cap_procfs_write(struct file *file,
@@ -517,7 +548,7 @@ int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
NV_SET_CLOSE_ON_EXEC(dup_fd, fdt);
__set_bit(dup_fd, fdt->close_on_exec);
spin_unlock(&files->file_lock);
}

View File

@@ -197,8 +197,7 @@ NV_STATUS nv_create_dma_map_scatterlist(nv_dma_map_t *dma_map)
break;
}
#if !defined(NV_SG_ALLOC_TABLE_FROM_PAGES_PRESENT) || \
defined(NV_DOM0_KERNEL_PRESENT)
#if defined(NV_DOM0_KERNEL_PRESENT)
{
NvU64 page_idx = NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(i);
nv_fill_scatterlist(submap->sgt.sgl,
@@ -774,14 +773,16 @@ static NvBool nv_dma_use_map_resource
nv_dma_device_t *dma_dev
)
{
#if defined(NV_DMA_MAP_RESOURCE_PRESENT)
const struct dma_map_ops *ops = get_dma_ops(dma_dev->dev);
#endif
if (nv_dma_remap_peer_mmio == NV_DMA_REMAP_PEER_MMIO_DISABLE)
{
return NV_FALSE;
}
#if defined(NV_DMA_MAP_RESOURCE_PRESENT)
const struct dma_map_ops *ops = get_dma_ops(dma_dev->dev);
if (ops == NULL)
{
/* On pre-5.0 kernels, if dma_map_resource() is present, then we

View File

@@ -24,6 +24,15 @@
#include "nv-dmabuf.h"
#if defined(CONFIG_DMA_SHARED_BUFFER)
//
// The Linux kernel's dma_length in struct scatterlist is unsigned int
// which limits the maximum sg length to 4GB - 1.
// To get around this limitation, the BAR1 scatterlist returned by RM
// is split into (4GB - PAGE_SIZE) sized chunks to build the sg_table.
//
#define NV_DMA_BUF_SG_MAX_LEN ((NvU32)(NVBIT64(32) - PAGE_SIZE))
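A worked example of the resulting split (assuming 4 KiB pages): NV_DMA_BUF_SG_MAX_LEN is 2^32 - 4096 bytes, so a single 10 GiB handle becomes ceil(10 GiB / (4 GiB - 4 KiB)) = 3 sg entries, two of maximum length plus one of roughly 2 GiB, rather than overflowing the 32-bit dma_length field.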
typedef struct nv_dma_buf_mem_handle
{
NvHandle h_memory;
@@ -77,24 +86,20 @@ nv_dma_buf_alloc_file_private(
{
nv_dma_buf_file_private_t *priv = NULL;
NV_KMALLOC(priv, sizeof(nv_dma_buf_file_private_t));
NV_KZALLOC(priv, sizeof(nv_dma_buf_file_private_t));
if (priv == NULL)
{
return NULL;
}
memset(priv, 0, sizeof(nv_dma_buf_file_private_t));
mutex_init(&priv->lock);
NV_KMALLOC(priv->handles, num_handles * sizeof(priv->handles[0]));
NV_KZALLOC(priv->handles, num_handles * sizeof(priv->handles[0]));
if (priv->handles == NULL)
{
goto failed;
}
memset(priv->handles, 0, num_handles * sizeof(priv->handles[0]));
return priv;
failed:
@@ -257,26 +262,36 @@ nv_dma_buf_unmap_unlocked(
nv_dma_device_t *peer_dma_dev,
nv_dma_buf_file_private_t *priv,
struct sg_table *sgt,
NvU32 count
NvU32 mapped_handle_count
)
{
NV_STATUS status;
NvU32 i;
NvU64 dma_len;
NvU64 dma_addr;
NvU64 bar1_va;
NvBool bar1_unmap_needed;
struct scatterlist *sg = NULL;
bar1_unmap_needed = (priv->bar1_va_ref_count == 0);
for_each_sg(sgt->sgl, sg, count, i)
sg = sgt->sgl;
for (i = 0; i < mapped_handle_count; i++)
{
dma_addr = sg_dma_address(sg);
dma_len = priv->handles[i].size;
bar1_va = priv->handles[i].bar1_va;
NvU64 handle_size = priv->handles[i].size;
WARN_ON(sg_dma_len(sg) != priv->handles[i].size);
dma_addr = sg_dma_address(sg);
dma_len = 0;
//
// Seek ahead in the scatterlist until the handle size is covered.
// IOVA unmap can then be done all at once instead of doing it
// one sg at a time.
//
while(handle_size != dma_len)
{
dma_len += sg_dma_len(sg);
sg = sg_next(sg);
}
nv_dma_unmap_peer(peer_dma_dev, (dma_len / os_page_size), dma_addr);
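Worked example of the seek-ahead (sizes chosen for illustration): a handle of 8 GiB minus 8 KiB maps as two maximum-length sg entries of 4 GiB minus 4 KiB each, so the inner loop sums sg_dma_len() across both entries until dma_len equals the handle size, and a single nv_dma_unmap_peer() call then tears down the whole IOVA range at once.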
@@ -307,7 +322,8 @@ nv_dma_buf_map(
nv_dma_device_t peer_dma_dev = {{ 0 }};
NvBool bar1_map_needed;
NvBool bar1_unmap_needed;
NvU32 count = 0;
NvU32 mapped_handle_count = 0;
NvU32 num_sg_entries = 0;
NvU32 i = 0;
int rc = 0;
@@ -352,20 +368,29 @@ nv_dma_buf_map(
goto unlock_api_lock;
}
NV_KMALLOC(sgt, sizeof(struct sg_table));
NV_KZALLOC(sgt, sizeof(struct sg_table));
if (sgt == NULL)
{
goto unlock_gpu_lock;
}
memset(sgt, 0, sizeof(struct sg_table));
//
// Pre-calculate number of sg entries we need based on handle size.
// This is needed to allocate sg_table.
//
for (i = 0; i < priv->num_objects; i++)
{
NvU64 count = priv->handles[i].size + NV_DMA_BUF_SG_MAX_LEN - 1;
do_div(count, NV_DMA_BUF_SG_MAX_LEN);
num_sg_entries += count;
}
//
// RM currently returns contiguous BAR1, so we create as many
// sg entries as the number of handles being mapped.
// sg entries as num_sg_entries calculated above.
// When RM can alloc discontiguous BAR1, this code will need to be revisited.
//
rc = sg_alloc_table(sgt, priv->num_objects, GFP_KERNEL);
rc = sg_alloc_table(sgt, num_sg_entries, GFP_KERNEL);
if (rc != 0)
{
goto free_sgt;
@@ -375,7 +400,8 @@ nv_dma_buf_map(
peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;
bar1_map_needed = bar1_unmap_needed = (priv->bar1_va_ref_count == 0);
for_each_sg(sgt->sgl, sg, priv->num_objects, i)
sg = sgt->sgl;
for (i = 0; i < priv->num_objects; i++)
{
NvU64 dma_addr;
NvU64 dma_len;
@@ -393,9 +419,15 @@ nv_dma_buf_map(
}
}
mapped_handle_count++;
dma_addr = priv->handles[i].bar1_va;
dma_len = priv->handles[i].size;
//
// IOVA map the full handle at once and then breakdown the range
// (dma_addr, dma_addr + dma_len) into smaller sg entries.
//
status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev,
0x1, (dma_len / os_page_size), &dma_addr);
if (status != NV_OK)
@@ -409,14 +441,23 @@ nv_dma_buf_map(
priv->handles[i].bar1_va);
}
mapped_handle_count--;
// Unmap remaining memory handles
goto unmap_handles;
}
sg_set_page(sg, NULL, dma_len, 0);
sg_dma_address(sg) = (dma_addr_t)dma_addr;
sg_dma_len(sg) = dma_len;
count++;
while(dma_len != 0)
{
NvU32 sg_len = NV_MIN(dma_len, NV_DMA_BUF_SG_MAX_LEN);
sg_set_page(sg, NULL, sg_len, 0);
sg_dma_address(sg) = (dma_addr_t)dma_addr;
sg_dma_len(sg) = sg_len;
dma_addr += sg_len;
dma_len -= sg_len;
sg = sg_next(sg);
}
}
priv->bar1_va_ref_count++;
@@ -432,7 +473,7 @@ nv_dma_buf_map(
return sgt;
unmap_handles:
nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, count);
nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, mapped_handle_count);
sg_free_table(sgt);

View File

@@ -26,19 +26,11 @@
#include "nv-reg.h"
#include "nv-frontend.h"
#if defined(MODULE_LICENSE)
MODULE_LICENSE("Dual MIT/GPL");
#endif
#if defined(MODULE_INFO)
MODULE_INFO(supported, "external");
#endif
#if defined(MODULE_VERSION)
MODULE_VERSION(NV_VERSION_STRING);
#endif
#ifdef MODULE_ALIAS_CHARDEV_MAJOR
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
#endif
/*
* MODULE_IMPORT_NS() is added by commit id 8651ec01daeda
@@ -79,9 +71,6 @@ int nvidia_frontend_mmap(struct file *, struct vm_area_struct *);
static struct file_operations nv_frontend_fops = {
.owner = THIS_MODULE,
.poll = nvidia_frontend_poll,
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
.ioctl = nvidia_frontend_ioctl,
#endif
.unlocked_ioctl = nvidia_frontend_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
.compat_ioctl = nvidia_frontend_compat_ioctl,

View File

@@ -168,6 +168,17 @@ static int nv_i2c_algo_smbus_xfer(
sizeof(data->block),
(NvU8 *)data->block);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
(read_write == I2C_SMBUS_READ) ?
NV_I2C_CMD_BLOCK_READ :
NV_I2C_CMD_BLOCK_WRITE,
(NvU8)(addr & 0x7f), (NvU8)command,
(NvU8)data->block[0],
(NvU8 *)&data->block[1]);
break;
default:
rc = -EINVAL;
rmStatus = NV_ERR_INVALID_ARGUMENT;
@@ -195,7 +206,8 @@ static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA);
I2C_FUNC_SMBUS_BLOCK_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK);
}
nv_kmem_cache_free_stack(sp);
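A hedged userspace sketch (not part of the commit) of exercising the newly advertised I2C_FUNC_SMBUS_I2C_BLOCK path, assuming the adapter is exposed as /dev/i2c-N; in the driver this lands in the I2C_SMBUS_I2C_BLOCK_DATA case above and is forwarded as NV_I2C_CMD_BLOCK_READ.
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>
#include <i2c/smbus.h>          /* libi2c from i2c-tools */

static int read_i2c_block(const char *dev, int addr, __u8 command, __u8 *buf)
{
    int ret;
    int file = open(dev, O_RDWR);
    if (file < 0)
        return -1;
    if (ioctl(file, I2C_SLAVE, addr) < 0)   /* bind to the target address */
    {
        close(file);
        return -1;
    }
    /* One I2C block transfer: up to 32 bytes starting at 'command' */
    ret = i2c_smbus_read_i2c_block_data(file, command, 32, buf);
    close(file);
    return ret;
}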

View File

@@ -37,7 +37,6 @@
*/
const NvU32 P9_L1D_CACHE_DEFAULT_BLOCK_SIZE = 0x80;
#if defined(NV_OF_GET_PROPERTY_PRESENT)
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
const __be32 *block_size_prop;
@@ -60,12 +59,6 @@ static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
return be32_to_cpu(*block_size_prop);
}
#else
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
}
#endif
/*
* GPU device memory can be exposed to the kernel as NUMA node memory via the

View File

@@ -169,7 +169,6 @@ void nv_kthread_q_stop(nv_kthread_q_t *q)
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
nv_kthread_q_t *q,
int preferred_node,
@@ -217,7 +216,6 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
return thread[i];
}
#endif
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
@@ -231,11 +229,7 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
q->q_kthread = kthread_create(_main_loop, q, q_name);
}
else {
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
#else
return -ENOTSUPP;
#endif
}
if (IS_ERR(q->q_kthread)) {

View File

@@ -431,7 +431,7 @@ static int nvidia_mmap_numa(
const nv_alloc_mapping_context_t *mmap_context)
{
NvU64 start, addr;
unsigned int pages;
NvU64 pages;
NvU64 i;
pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
@@ -680,13 +680,11 @@ int nvidia_mmap(
return -EINVAL;
}
down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]);
sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_MMAP];
sp = nv_nvlfp_get_sp(nvlfp, NV_FOPS_STACK_INDEX_MMAP);
status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL);
up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]);
nv_nvlfp_put_sp(nvlfp, NV_FOPS_STACK_INDEX_MMAP);
return status;
}

View File

@@ -36,7 +36,7 @@ void NV_API_CALL nv_init_msi(nv_state_t *nv)
nv->interrupt_line = nvl->pci_dev->irq;
nv->flags |= NV_FLAG_USES_MSI;
nvl->num_intr = 1;
NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr);
NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr);
if (nvl->irq_count == NULL)
{
@@ -47,7 +47,6 @@ void NV_API_CALL nv_init_msi(nv_state_t *nv)
}
else
{
memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * nvl->num_intr);
nvl->current_num_irq_tracked = 0;
}
}
@@ -100,7 +99,7 @@ void NV_API_CALL nv_init_msix(nv_state_t *nv)
msix_entries->entry = i;
}
NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
NV_KZALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
if (nvl->irq_count == NULL)
{
@@ -109,7 +108,6 @@ void NV_API_CALL nv_init_msix(nv_state_t *nv)
}
else
{
memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * num_intr);
nvl->current_num_irq_tracked = 0;
}
rc = nv_pci_enable_msix(nvl, num_intr);

View File

@@ -0,0 +1,233 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/kernel.h> // For container_of
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/timer.h>
#include "os-interface.h"
#include "nv-linux.h"
#if !defined(NVCPU_PPC64LE)
#define NV_NANO_TIMER_USE_HRTIMER 1
#else
#define NV_NANO_TIMER_USE_HRTIMER 0
#endif // !defined(NVCPU_PPC64LE)
struct nv_nano_timer
{
#if NV_NANO_TIMER_USE_HRTIMER
struct hrtimer hr_timer; // Linux high-resolution timer object; could be
                         // replaced with a platform-specific timer object
#else
struct timer_list jiffy_timer;
#endif
nv_linux_state_t *nv_linux_state;
void (*nv_nano_timer_callback)(struct nv_nano_timer *nv_nstimer);
void *pTmrEvent;
};
/*!
* @brief Runs the nanosecond-resolution timer callback
*
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
static void
nvidia_nano_timer_callback(
nv_nano_timer_t *nv_nstimer)
{
nv_state_t *nv = NULL;
nv_linux_state_t *nvl = nv_nstimer->nv_linux_state;
nvidia_stack_t *sp = NULL;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
nv_printf(NV_DBG_ERRORS, "NVRM: no cache memory \n");
return;
}
nv = NV_STATE_PTR(nvl);
if (rm_run_nano_timer_callback(sp, nv, nv_nstimer->pTmrEvent) != NV_OK)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Error in service of callback \n");
}
nv_kmem_cache_free_stack(sp);
}
/*!
* @brief Allocates a nanosecond-resolution timer object
*
* @returns Pointer to the allocated nv_nano_timer_t, or NULL on failure
*/
static nv_nano_timer_t *nv_alloc_nano_timer(void)
{
nv_nano_timer_t *nv_nstimer;
NV_KMALLOC(nv_nstimer, sizeof(nv_nano_timer_t));
if (nv_nstimer == NULL)
{
return NULL;
}
memset(nv_nstimer, 0, sizeof(nv_nano_timer_t));
return nv_nstimer;
}
#if NV_NANO_TIMER_USE_HRTIMER
static enum hrtimer_restart nv_nano_timer_callback_typed_data(struct hrtimer *hrtmr)
{
struct nv_nano_timer *nv_nstimer =
container_of(hrtmr, struct nv_nano_timer, hr_timer);
nv_nstimer->nv_nano_timer_callback(nv_nstimer);
return HRTIMER_NORESTART;
}
#else
static inline void nv_jiffy_timer_callback_typed_data(struct timer_list *timer)
{
struct nv_nano_timer *nv_nstimer =
container_of(timer, struct nv_nano_timer, jiffy_timer);
nv_nstimer->nv_nano_timer_callback(nv_nstimer);
}
static inline void nv_jiffy_timer_callback_anon_data(unsigned long arg)
{
struct nv_nano_timer *nv_nstimer = (struct nv_nano_timer *)arg;
nv_nstimer->nv_nano_timer_callback(nv_nstimer);
}
#endif
/*!
* @brief Creates & initializes a nanosecond-resolution timer object
*
* @param[in]  nv          Per-GPU Linux state
* @param[in]  pTmrEvent   Pointer to TMR_EVENT
* @param[out] pnv_nstimer Receives the allocated nv_nano_timer_t object
*/
void NV_API_CALL nv_create_nano_timer(
nv_state_t *nv,
void *pTmrEvent,
nv_nano_timer_t **pnv_nstimer)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
nv_nano_timer_t *nv_nstimer = nv_alloc_nano_timer();
if (nv_nstimer == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Not able to create timer object \n");
*pnv_nstimer = NULL;
return;
}
nv_nstimer->nv_linux_state = nvl;
nv_nstimer->pTmrEvent = pTmrEvent;
nv_nstimer->nv_nano_timer_callback = nvidia_nano_timer_callback;
#if NV_NANO_TIMER_USE_HRTIMER
hrtimer_init(&nv_nstimer->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
nv_nstimer->hr_timer.function = nv_nano_timer_callback_typed_data;
#else
#if defined(NV_TIMER_SETUP_PRESENT)
timer_setup(&nv_nstimer->jiffy_timer, nv_jiffy_timer_callback_typed_data, 0);
#else
init_timer(&nv_nstimer->jiffy_timer);
nv_nstimer->jiffy_timer.function = nv_jiffy_timer_callback_anon_data;
nv_nstimer->jiffy_timer.data = (unsigned long)nv_nstimer;
#endif // NV_TIMER_SETUP_PRESENT
#endif // NV_NANO_TIMER_USE_HRTIMER
*pnv_nstimer = nv_nstimer;
}
/*!
* @brief Starts the nanosecond-resolution timer
*
* @param[in] nv         Per-GPU Linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
* @param[in] time_ns    Timeout in nanoseconds
*/
void NV_API_CALL nv_start_nano_timer(
nv_state_t *nv,
nv_nano_timer_t *nv_nstimer,
NvU64 time_ns)
{
#if NV_NANO_TIMER_USE_HRTIMER
ktime_t ktime = ktime_set(0, time_ns);
hrtimer_start(&nv_nstimer->hr_timer, ktime, HRTIMER_MODE_REL);
#else
unsigned long time_jiffies;
NvU32 time_us;
time_us = (NvU32)(time_ns / 1000);
if (time_us == 0)
{
nv_printf(NV_DBG_WARNINGS, "NVRM: Timer value cannot be less than 1 usec.\n");
}
time_jiffies = usecs_to_jiffies(time_us);
mod_timer(&nv_nstimer->jiffy_timer, jiffies + time_jiffies);
#endif
}
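On the PPC64LE fallback path the delay is rounded up to whole jiffies: usecs_to_jiffies() rounds up, so on a common HZ=250 kernel a 500 us request stretches to one 4 ms jiffy, which is why the hrtimer path is preferred wherever it is available.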
/*!
* @brief Cancels the nanosecond-resolution timer
*
* @param[in] nv         Per-GPU Linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
void NV_API_CALL nv_cancel_nano_timer(
nv_state_t *nv,
nv_nano_timer_t *nv_nstimer)
{
#if NV_NANO_TIMER_USE_HRTIMER
hrtimer_cancel(&nv_nstimer->hr_timer);
#else
del_timer(&nv_nstimer->jiffy_timer);
#endif
}
/*!
* @brief Cancels & deletes the nanosecond-resolution timer object
*
* @param[in] nv         Per-GPU Linux state
* @param[in] nv_nstimer Pointer to nv_nano_timer_t object
*/
void NV_API_CALL nv_destroy_nano_timer(
nv_state_t *nv,
nv_nano_timer_t *nv_nstimer)
{
nv_cancel_nano_timer(nv, nv_nstimer);
NV_KFREE(nv_nstimer, sizeof(nv_nano_timer_t));
}
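A hedged usage sketch (not part of the commit) of the lifecycle this file defines; pTmrEvent stands in for the opaque RM timer-event cookie that nvidia_nano_timer_callback() later hands to rm_run_nano_timer_callback().
static void example_nano_timer_usage(nv_state_t *nv, void *pTmrEvent)
{
    nv_nano_timer_t *timer = NULL;

    nv_create_nano_timer(nv, pTmrEvent, &timer);
    if (timer == NULL)
        return;                                  // allocation failed

    nv_start_nano_timer(nv, timer, 500 * 1000);  // fire in 500 us

    /* ... on teardown; destroy also cancels a still-pending timer */
    nv_destroy_nano_timer(nv, timer);
}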

View File

@@ -172,6 +172,8 @@ nv_pci_probe
NvBool prev_nv_ats_supported = nv_ats_supported;
NV_STATUS status;
NvBool last_bar_64bit = NV_FALSE;
NvU8 regs_bar_index = nv_bar_index_to_os_bar_index(pci_dev,
NV_GPU_BAR_INDEX_REGS);
nv_printf(NV_DBG_SETUP, "NVRM: probing 0x%x 0x%x, class 0x%x\n",
pci_dev->vendor, pci_dev->device, pci_dev->class);
@@ -350,28 +352,26 @@ next_bar:
goto failed;
}
if (!request_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS),
NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS),
if (!request_mem_region(NV_PCI_RESOURCE_START(pci_dev, regs_bar_index),
NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index),
nv_device_name))
{
nv_printf(NV_DBG_ERRORS,
"NVRM: request_mem_region failed for %dM @ 0x%llx. This can\n"
"NVRM: occur when a driver such as rivatv is loaded and claims\n"
"NVRM: ownership of the device's registers.\n",
(NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS) >> 20),
(NvU64)NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS));
(NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index) >> 20),
(NvU64)NV_PCI_RESOURCE_START(pci_dev, regs_bar_index));
goto failed;
}
NV_KMALLOC(nvl, sizeof(nv_linux_state_t));
NV_KZALLOC(nvl, sizeof(nv_linux_state_t));
if (nvl == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate memory\n");
goto err_not_supported;
}
os_mem_set(nvl, 0, sizeof(nv_linux_state_t));
nv = NV_STATE_PTR(nvl);
pci_set_drvdata(pci_dev, (void *)nvl);
@@ -498,9 +498,7 @@ next_bar:
if (nvidia_frontend_add_device((void *)&nv_fops, nvl) != 0)
goto err_remove_device;
#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT)
pm_vt_switch_required(nvl->dev, NV_TRUE);
#endif
nv_init_dynamic_power_management(sp, pci_dev);
@@ -544,9 +542,7 @@ err_vgpu_kvm:
#endif
nv_procfs_remove_gpu(nvl);
rm_cleanup_dynamic_power_management(sp, nv);
#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT)
pm_vt_switch_unregister(nvl->dev);
#endif
err_remove_device:
LOCK_NV_LINUX_DEVICES();
nv_linux_remove_device_locked(nvl);
@@ -561,8 +557,8 @@ err_not_supported:
{
NV_KFREE(nvl, sizeof(nv_linux_state_t));
}
release_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS),
NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS));
release_mem_region(NV_PCI_RESOURCE_START(pci_dev, regs_bar_index),
NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index));
NV_PCI_DISABLE_DEVICE(pci_dev);
pci_set_drvdata(pci_dev, NULL);
failed:
@@ -576,6 +572,8 @@ nv_pci_remove(struct pci_dev *pci_dev)
nv_linux_state_t *nvl = NULL;
nv_state_t *nv;
nvidia_stack_t *sp = NULL;
NvU8 regs_bar_index = nv_bar_index_to_os_bar_index(pci_dev,
NV_GPU_BAR_INDEX_REGS);
nv_printf(NV_DBG_SETUP, "NVRM: removing GPU %04x:%02x:%02x.%x\n",
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
@@ -671,9 +669,7 @@ nv_pci_remove(struct pci_dev *pci_dev)
UNLOCK_NV_LINUX_DEVICES();
#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT)
pm_vt_switch_unregister(&pci_dev->dev);
#endif
#if defined(NV_VGPU_KVM_BUILD)
/* Arg 2 == NV_TRUE means that the PCI device should be removed */
@@ -717,8 +713,8 @@ nv_pci_remove(struct pci_dev *pci_dev)
rm_i2c_remove_adapters(sp, nv);
rm_free_private_state(sp, nv);
release_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS),
NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS));
release_mem_region(NV_PCI_RESOURCE_START(pci_dev, regs_bar_index),
NV_PCI_RESOURCE_SIZE(pci_dev, regs_bar_index));
num_nv_devices--;
@@ -751,6 +747,11 @@ nv_pci_shutdown(struct pci_dev *pci_dev)
return;
}
if (nvl != NULL)
{
nvl->nv_state.is_shutdown = NV_TRUE;
}
/* pci_clear_master is not defined for !CONFIG_PCI */
#ifdef CONFIG_PCI
pci_clear_master(pci_dev);
@@ -1000,6 +1001,10 @@ struct pci_driver nv_pci_driver = {
.probe = nv_pci_probe,
.remove = nv_pci_remove,
.shutdown = nv_pci_shutdown,
#if defined(NV_USE_VFIO_PCI_CORE) && \
defined(NV_PCI_DRIVER_HAS_DRIVER_MANAGED_DMA)
.driver_managed_dma = NV_TRUE,
#endif
#if defined(CONFIG_PM)
.driver.pm = &nv_pm_ops,
#endif

View File

@@ -1,47 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if defined(CONFIG_PROC_FS)
#include "nv-procfs-utils.h"
void
nv_procfs_unregister_all(struct proc_dir_entry *entry, struct proc_dir_entry *delimiter)
{
#if defined(NV_PROC_REMOVE_PRESENT)
proc_remove(entry);
#else
while (entry)
{
struct proc_dir_entry *next = entry->next;
if (entry->subdir)
nv_procfs_unregister_all(entry->subdir, delimiter);
remove_proc_entry(entry->name, entry->parent);
if (entry == delimiter)
break;
entry = next;
}
#endif
}
#endif

View File

@@ -201,8 +201,6 @@ nv_procfs_read_power(
const char *dynamic_power_status;
const char *gc6_support;
const char *gcoff_support;
NvU32 limitRated, limitCurr;
NV_STATUS status;
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
@@ -220,20 +218,7 @@ nv_procfs_read_power(
seq_printf(s, " Video Memory Self Refresh: %s\n", gc6_support);
gcoff_support = rm_get_gpu_gcx_support(sp, nv, NV_FALSE);
seq_printf(s, " Video Memory Off: %s\n\n", gcoff_support);
seq_printf(s, "Power Limits:\n");
status = rm_get_clientnvpcf_power_limits(sp, nv, &limitRated, &limitCurr);
if (status != NV_OK)
{
seq_printf(s, " Default: N/A milliwatts\n");
seq_printf(s, " GPU Boost: N/A milliwatts\n");
}
else
{
seq_printf(s, " Default: %u milliwatts\n", limitRated);
seq_printf(s, " GPU Boost: %u milliwatts\n", limitCurr);
}
seq_printf(s, " Video Memory Off: %s\n", gcoff_support);
nv_kmem_cache_free_stack(sp);
return 0;
@@ -288,13 +273,12 @@ nv_procfs_open_file(
nv_procfs_private_t *nvpp = NULL;
nvidia_stack_t *sp = NULL;
NV_KMALLOC(nvpp, sizeof(nv_procfs_private_t));
NV_KZALLOC(nvpp, sizeof(nv_procfs_private_t));
if (nvpp == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate procfs private!\n");
return -ENOMEM;
}
memset(nvpp, 0, sizeof(*nvpp));
NV_INIT_MUTEX(&nvpp->sp_lock);
@@ -1384,7 +1368,7 @@ int nv_procfs_init(void)
return 0;
#if defined(CONFIG_PROC_FS)
failed:
nv_procfs_unregister_all(proc_nvidia, proc_nvidia);
proc_remove(proc_nvidia);
return -ENOMEM;
#endif
}
@@ -1392,7 +1376,7 @@ failed:
void nv_procfs_exit(void)
{
#if defined(CONFIG_PROC_FS)
nv_procfs_unregister_all(proc_nvidia, proc_nvidia);
proc_remove(proc_nvidia);
#endif
}
@@ -1463,7 +1447,7 @@ int nv_procfs_add_gpu(nv_linux_state_t *nvl)
failed:
if (proc_nvidia_gpu)
{
nv_procfs_unregister_all(proc_nvidia_gpu, proc_nvidia_gpu);
proc_remove(proc_nvidia_gpu);
}
return -1;
#endif
@@ -1472,6 +1456,6 @@ failed:
void nv_procfs_remove_gpu(nv_linux_state_t *nvl)
{
#if defined(CONFIG_PROC_FS)
nv_procfs_unregister_all(nvl->proc_dir, nvl->proc_dir);
proc_remove(nvl->proc_dir);
#endif
}

View File

@@ -295,8 +295,18 @@ static NV_STATUS nv_alloc_coherent_pages(
unsigned int gfp_mask;
unsigned long virt_addr = 0;
dma_addr_t bus_addr;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct device *dev = nvl->dev;
nv_linux_state_t *nvl;
struct device *dev;
if (!nv)
{
nv_printf(NV_DBG_MEMINFO,
"NVRM: VM: %s: coherent page alloc on nvidiactl not supported\n", __FUNCTION__);
return NV_ERR_NOT_SUPPORTED;
}
nvl = NV_GET_NVL_FROM_NV_STATE(nv);
dev = nvl->dev;
gfp_mask = nv_compute_gfp_mask(nv, at);
@@ -691,29 +701,3 @@ void nv_vm_unmap_pages(
nv_vunmap(virt_addr, count);
}
void nv_address_space_init_once(struct address_space *mapping)
{
#if defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT)
address_space_init_once(mapping);
#else
memset(mapping, 0, sizeof(*mapping));
INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
#if defined(NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK)
//
// The .tree_lock member variable was changed from type rwlock_t, to
// spinlock_t, on 25 July 2008, by mainline commit
// 19fd6231279be3c3bdd02ed99f9b0eb195978064.
//
rwlock_init(&mapping->tree_lock);
#else
spin_lock_init(&mapping->tree_lock);
#endif
spin_lock_init(&mapping->i_mmap_lock);
INIT_LIST_HEAD(&mapping->private_list);
spin_lock_init(&mapping->private_lock);
INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
#endif /* !NV_ADDRESS_SPACE_INIT_ONCE_PRESENT */
}

View File

@@ -21,6 +21,14 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h> // for MODULE_FIRMWARE
// must precede "nv.h" and "nv-firmware.h" includes
#define NV_FIRMWARE_PATH_FOR_FILENAME(filename) "nvidia/" NV_VERSION_STRING "/" filename
#define NV_FIRMWARE_DECLARE_GSP_FILENAME(filename) \
MODULE_FIRMWARE(NV_FIRMWARE_PATH_FOR_FILENAME(filename));
#include "nv-firmware.h"
#include "nvmisc.h"
#include "os-interface.h"
#include "nv-linux.h"
@@ -90,11 +98,6 @@ const NvBool nv_is_rm_firmware_supported_os = NV_TRUE;
char *rm_firmware_active = NULL;
NV_MODULE_STRING_PARAMETER(rm_firmware_active);
#define NV_FIRMWARE_GSP_FILENAME "nvidia/" NV_VERSION_STRING "/gsp.bin"
#define NV_FIRMWARE_GSP_LOG_FILENAME "nvidia/" NV_VERSION_STRING "/gsp_log.bin"
MODULE_FIRMWARE(NV_FIRMWARE_GSP_FILENAME);
/*
* Global NVIDIA capability state, for GPU driver
*/
@@ -283,15 +286,13 @@ nv_alloc_t *nvos_create_alloc(
nv_alloc_t *at;
unsigned int pt_size, i;
NV_KMALLOC(at, sizeof(nv_alloc_t));
NV_KZALLOC(at, sizeof(nv_alloc_t));
if (at == NULL)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n");
return NULL;
}
memset(at, 0, sizeof(nv_alloc_t));
at->dev = dev;
pt_size = num_pages * sizeof(nvidia_pte_t *);
if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK)
@@ -885,16 +886,18 @@ static void *nv_alloc_file_private(void)
nv_linux_file_private_t *nvlfp;
unsigned int i;
NV_KMALLOC(nvlfp, sizeof(nv_linux_file_private_t));
NV_KZALLOC(nvlfp, sizeof(nv_linux_file_private_t));
if (!nvlfp)
return NULL;
memset(nvlfp, 0, sizeof(nv_linux_file_private_t));
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
if (rm_is_altstack_in_use())
{
NV_INIT_MUTEX(&nvlfp->fops_sp_lock[i]);
for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i)
{
NV_INIT_MUTEX(&nvlfp->fops_sp_lock[i]);
}
}
init_waitqueue_head(&nvlfp->waitqueue);
NV_SPIN_LOCK_INIT(&nvlfp->fp_lock);
@@ -1167,7 +1170,7 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
goto failed;
}
if (nv_dev_is_pci(nvl->dev) && (nv->pci_info.device_id == 0))
if (dev_is_pci(nvl->dev) && (nv->pci_info.device_id == 0))
{
nv_printf(NV_DBG_ERRORS, "NVRM: open of non-existent GPU with minor number %d\n", nvl->minor_num);
rc = -ENXIO;
@@ -1210,7 +1213,7 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
}
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
if (nv_dev_is_pci(nvl->dev))
if (dev_is_pci(nvl->dev))
{
if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE))
{
@@ -1354,7 +1357,7 @@ failed:
if(nvl->irq_count)
NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t));
}
if (nv->flags & NV_FLAG_USES_MSIX)
else if (nv->flags & NV_FLAG_USES_MSIX)
{
nv->flags &= ~NV_FLAG_USES_MSIX;
pci_disable_msix(nvl->pci_dev);
@@ -1464,7 +1467,7 @@ static void nv_init_mapping_revocation(nv_linux_state_t *nvl,
down(&nvl->mmap_lock);
/* Set up struct address_space for use with unmap_mapping_range() */
nv_address_space_init_once(&nvlfp->mapping);
address_space_init_once(&nvlfp->mapping);
nvlfp->mapping.host = inode;
nvlfp->mapping.a_ops = inode->i_mapping->a_ops;
#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO)
@@ -1976,7 +1979,7 @@ static int nvidia_read_card_info(nv_ioctl_card_info_t *ci, size_t num_entries)
ci[i].reg_address = nv->regs->cpu_address;
ci[i].reg_size = nv->regs->size;
ci[i].minor_number = nvl->minor_num;
if (nv_dev_is_pci(nvl->dev))
if (dev_is_pci(nvl->dev))
{
ci[i].fb_address = nv->fb->cpu_address;
ci[i].fb_size = nv->fb->size;
@@ -2015,8 +2018,7 @@ nvidia_ioctl(
if (status < 0)
return status;
down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]);
sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_IOCTL];
sp = nv_nvlfp_get_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);
rmStatus = nv_check_gpu_state(nv);
if (rmStatus == NV_ERR_GPU_IS_LOST)
@@ -2327,7 +2329,7 @@ unlock:
}
done:
up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]);
nv_nvlfp_put_sp(nvlfp, NV_FOPS_STACK_INDEX_IOCTL);
up_read(&nv_system_pm_lock);
@@ -2392,7 +2394,7 @@ nvidia_isr(
NvU64 currentTime = 0;
NvBool found_irq = NV_FALSE;
rm_gpu_copy_mmu_faults_unlocked(nvl->sp[NV_DEV_STACK_ISR], nv, &rm_serviceable_fault_cnt);
rm_gpu_handle_mmu_faults(nvl->sp[NV_DEV_STACK_ISR], nv, &rm_serviceable_fault_cnt);
rm_fault_handling_needed = (rm_serviceable_fault_cnt != 0);
#if defined (NV_UVM_ENABLE)
@@ -3546,23 +3548,10 @@ NvBool NV_API_CALL nv_is_rm_firmware_active(
return NV_FALSE;
}
const char *nv_firmware_path(
nv_firmware_t fw_type
)
{
switch (fw_type)
{
case NV_FIRMWARE_GSP:
return NV_FIRMWARE_GSP_FILENAME;
case NV_FIRMWARE_GSP_LOG:
return NV_FIRMWARE_GSP_LOG_FILENAME;
}
return "";
}
const void* NV_API_CALL nv_get_firmware(
nv_state_t *nv,
nv_firmware_t fw_type,
nv_firmware_type_t fw_type,
nv_firmware_chip_family_t fw_chip_family,
const void **fw_buf,
NvU32 *fw_size
)
@@ -3572,7 +3561,7 @@ const void* NV_API_CALL nv_get_firmware(
// path is relative to /lib/firmware
// if this fails it will print an error to dmesg
if (request_firmware(&fw, nv_firmware_path(fw_type), nvl->dev) != 0)
if (request_firmware(&fw, nv_firmware_path(fw_type, fw_chip_family), nvl->dev) != 0)
return NULL;
*fw_size = fw->size;
@@ -3972,7 +3961,7 @@ nvidia_suspend(
nv_linux_state_t *nvl;
nv_state_t *nv;
if (nv_dev_is_pci(dev))
if (dev_is_pci(dev))
{
pci_dev = to_pci_dev(dev);
nvl = pci_get_drvdata(pci_dev);
@@ -4050,7 +4039,7 @@ nvidia_resume(
nv_linux_state_t *nvl;
nv_state_t *nv;
if (nv_dev_is_pci(dev))
if (dev_is_pci(dev))
{
pci_dev = to_pci_dev(dev);
nvl = pci_get_drvdata(pci_dev);
@@ -4904,7 +4893,7 @@ NV_STATUS NV_API_CALL nv_get_nvlink_line_rate(
NvU32 *linerate
)
{
#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT) && defined(NV_OF_GET_PROPERTY_PRESENT)
#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT)
nv_linux_state_t *nvl;
struct pci_dev *npuDev;
@@ -5141,7 +5130,7 @@ void NV_API_CALL nv_audio_dynamic_power(
struct pci_dev *audio_pci_dev, *pci_dev;
struct snd_card *card;
if (!nv_dev_is_pci(dev))
if (!dev_is_pci(dev))
return;
pci_dev = to_pci_dev(dev);
@@ -5291,45 +5280,19 @@ static int nv_match_dev_state(const void *data, struct file *filp, unsigned fd)
return (data == nvl);
}
NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *nv, void *os_info)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
return nv_match_dev_state(nvl, os_info, -1);
}
NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *nv)
{
struct files_struct *files = current->files;
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
#ifdef NV_ITERATE_FD_PRESENT
return !!iterate_fd(files, 0, nv_match_dev_state, nvl);
#else
struct fdtable *fdtable;
int ret_val = 0;
int fd = 0;
if (files == NULL)
return 0;
spin_lock(&files->file_lock);
for (fdtable = files_fdtable(files); fd < fdtable->max_fds; fd++)
{
struct file *filp;
#ifdef READ_ONCE
filp = READ_ONCE(fdtable->fd[fd]);
#else
filp = ACCESS_ONCE(fdtable->fd[fd]);
smp_read_barrier_depends();
#endif
if (filp == NULL)
continue;
ret_val = nv_match_dev_state(nvl, filp, fd);
if (ret_val)
break;
}
spin_unlock(&files->file_lock);
return !!ret_val;
#endif
}
NvBool NV_API_CALL nv_platform_supports_s0ix(void)

View File

@@ -4,6 +4,7 @@ NVIDIA_SOURCES_CXX ?=
NVIDIA_SOURCES += nvidia/nv.c
NVIDIA_SOURCES += nvidia/nv-pci.c
NVIDIA_SOURCES += nvidia/nv-dmabuf.c
NVIDIA_SOURCES += nvidia/nv-nano-timer.c
NVIDIA_SOURCES += nvidia/nv-acpi.c
NVIDIA_SOURCES += nvidia/nv-cray.c
NVIDIA_SOURCES += nvidia/nv-dma.c
@@ -12,7 +13,6 @@ NVIDIA_SOURCES += nvidia/nv-mmap.c
NVIDIA_SOURCES += nvidia/nv-p2p.c
NVIDIA_SOURCES += nvidia/nv-pat.c
NVIDIA_SOURCES += nvidia/nv-procfs.c
NVIDIA_SOURCES += nvidia/nv-procfs-utils.c
NVIDIA_SOURCES += nvidia/nv-usermap.c
NVIDIA_SOURCES += nvidia/nv-vm.c
NVIDIA_SOURCES += nvidia/nv-vtophys.c

View File

@@ -120,27 +120,21 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_alloc_table
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += efi_enabled
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pm_vt_switch_required
NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi
NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_dma_ops
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs
NV_CONFTEST_FUNCTION_COMPILE_TESTS += write_cr4
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_node_by_phandle
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_node_to_nid
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_pci_get_npu_dev
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_remove_bus_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource
@@ -148,10 +142,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_msix_range
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_matching_node
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dev_is_pci
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write_has_pointer_pos_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_get_platform
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_bpmp_send_receive
@@ -161,18 +152,14 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hlist_for_each_entry
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pgprot_decrypted
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_mkdec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iterate_fd
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_page_iter_page
NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_close_on_exec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += device_property_read_u64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate
@@ -199,7 +186,9 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_has_dynamic_attachment
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
@@ -221,20 +210,14 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations
NV_CONFTEST_TYPE_COMPILE_TESTS += kuid_t
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += noncoherent_swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += address_space_init_once
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += vmbus_channel_has_ringbuffer_page
NV_CONFTEST_TYPE_COMPILE_TESTS += device_driver_of_match_table
NV_CONFTEST_TYPE_COMPILE_TESTS += device_of_node
NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory
NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
@@ -243,10 +226,10 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_dev_has_ats_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += mt_device_gre
NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += num_registered_fb
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build
@@ -254,7 +237,10 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vfio_pci_core_available

View File

@@ -1,66 +0,0 @@
/*******************************************************************************
Copyright (c) 2022 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef NVLINK_INBAND_DRV_HDR_H
#define NVLINK_INBAND_DRV_HDR_H
/*
* This header file defines the header that should be used by RM and NVSwitch
* driver to sync minions on both sides before an actual inband message
* transfer is initiated.
*
* Modifying the existing header structure is not allowed. A versioning
* policy must be enforced if such changes are needed in the future.
*
* - Avoid use of enums or bit fields. Always use fixed types.
* - Avoid conditional fields in the structs
* - Avoid nested and complex structs. Keep them simple and flat for ease of
* encoding and decoding.
* - Avoid embedded pointers. Flexible arrays at the end of the struct are allowed.
* - Always use the packed struct to typecast inband messages. More details:
* - Always have reserved flags or fields to CYA given the stable ABI conditions.
*/
/* Align to byte boundaries */
#pragma pack(push, 1)
#include "nvtypes.h"
#define NVLINK_INBAND_MAX_XFER_SIZE 0x100
#define NVLINK_INBAND_MAX_XFER_AT_ONCE 4
#define NVLINK_INBAND_DRV_HDR_TYPE_START NVBIT(0)
#define NVLINK_INBAND_DRV_HDR_TYPE_MID NVBIT(1)
#define NVLINK_INBAND_DRV_HDR_TYPE_END NVBIT(2)
/* The rest of the bits are reserved for future use and must always be set to zero. */
typedef struct
{
NvU8 data;
} nvlink_inband_drv_hdr_t;
#pragma pack(pop)
/* Don't add any code after this line */
#endif

View File

@@ -1,166 +0,0 @@
/*******************************************************************************
Copyright (c) 2022 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef NVLINK_INBAND_MSG_HDR_H
#define NVLINK_INBAND_MSG_HDR_H
/*
* Messages do not have individual versioning; instead, a strict ABI is maintained. When a change is
* required on existing message, instead of modifying corresponding message structure, a completely
* new message type (like INBAND_MSG_TYPE_XXX_V1, INBAND_MSG_TYPE_XXX_V2) and corresponding message
* definition structure needs to be added. Do not modify existing structs in any way.
*
* Messages may contain fields that are debug-only and must be used only for logging. Such
* fields shouldn't be trusted.
*
* - Avoid use of enums or bitfields. Always use fixed types.
* - Avoid conditional fields in the structs.
* - Avoid nested and complex structs. Keep them simple and flat for ease of encoding and decoding.
* - Avoid embedded pointers. Flexible arrays at the end of the struct are allowed.
* - Always use the packed struct to typecast inband messages. More details:
* - Always have reserved flags or fields to CYA given the stable ABI conditions.
*/
/* Align to byte boundaries */
#pragma pack(push, 1)
#include "nvtypes.h"
#include "nvmisc.h"
#include "nvCpuUuid.h"
#include "nvstatus.h"
#include "nvstatuscodes.h"
#define NVLINK_INBAND_MAX_MSG_SIZE 4096
#define NVLINK_INBAND_MSG_MAGIC_ID_FM 0xadbc
/* Nvlink Inband messages types */
#define NVLINK_INBAND_MSG_TYPE_GPU_PROBE_REQ 0
#define NVLINK_INBAND_MSG_TYPE_GPU_PROBE_RSP 1
#define NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ 2
#define NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_RSP 3
#define NVLINK_INBAND_MSG_TYPE_MC_TEAM_RELEASE_REQ 4
/* Nvlink Inband message packet header */
typedef struct
{
NvU16 magicId; /* Identifier to represent in-band msg, will be NVLINK_INBAND_MSG_MAGIC_ID_FM */
NvU64 requestId; /* Unique Id for a request and response will carry same id */
NV_STATUS status; /* High level status of the message/request */
NvU16 type; /* Type of encoded message. One of NVLINK_INBAND_MSG_TYPE_xxx */
NvU32 length; /* Length of encoded message */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
} nvlink_inband_msg_header_t;
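A hedged sketch (not part of the commit) of how a sender might populate this header while honoring the flat-ABI rules above; next_request_id() is a hypothetical unique-ID source.
static void example_fill_msg_header(nvlink_inband_msg_header_t *hdr,
                                    NvU16 type, NvU32 payload_len)
{
    memset(hdr, 0, sizeof(*hdr));         /* zeroes reserved[] as required */
    hdr->magicId   = NVLINK_INBAND_MSG_MAGIC_ID_FM;
    hdr->requestId = next_request_id();   /* hypothetical unique-ID source */
    hdr->status    = NV_OK;
    hdr->type      = type;                /* e.g. NVLINK_INBAND_MSG_TYPE_GPU_PROBE_REQ */
    hdr->length    = payload_len;         /* length of the encoded payload */
}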
#define NVLINK_INBAND_GPU_PROBE_CAPS_SRIOV_ENABLED NVBIT(0)
/* Add more caps as need in the future */
typedef struct
{
NvU32 pciInfo; /* Encoded as Bus:Device:Function.(debug only) */
NvU8 moduleId; /* GPIO based physical/module ID of the GPU. (debug only) */
NvUuid uuid; /* UUID of the GPU. (debug only) */
NvU64 discoveredLinkMask; /* GPU's discovered NVLink mask info. (debug only) */
NvU64 enabledLinkMask; /* GPU's currently enabled NvLink mask info. (debug only) */
NvU32 gpuCapMask; /* GPU capabilities, one of NVLINK_INBAND_GPU_PROBE_CAPS */
NvU8 reserved[16]; /* For future use. Must be initialized to zero */
} nvlink_inband_gpu_probe_req_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_gpu_probe_req_t probeReq;
} nvlink_inband_gpu_probe_req_msg_t;
#define NVLINK_INBAND_FM_CAPS_MC_TEAM_SETUP_V1 NVBIT64(0)
#define NVLINK_INBAND_FM_CAPS_MC_TEAM_RELEASE_V1 NVBIT64(1)
typedef struct
{
NvU64 gpuHandle; /* Unique handle assigned by initialization entity for this GPU */
NvU32 gfId; /* GFID which supports NVLink */
NvU64 fmCaps; /* Capability of FM e.g. what features FM support. */
NvU16 nodeId; /* Node ID of the system where this GPU belongs */
NvU16 fabricPartitionId; /* Partition ID if the GPU belongs to a fabric partition */
NvU16 clusterId; /* Cluster ID to which this node belongs */
NvU64 gpaAddress; /* GPA starting address for the GPU */
NvU64 gpaAddressRange; /* GPU GPA address range */
NvU64 flaAddress; /* FLA starting address for the GPU */
NvU64 flaAddressRange; /* GPU FLA address range */
NvU8 reserved[32]; /* For future use. Must be initialized to zero */
} nvlink_inband_gpu_probe_rsp_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_gpu_probe_rsp_t probeRsp;
} nvlink_inband_gpu_probe_rsp_msg_t;
typedef struct
{
NvU64 mcAllocSize; /* Multicast allocation size requested */
NvU32 flags; /* For future use. Must be initialized to zero */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
NvU16 numGpuHandles; /* Number of GPUs in this team */
NvU64 gpuHandles[]; /* Array of probed handles, should be last */
} nvlink_inband_mc_team_setup_req_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_mc_team_setup_req_t mcTeamSetupReq;
} nvlink_inband_mc_team_setup_req_msg_t;
typedef struct
{
NvU64 mcTeamHandle; /* Unique handle assigned for this Multicast team */
NvU32 flags; /* For future use. Must be initialized to zero */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
NvU64 mcAddressBase; /* FLA starting address assigned for the Multicast slot */
NvU64 mcAddressSize; /* Size of FLA assigned to the Multicast slot */
} nvlink_inband_mc_team_setup_rsp_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_mc_team_setup_rsp_t mcTeamSetupRsp;
} nvlink_inband_mc_team_setup_rsp_msg_t;
typedef struct
{
NvU64 mcTeamHandle; /* Unique handle assigned for the Multicast team */
NvU32 flags; /* For future use. Must be initialized to zero */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
} nvlink_inband_mc_team_release_req_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_mc_team_release_req_t mcTeamReleaseReq;
} nvlink_inband_mc_team_release_req_msg_t;
#pragma pack(pop)
/* Don't add any code after this line */
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -101,7 +101,7 @@ static void nvlink_permissions_exit(void)
return;
}
NV_REMOVE_PROC_ENTRY(nvlink_permissions);
proc_remove(nvlink_permissions);
nvlink_permissions = NULL;
}
@@ -133,7 +133,7 @@ static void nvlink_procfs_exit(void)
return;
}
NV_REMOVE_PROC_ENTRY(nvlink_procfs_dir);
proc_remove(nvlink_procfs_dir);
nvlink_procfs_dir = NULL;
}
@@ -207,8 +207,6 @@ static int nvlink_fops_release(struct inode *inode, struct file *filp)
nvlink_print(NVLINK_DBG_INFO, "nvlink driver close\n");
WARN_ON(private == NULL);
mutex_lock(&nvlink_drvctx.lock);
if (private->capability_fds.fabric_mgmt > 0)
@@ -306,9 +304,6 @@ static const struct file_operations nvlink_fops = {
.owner = THIS_MODULE,
.open = nvlink_fops_open,
.release = nvlink_fops_release,
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
.ioctl = nvlink_fops_ioctl,
#endif
.unlocked_ioctl = nvlink_fops_unlocked_ioctl,
};

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -233,6 +233,90 @@ NV_STATUS NV_API_CALL os_release_semaphore
return NV_OK;
}
typedef struct rw_semaphore os_rwlock_t;
void* NV_API_CALL os_alloc_rwlock(void)
{
os_rwlock_t *os_rwlock = NULL;
NV_STATUS rmStatus = os_alloc_mem((void *)&os_rwlock, sizeof(os_rwlock_t));
if (rmStatus != NV_OK)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate rw_semaphore!\n");
return NULL;
}
init_rwsem(os_rwlock);
return os_rwlock;
}
void NV_API_CALL os_free_rwlock(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
os_free_mem(os_rwlock);
}
NV_STATUS NV_API_CALL os_acquire_rwlock_read(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
if (!NV_MAY_SLEEP())
{
return NV_ERR_INVALID_REQUEST;
}
down_read(os_rwlock);
return NV_OK;
}
NV_STATUS NV_API_CALL os_acquire_rwlock_write(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
if (!NV_MAY_SLEEP())
{
return NV_ERR_INVALID_REQUEST;
}
down_write(os_rwlock);
return NV_OK;
}
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
if (!down_read_trylock(os_rwlock))  // trylock returns nonzero on success
{
return NV_ERR_TIMEOUT_RETRY;
}
return NV_OK;
}
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
if (!down_write_trylock(os_rwlock))  // trylock returns nonzero on success
{
return NV_ERR_TIMEOUT_RETRY;
}
return NV_OK;
}
void NV_API_CALL os_release_rwlock_read(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
up_read(os_rwlock);
}
void NV_API_CALL os_release_rwlock_write(void *pRwLock)
{
os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;
up_write(os_rwlock);
}
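A hedged usage sketch (not part of the commit) of a reader passing through the wrappers above; the blocking acquire requires a sleepable context, which os_acquire_rwlock_read() checks via NV_MAY_SLEEP().
static NV_STATUS example_rwlock_reader(void)
{
    void *lock = os_alloc_rwlock();
    if (lock == NULL)
        return NV_ERR_NO_MEMORY;

    if (os_acquire_rwlock_read(lock) == NV_OK)
    {
        /* ... read shared state under the lock ... */
        os_release_rwlock_read(lock);
    }

    os_free_rwlock(lock);
    return NV_OK;
}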
NvBool NV_API_CALL os_semaphore_may_sleep(void)
{
return NV_MAY_SLEEP();
@@ -473,6 +557,7 @@ NV_STATUS NV_API_CALL os_alloc_mem(
NvU64 size
)
{
NvU64 original_size = size;
unsigned long alloc_size;
if (address == NULL)
@@ -481,6 +566,10 @@ NV_STATUS NV_API_CALL os_alloc_mem(
*address = NULL;
NV_MEM_TRACKING_PAD_SIZE(size);
// check for integer overflow on size
if (size < original_size)
return NV_ERR_INVALID_ARGUMENT;
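Worked example of the check (pad size assumed 8 bytes for illustration): a request of 0xFFFFFFFFFFFFFFF8 bytes wraps to 0 after NV_MEM_TRACKING_PAD_SIZE, so size < original_size trips and the call fails with NV_ERR_INVALID_ARGUMENT instead of quietly under-allocating.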
//
// NV_KMALLOC, nv_vmalloc take an input of 4 bytes in x86. To avoid
// truncation and wrong allocation, below check is required.
@@ -515,7 +604,7 @@ NV_STATUS NV_API_CALL os_alloc_mem(
void NV_API_CALL os_free_mem(void *address)
{
NvU32 size;
NvU64 size;
NV_MEM_TRACKING_RETRIEVE_SIZE(address, size);
@@ -1100,7 +1189,7 @@ NvBool NV_API_CALL os_pat_supported(void)
NvBool NV_API_CALL os_is_efi_enabled(void)
{
return NV_EFI_ENABLED();
return efi_enabled(EFI_BOOT);
}
void NV_API_CALL os_get_screen_info(
@@ -1760,7 +1849,6 @@ NV_STATUS NV_API_CALL os_write_file
NvU64 offset
)
{
#if defined(NV_KERNEL_WRITE_PRESENT)
loff_t f_pos = offset;
ssize_t num_written;
int num_retries = NV_MAX_NUM_FILE_IO_RETRIES;
@@ -1791,9 +1879,6 @@ retry:
}
return NV_OK;
#else
return NV_ERR_NOT_SUPPORTED;
#endif
}
NV_STATUS NV_API_CALL os_read_file

View File

@@ -222,7 +222,7 @@ NV_STATUS NV_API_CALL os_lock_user_pages(
struct mm_struct *mm = current->mm;
struct page **user_pages;
NvU64 i, pinned;
NvBool write = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags), force = 0;
unsigned int gup_flags = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags) ? FOLL_WRITE : 0;
int ret;
if (!NV_MAY_SLEEP())
@@ -242,8 +242,8 @@ NV_STATUS NV_API_CALL os_lock_user_pages(
}
nv_mmap_read_lock(mm);
ret = NV_GET_USER_PAGES((unsigned long)address,
page_count, write, force, user_pages, NULL);
ret = NV_PIN_USER_PAGES((unsigned long)address,
page_count, gup_flags, user_pages, NULL);
nv_mmap_read_unlock(mm);
pinned = ret;
@@ -255,7 +255,7 @@ NV_STATUS NV_API_CALL os_lock_user_pages(
else if (pinned < page_count)
{
for (i = 0; i < pinned; i++)
put_page(user_pages[i]);
NV_UNPIN_USER_PAGE(user_pages[i]);
os_free_mem(user_pages);
return NV_ERR_INVALID_ADDRESS;
}
@@ -278,7 +278,7 @@ NV_STATUS NV_API_CALL os_unlock_user_pages(
{
if (write)
set_page_dirty_lock(user_pages[i]);
put_page(user_pages[i]);
NV_UNPIN_USER_PAGE(user_pages[i]);
}
os_free_mem(user_pages);
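The switch pairs the lookup with the pin_user_pages() API (mainline since v5.6, probed by the pin_user_pages conftest added in this commit); FOLL_WRITE in gup_flags replaces the old separate write/force booleans, and pages pinned this way must be released with unpin_user_page() rather than put_page(). On older kernels the NV_PIN_USER_PAGES/NV_UNPIN_USER_PAGE wrappers presumably fall back to the legacy get_user_pages()/put_page() pair.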

View File

@@ -100,7 +100,7 @@ nvswitch_procfs_device_remove
return;
}
nv_procfs_unregister_all(nvswitch_dev->procfs_dir, nvswitch_dev->procfs_dir);
proc_remove(nvswitch_dev->procfs_dir);
nvswitch_dev->procfs_dir = NULL;
}
@@ -155,7 +155,7 @@ nvswitch_procfs_exit
return;
}
nv_procfs_unregister_all(nvswitch_procfs_dir, nvswitch_procfs_dir);
proc_remove(nvswitch_procfs_dir);
nvswitch_procfs_dir = NULL;
}