520.61.05

This commit is contained in:
Andy Ritger
2022-10-10 14:59:24 -07:00
parent fe0728787f
commit 90eb10774f
758 changed files with 88383 additions and 26493 deletions

View File

@@ -806,13 +806,14 @@ NV_STATUS NV_API_CALL nv_dma_map_peer
(
nv_dma_device_t *dma_dev,
nv_dma_device_t *peer_dma_dev,
NvU8 bar_index,
NvU8 nv_bar_index,
NvU64 page_count,
NvU64 *va
)
{
struct pci_dev *peer_pci_dev = to_pci_dev(peer_dma_dev->dev);
struct resource *res;
NvU8 bar_index;
NV_STATUS status;
if (peer_pci_dev == NULL)
@@ -822,7 +823,7 @@ NV_STATUS NV_API_CALL nv_dma_map_peer
return NV_ERR_INVALID_REQUEST;
}
BUG_ON(bar_index >= NV_GPU_NUM_BARS);
bar_index = nv_bar_index_to_os_bar_index(peer_pci_dev, nv_bar_index);
res = &peer_pci_dev->resource[bar_index];
if (res->start == 0)
{
@@ -1089,187 +1090,6 @@ void NV_API_CALL nv_dma_release_sgt
#endif /* NV_LINUX_DMA_BUF_H_PRESENT && NV_DRM_AVAILABLE && NV_DRM_DRM_GEM_H_PRESENT */
#if defined(NV_LINUX_DMA_BUF_H_PRESENT)
#endif /* NV_LINUX_DMA_BUF_H_PRESENT */
#ifndef IMPORT_DMABUF_FUNCTIONS_DEFINED

View File

@@ -23,8 +23,6 @@
#include <linux/dma-buf.h>
#include "nv-dmabuf.h"
#if defined(CONFIG_DMA_SHARED_BUFFER)
typedef struct nv_dma_buf_mem_handle
{
@@ -796,7 +794,6 @@ nv_dma_buf_reuse(
return NV_ERR_OPERATING_SYSTEM;
}
if (buf->ops != &nv_dma_buf_ops)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Invalid dma-buf fd\n");
@@ -804,7 +801,6 @@ nv_dma_buf_reuse(
goto cleanup_dmabuf;
}
priv = buf->priv;
if (priv == NULL)
@@ -820,13 +816,8 @@ nv_dma_buf_reuse(
goto cleanup_dmabuf;
}
if (params->index > (priv->total_objects - params->numObjects))
{
status = NV_ERR_INVALID_ARGUMENT;
goto unlock_priv;
}
@@ -900,15 +891,3 @@ nv_dma_buf_export(
#endif // CONFIG_DMA_SHARED_BUFFER
}

View File

@@ -27,11 +27,7 @@
#include "nv-frontend.h"
#if defined(MODULE_LICENSE)
MODULE_LICENSE("Dual MIT/GPL");
#endif
#if defined(MODULE_INFO)
MODULE_INFO(supported, "external");
@@ -50,14 +46,12 @@ MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
*/
#if defined(MODULE_IMPORT_NS)
/*
* DMA_BUF namespace is added by commit id 16b0314aa746
* ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16
*/
MODULE_IMPORT_NS(DMA_BUF);
#endif
static NvU32 nv_num_instances;

View File

@@ -140,8 +140,9 @@ static int nv_i2c_algo_smbus_xfer(
case I2C_SMBUS_WORD_DATA:
if (read_write != I2C_SMBUS_READ)
{
data->block[1] = (data->word & 0xff);
data->block[2] = (data->word >> 8);
u16 word = data->word;
data->block[1] = (word & 0xff);
data->block[2] = (word >> 8);
}
rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
@@ -273,246 +274,6 @@ void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
}
}
#else // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
@@ -524,29 +285,4 @@ void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
return NULL;
}
#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)

View File

@@ -132,13 +132,6 @@ nvidia_vma_access(
pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT);
pageOffset = (addr & ~PAGE_MASK);
if (!mmap_context->valid)
{
nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n");
@@ -326,6 +319,7 @@ int nv_encode_caching(
break;
#if defined(NV_PGPROT_WRITE_COMBINED) && \
defined(NV_PGPROT_WRITE_COMBINED_DEVICE)
case NV_MEMORY_DEFAULT:
case NV_MEMORY_WRITECOMBINED:
if (NV_ALLOW_WRITE_COMBINING(memory_type))
{
@@ -516,13 +510,6 @@ int nvidia_mmap_helper(
NvU64 access_start = mmap_context->access_start;
NvU64 access_len = mmap_context->access_size;
if (IS_REG_OFFSET(nv, access_start, access_len))
{
if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
@@ -544,7 +531,7 @@ int nvidia_mmap_helper(
else
{
if (nv_encode_caching(&vma->vm_page_prot,
rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : NV_MEMORY_WRITECOMBINED,
rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : mmap_context->caching,
NV_MEMORY_TYPE_FRAMEBUFFER))
{
if (nv_encode_caching(&vma->vm_page_prot,

View File

@@ -177,11 +177,7 @@ struct nvidia_p2p_page_table {
* This means the pages underlying the range of GPU virtual memory
* will persist until explicitly freed by nvidia_p2p_put_pages().
* Persistent GPU memory mappings are not supported on PowerPC,
* MIG-enabled devices and vGPU.
* @param[in] data
* A non-NULL opaque pointer to private data to be passed to the
* callback function.

View File

@@ -99,7 +99,7 @@ static void nv_init_dynamic_power_management
NV_PCI_DOMAIN_NUMBER(pci_dev),
NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev));
if (ret > 0 || ret < sizeof(filename))
if (ret > 0 && ret < sizeof(filename))
{
struct file *file = filp_open(filename, O_RDONLY, 0);
if (!IS_ERR(file))
@@ -156,75 +156,6 @@ static void nv_init_dynamic_power_management
rm_init_dynamic_power_management(sp, nv, pr3_acpi_method_present);
}
/* find nvidia devices and set initial state */
static int
nv_pci_probe
@@ -250,7 +181,6 @@ nv_pci_probe
return -1;
}
#ifdef NV_PCI_SRIOV_SUPPORT
if (pci_dev->is_virtfn)
{
@@ -296,7 +226,6 @@ nv_pci_probe
}
#endif /* NV_PCI_SRIOV_SUPPORT */
if (!rm_is_supported_pci_device(
(pci_dev->class >> 16) & 0xFF,
(pci_dev->class >> 8) & 0xFF,
@@ -498,20 +427,11 @@ next_bar:
nv_init_ibmnpu_info(nv);
#if defined(NVCPU_PPC64LE)
// Use HW NUMA support as a proxy for ATS support. This is true in the only
// PPC64LE platform where ATS is currently supported (IBM P9).
nv_ats_supported &= nv_platform_supports_numa(nvl);
#else
#endif
if (nv_ats_supported)
{
@@ -589,19 +509,19 @@ next_bar:
/* Parse and set any per-GPU registry keys specified. */
nv_parse_per_device_option_string(sp);
rm_set_rm_firmware_requested(sp, nv);
#if defined(NV_VGPU_KVM_BUILD)
if (nvidia_vgpu_vfio_probe(nvl->pci_dev) != NV_OK)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to register device to vGPU VFIO module");
nvidia_frontend_remove_device((void *)&nv_fops, nvl);
goto err_remove_device;
goto err_vgpu_kvm;
}
#endif
nv_check_and_exclude_gpu(sp, nv);
rm_set_rm_firmware_requested(sp, nv);
#if defined(DPM_FLAG_NO_DIRECT_COMPLETE)
dev_pm_set_driver_flags(nvl->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
#elif defined(DPM_FLAG_NEVER_SKIP)
@@ -619,11 +539,18 @@ next_bar:
return 0;
#if defined(NV_VGPU_KVM_BUILD)
err_vgpu_kvm:
#endif
nv_procfs_remove_gpu(nvl);
rm_cleanup_dynamic_power_management(sp, nv);
#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT)
pm_vt_switch_unregister(nvl->dev);
#endif
err_remove_device:
LOCK_NV_LINUX_DEVICES();
nv_linux_remove_device_locked(nvl);
UNLOCK_NV_LINUX_DEVICES();
rm_cleanup_dynamic_power_management(sp, nv);
err_zero_dev:
rm_free_private_state(sp, nv);
err_not_supported:
@@ -654,7 +581,6 @@ nv_pci_remove(struct pci_dev *pci_dev)
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
#ifdef NV_PCI_SRIOV_SUPPORT
if (pci_dev->is_virtfn)
{
@@ -666,7 +592,6 @@ nv_pci_remove(struct pci_dev *pci_dev)
}
#endif /* NV_PCI_SRIOV_SUPPORT */
if (nv_kmem_cache_alloc_stack(&sp) != 0)
{
return;

View File

@@ -566,7 +566,6 @@
#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
/*
* Option: EnableS0ixPowerManagement
*
@@ -615,7 +614,6 @@
#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: DynamicPowerManagement
*
@@ -847,10 +845,8 @@ NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
@@ -864,11 +860,7 @@ NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
@@ -912,10 +904,8 @@ nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY,
__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),

View File

@@ -84,6 +84,7 @@ NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
nvamc->prot = prot;
nvamc->valid = NV_TRUE;
nvamc->caching = nvuap->caching;
done:
nv_put_file_private(priv);

View File

@@ -222,7 +222,7 @@ static NvU64 nv_get_max_sysmem_address(void)
for_each_online_node(node_id)
{
global_max_pfn = max(global_max_pfn, node_end_pfn(node_id));
global_max_pfn = max(global_max_pfn, (NvU64)node_end_pfn(node_id));
}
return ((global_max_pfn + 1) << PAGE_SHIFT) - 1;
@@ -371,13 +371,6 @@ NV_STATUS nv_alloc_contig_pages(
if (os_is_xen_dom0() || at->flags.unencrypted)
return nv_alloc_coherent_pages(nv, at);
at->order = get_order(at->num_pages * PAGE_SIZE);
gfp_mask = nv_compute_gfp_mask(nv, at);

View File

@@ -37,11 +37,9 @@
#include "nv-vgpu-vfio-interface.h"
#endif
#include "nvlink_proto.h"
#include "nvlink_caps.h"
#include "nv-frontend.h"
#include "nv-hypervisor.h"
#include "nv-ibmnpu.h"
@@ -141,11 +139,6 @@ static NvTristate nv_chipset_is_io_coherent = NV_TRISTATE_INDETERMINATE;
// True if all the successfully probed devices support ATS
// Assigned at device probe (module init) time
NvBool nv_ats_supported = NVCPU_IS_PPC64LE
;
// allow an easy way to convert all debug printfs related to events
@@ -416,16 +409,13 @@ exit:
return rc;
}
static void
nvlink_drivers_exit(void)
{
#if NVCPU_IS_64_BITS
nvswitch_exit();
#endif
#if defined(NVCPU_PPC64LE)
ibmnpu_exit();
#endif
@@ -433,8 +423,6 @@ nvlink_drivers_exit(void)
nvlink_core_exit();
}
static int __init
nvlink_drivers_init(void)
{
@@ -457,7 +445,6 @@ nvlink_drivers_init(void)
}
#endif
#if NVCPU_IS_64_BITS
rc = nvswitch_init();
if (rc < 0)
@@ -470,11 +457,9 @@ nvlink_drivers_init(void)
}
#endif
return rc;
}
static void
nv_module_state_exit(nv_stack_t *sp)
{
@@ -600,9 +585,6 @@ nv_report_applied_patches(void)
static void
nv_drivers_exit(void)
{
nv_pci_unregister_driver();
nvidia_unregister_module(&nv_fops);
@@ -629,16 +611,6 @@ nv_drivers_init(void)
goto exit;
}
exit:
if (rc < 0)
{
@@ -656,10 +628,8 @@ nv_module_exit(nv_stack_t *sp)
rm_shutdown_rm(sp);
nv_destroy_rsync_info();
nvlink_drivers_exit();
nv_cap_drv_exit();
nv_module_resources_exit(sp);
@@ -683,14 +653,12 @@ nv_module_init(nv_stack_t **sp)
goto cap_drv_exit;
}
rc = nvlink_drivers_init();
if (rc < 0)
{
goto cap_drv_exit;
}
nv_init_rsync_info();
nv_sev_init();
@@ -714,10 +682,8 @@ init_rm_exit:
nvlink_exit:
nv_destroy_rsync_info();
nvlink_drivers_exit();
cap_drv_exit:
nv_cap_drv_exit();
nv_module_resources_exit(*sp);
@@ -1266,7 +1232,8 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
#endif
if (((!(nv->flags & NV_FLAG_USES_MSI)) && (!(nv->flags & NV_FLAG_USES_MSIX)))
&& (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY))
&& (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY)
&& !(nv->flags & NV_FLAG_SOC_IGPU))
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"No interrupts of any type are available. Cannot use this GPU.\n");
@@ -1279,9 +1246,6 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
{
if (nv->flags & NV_FLAG_SOC_DISPLAY)
{
}
else if (!(nv->flags & NV_FLAG_USES_MSIX))
{
@@ -1331,15 +1295,13 @@ static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp)
if (!rm_init_adapter(sp, nv))
{
if (!(nv->flags & NV_FLAG_USES_MSIX) &&
!(nv->flags & NV_FLAG_SOC_DISPLAY))
!(nv->flags & NV_FLAG_SOC_DISPLAY) &&
!(nv->flags & NV_FLAG_SOC_IGPU))
{
free_irq(nv->interrupt_line, (void *) nvl);
}
else if (nv->flags & NV_FLAG_SOC_DISPLAY)
{
}
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
else
@@ -1467,10 +1429,8 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
return -ENODEV;
}
if (unlikely(NV_ATOMIC_READ(nvl->usage_count) >= NV_S32_MAX))
return -EMFILE;
if ( ! (nv->flags & NV_FLAG_OPEN))
{
@@ -1674,7 +1634,8 @@ void nv_shutdown_adapter(nvidia_stack_t *sp,
}
if (!(nv->flags & NV_FLAG_USES_MSIX) &&
!(nv->flags & NV_FLAG_SOC_DISPLAY))
!(nv->flags & NV_FLAG_SOC_DISPLAY) &&
!(nv->flags & NV_FLAG_SOC_IGPU))
{
free_irq(nv->interrupt_line, (void *)nvl);
if (nv->flags & NV_FLAG_USES_MSI)
@@ -1686,9 +1647,6 @@ void nv_shutdown_adapter(nvidia_stack_t *sp,
}
else if (nv->flags & NV_FLAG_SOC_DISPLAY)
{
}
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
else
@@ -3838,9 +3796,6 @@ nvos_count_devices(void)
count = nv_pci_count_devices();
return count;
}
@@ -4901,7 +4856,6 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
NvU64 *compr_addr_sys_phys,
NvU64 *addr_guest_phys,
NvU32 *addr_width,
NvU32 *granularity,
NvS32 *node_id
)
{
@@ -4940,43 +4894,9 @@ NV_STATUS NV_API_CALL nv_get_device_memory_config(
*addr_width = nv_volta_dma_addr_size - nv_volta_addr_space_width;
}
if (granularity != NULL)
{
*granularity = nv_volta_addr_space_width;
}
status = NV_OK;
#endif
return status;
}
@@ -5188,7 +5108,6 @@ NvU32 NV_API_CALL nv_get_dev_minor(nv_state_t *nv)
NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap(int fd, int *duped_fd)
{
*duped_fd = nvlink_cap_acquire(fd, NVLINK_CAP_FABRIC_MANAGEMENT);
if (*duped_fd < 0)
{
@@ -5196,9 +5115,6 @@ NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap(int fd, int *duped_fd)
}
return NV_OK;
}
/*
@@ -5419,7 +5335,6 @@ NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *nv)
#endif
}
NvBool NV_API_CALL nv_platform_supports_s0ix(void)
{
#if defined(CONFIG_ACPI)
@@ -5483,7 +5398,6 @@ NvBool NV_API_CALL nv_s2idle_pm_configured(void)
return (memcmp(buf, "[s2idle]", 8) == 0);
}
/*
* Function query system chassis info, to figure out if the platform is
* Laptop or Notebook.
@@ -5554,7 +5468,6 @@ void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_vir
#if NVCPU_IS_PPC64LE
return nv_ibmnpu_cache_flush_range(nv, cpu_virtual, size);
#elif NVCPU_IS_AARCH64
NvU64 va, cbsize;
NvU64 end_cpu_virtual = cpu_virtual + size;
@@ -5574,7 +5487,6 @@ void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_vir
cond_resched();
}
asm volatile("dsb sy" : : : "memory");
#endif
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -126,6 +126,7 @@ NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuAddressSpace *dstVaSpace,
NvU64 dstVaAlignment,
NvU64 *dstAddress);
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
@@ -275,27 +276,4 @@ NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
char *methodStream,
NvU32 methodStreamSize);
#endif /* _NV_GPU_OPS_H_*/

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -196,9 +196,7 @@ NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
memset(platformInfo, 0, sizeof(*platformInfo));
platformInfo->atsSupported = nv_ats_supported;
platformInfo->sevEnabled = os_sev_enabled;
status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session);
@@ -700,6 +698,7 @@ EXPORT_SYMBOL(nvUvmInterfaceUnsetPageDirectory);
NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace,
NvU64 srcAddress,
uvmGpuAddressSpaceHandle dstVaSpace,
NvU64 dstVaAlignment,
NvU64 *dstAddress)
{
nvidia_stack_t *sp = NULL;
@@ -714,6 +713,7 @@ NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace,
(gpuAddressSpaceHandle)srcVaSpace,
srcAddress,
(gpuAddressSpaceHandle)dstVaSpace,
dstVaAlignment,
dstAddress);
nv_kmem_cache_free_stack(sp);
@@ -1420,115 +1420,6 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
}
EXPORT_SYMBOL(nvUvmInterfacePagingChannelPushStream);
#else // NV_UVM_ENABLE
NV_STATUS nv_uvm_suspend(void)

View File

@@ -69,9 +69,7 @@ struct nvlink_detailed_device_info
NvU64 deviceType;
NvU8 *devUuid;
NvBool bInitialized;
NvBool bEnableALI;
void *dev_info; // Endpoint driver device info opaque
// to core lib. Passed from end point
// driver to core

View File

@@ -0,0 +1,66 @@
/*******************************************************************************
Copyright (c) 2022 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef NVLINK_INBAND_DRV_HDR_H
#define NVLINK_INBAND_DRV_HDR_H
/*
* This header file defines the header that should be used by RM and NVSwitch
* driver to sync minions on both the sides before an actual inband message
* transfer is initiated.
*
* Modifying the existing header structure is not allowed. A versioning
* policy must be enforced if such changes are needed in the future.
*
* - Avoid use of enums or bit fields. Always use fixed types.
* - Avoid conditional fields in the structs
* - Avoid nested and complex structs. Keep them simple and flat for ease of
* encoding and decoding.
* - Avoid embedded pointers. Flexible arrays at the end of the struct are allowed.
* - Always use the packed struct to typecast inband messages.
* - Always have reserved flags or fields to CYA given the stable ABI conditions.
*/
/* Align to byte boundaries */
#pragma pack(push, 1)
#include "nvtypes.h"
/* Maximum size (0x100 = 256) of a single in-band transfer chunk */
#define NVLINK_INBAND_MAX_XFER_SIZE 0x100
/* Maximum number of transfer chunks that may be issued at once */
#define NVLINK_INBAND_MAX_XFER_AT_ONCE 4
/* Position of this chunk within a multi-chunk transfer */
#define NVLINK_INBAND_DRV_HDR_TYPE_START NVBIT(0)
#define NVLINK_INBAND_DRV_HDR_TYPE_MID NVBIT(1)
#define NVLINK_INBAND_DRV_HDR_TYPE_END NVBIT(2)
/* Rest of the bits are reserved for future use and must be always set zero. */
typedef struct
{
NvU8 data; /* Bitmask of NVLINK_INBAND_DRV_HDR_TYPE_* flags; reserved bits must be zero */
} nvlink_inband_drv_hdr_t;
#pragma pack(pop)
/* Don't add any code after this line */
#endif

View File

@@ -0,0 +1,166 @@
/*******************************************************************************
Copyright (c) 2022 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
#ifndef NVLINK_INBAND_MSG_HDR_H
#define NVLINK_INBAND_MSG_HDR_H
/*
* Messages do not have individual versioning, instead a strict ABI is maintained. When a change is
* required on existing message, instead of modifying corresponding message structure, a completely
* new message type (like INBAND_MSG_TYPE_XXX_V1, INBAND_MSG_TYPE_XXX_V2) and corresponding message
* definition structure needs to be added. Do not modify existing structs in any way.
*
* Messages may contain fields which are debug only and must be used for logging purpose. Such
* fields shouldn't be trusted.
*
* - Avoid use of enums or bitfields. Always use fixed types.
* - Avoid conditional fields in the structs.
* - Avoid nested and complex structs. Keep them simple and flat for ease of encoding and decoding.
* - Avoid embedded pointers. Flexible arrays at the end of the struct are allowed.
* - Always use the packed struct to typecast inband messages.
* - Always have reserved flags or fields to CYA given the stable ABI conditions.
*/
/* Align to byte boundaries */
#pragma pack(push, 1)
#include "nvtypes.h"
#include "nvmisc.h"
#include "nvCpuUuid.h"
#include "nvstatus.h"
#include "nvstatuscodes.h"
/* Upper bound, in bytes, for any encoded in-band message */
#define NVLINK_INBAND_MAX_MSG_SIZE 4096
/* Magic identifier for messages originated by the Fabric Manager */
#define NVLINK_INBAND_MSG_MAGIC_ID_FM 0xadbc
/* Nvlink Inband message types */
#define NVLINK_INBAND_MSG_TYPE_GPU_PROBE_REQ 0
#define NVLINK_INBAND_MSG_TYPE_GPU_PROBE_RSP 1
#define NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_REQ 2
#define NVLINK_INBAND_MSG_TYPE_MC_TEAM_SETUP_RSP 3
#define NVLINK_INBAND_MSG_TYPE_MC_TEAM_RELEASE_REQ 4
/* Nvlink Inband message packet header */
typedef struct
{
NvU16 magicId; /* Identifier to represent in-band msg; one of the NVLINK_INBAND_MSG_MAGIC_ID_* values (currently only _FM) */
NvU64 requestId; /* Unique Id for a request and response will carry same id */
NV_STATUS status; /* High level status of the message/request */
NvU16 type; /* Type of encoded message. One of NVLINK_INBAND_MSG_TYPE_xxx */
NvU32 length; /* Length of encoded message */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
} nvlink_inband_msg_header_t;
#define NVLINK_INBAND_GPU_PROBE_CAPS_SRIOV_ENABLED NVBIT(0)
/* Add more caps as needed in the future */
typedef struct
{
NvU32 pciInfo; /* Encoded as Bus:Device:Function.(debug only) */
NvU8 moduleId; /* GPIO based physical/module ID of the GPU. (debug only) */
NvUuid uuid; /* UUID of the GPU. (debug only) */
NvU64 discoveredLinkMask; /* GPU's discovered NVLink mask info. (debug only) */
NvU64 enabledLinkMask; /* GPU's currently enabled NvLink mask info. (debug only) */
NvU32 gpuCapMask; /* GPU capabilities, one of NVLINK_INBAND_GPU_PROBE_CAPS */
NvU8 reserved[16]; /* For future use. Must be initialized to zero */
} nvlink_inband_gpu_probe_req_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_gpu_probe_req_t probeReq;
} nvlink_inband_gpu_probe_req_msg_t;
#define NVLINK_INBAND_FM_CAPS_MC_TEAM_SETUP_V1 NVBIT64(0)
#define NVLINK_INBAND_FM_CAPS_MC_TEAM_RELEASE_V1 NVBIT64(1)
typedef struct
{
NvU64 gpuHandle; /* Unique handle assigned by initialization entity for this GPU */
NvU32 gfId; /* GFID which supports NVLink */
NvU64 fmCaps; /* Capability of FM e.g. what features FM support. */
NvU16 nodeId; /* Node ID of the system where this GPU belongs */
NvU16 fabricPartitionId; /* Partition ID if the GPU belongs to a fabric partition */
NvU16 clusterId; /* Cluster ID to which this node belongs */
NvU64 gpaAddress; /* GPA starting address for the GPU */
NvU64 gpaAddressRange; /* GPU GPA address range */
NvU64 flaAddress; /* FLA starting address for the GPU */
NvU64 flaAddressRange; /* GPU FLA address range */
NvU8 reserved[32]; /* For future use. Must be initialized to zero */
} nvlink_inband_gpu_probe_rsp_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_gpu_probe_rsp_t probeRsp;
} nvlink_inband_gpu_probe_rsp_msg_t;
typedef struct
{
NvU64 mcAllocSize; /* Multicast allocation size requested */
NvU32 flags; /* For future use. Must be initialized to zero */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
NvU16 numGpuHandles; /* Number of GPUs in this team */
NvU64 gpuHandles[]; /* Array of probed handles, should be last */
} nvlink_inband_mc_team_setup_req_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_mc_team_setup_req_t mcTeamSetupReq;
} nvlink_inband_mc_team_setup_req_msg_t;
typedef struct
{
NvU64 mcTeamHandle; /* Unique handle assigned for this Multicast team */
NvU32 flags; /* For future use. Must be initialized to zero */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
NvU64 mcAddressBase; /* FLA starting address assigned for the Multicast slot */
NvU64 mcAddressSize; /* Size of FLA assigned to the Multicast slot */
} nvlink_inband_mc_team_setup_rsp_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_mc_team_setup_rsp_t mcTeamSetupRsp;
} nvlink_inband_mc_team_setup_rsp_msg_t;
typedef struct
{
NvU64 mcTeamHandle; /* Unique handle assigned for the Multicast team */
NvU32 flags; /* For future use. Must be initialized to zero */
NvU8 reserved[8]; /* For future use. Must be initialized to zero */
} nvlink_inband_mc_team_release_req_t;
typedef struct
{
nvlink_inband_msg_header_t msgHdr;
nvlink_inband_mc_team_release_req_t mcTeamReleaseReq;
} nvlink_inband_mc_team_release_req_msg_t;
#pragma pack(pop)
/* Don't add any code after this line */
#endif

View File

@@ -207,11 +207,6 @@ static int nvlink_fops_release(struct inode *inode, struct file *filp)
nvlink_print(NVLINK_DBG_INFO, "nvlink driver close\n");
mutex_lock(&nvlink_drvctx.lock);
if (private->capability_fds.fabric_mgmt > 0)

View File

@@ -33,14 +33,12 @@
int nvlink_core_init (void);
void nvlink_core_exit (void);
/*
* Functions defined in nvswitch_linux.c
*/
int nvswitch_init (void);
void nvswitch_exit (void);
#if defined(NVCPU_AARCH64)
/*
* Functions defined in tegrashim_linux.c (Tegra only)

View File

@@ -28,14 +28,6 @@
#include "nv-time.h"
extern char *NVreg_TemporaryFilePath;
#define MAX_ERROR_STRING 512
@@ -52,16 +44,12 @@ NvU8 os_page_shift = PAGE_SHIFT;
NvU32 os_sev_status = 0;
NvBool os_sev_enabled = 0;
#if defined(CONFIG_DMA_SHARED_BUFFER)
NvBool os_dma_buf_enabled = NV_TRUE;
#else
NvBool os_dma_buf_enabled = NV_FALSE;
#endif // CONFIG_DMA_SHARED_BUFFER
void NV_API_CALL os_disable_console_access(void)
{
console_lock();
@@ -974,6 +962,11 @@ void NV_API_CALL os_dbg_set_level(NvU32 new_debuglevel)
cur_debuglevel = new_debuglevel;
}
NvU64 NV_API_CALL os_get_max_user_va(void)
{
return TASK_SIZE;
}
NV_STATUS NV_API_CALL os_schedule(void)
{
if (NV_MAY_SLEEP())
@@ -1964,52 +1957,6 @@ void NV_API_CALL os_wake_up
complete_all(&wq->q);
}
nv_cap_t* NV_API_CALL os_nv_cap_init
(
const char *path
@@ -2063,101 +2010,3 @@ void NV_API_CALL os_nv_cap_close_fd
nv_cap_close_fd(fd);
}

View File

@@ -158,49 +158,47 @@ void NV_API_CALL os_pci_remove(
#endif
}
/*
 * Enable PCIe requester atomic operations of the requested width for the
 * device referenced by 'handle' (passed through to the kernel PCI helpers),
 * then verify the enable actually took effect in DEVCTL2.
 *
 * Returns NV_OK when atomics were enabled and the device reports
 * PCI_EXP_DEVCTL2_ATOMIC_REQ set; NV_ERR_NOT_SUPPORTED otherwise,
 * including on kernels without pci_enable_atomic_ops_to_root().
 */
NV_STATUS NV_API_CALL
os_enable_pci_req_atomics(
    void *handle,
    enum os_pci_req_atomics_type type
)
{
#ifdef NV_PCI_ENABLE_ATOMIC_OPS_TO_ROOT_PRESENT
    u32 cap_mask;
    u16 devctl2;

    /* Translate the requested atomic width into a DEVCAP2 completer mask. */
    switch (type)
    {
        case OS_INTF_PCIE_REQ_ATOMICS_32BIT:
            cap_mask = PCI_EXP_DEVCAP2_ATOMIC_COMP32;
            break;
        case OS_INTF_PCIE_REQ_ATOMICS_64BIT:
            cap_mask = PCI_EXP_DEVCAP2_ATOMIC_COMP64;
            break;
        case OS_INTF_PCIE_REQ_ATOMICS_128BIT:
            cap_mask = PCI_EXP_DEVCAP2_ATOMIC_COMP128;
            break;
        default:
            cap_mask = 0;
            break;
    }

    if ((cap_mask != 0) &&
        (pci_enable_atomic_ops_to_root(handle, cap_mask) == 0))
    {
        /*
         * GPUs that don't support Requester Atomics have its
         * PCI_EXP_DEVCTL2_ATOMIC_REQ always set to 0 even after SW enables it.
         */
        if ((pcie_capability_read_word(handle, PCI_EXP_DEVCTL2, &devctl2) == 0) &&
            (devctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ))
        {
            return NV_OK;
        }
    }
#endif
    return NV_ERR_NOT_SUPPORTED;
}