530.30.02

commit 4397463e73 (parent e598191e8e)
Author: Andy Ritger
Date:   2023-02-28 11:12:44 -08:00

928 changed files with 124728 additions and 88525 deletions

View File

@@ -899,7 +899,6 @@ nvswitch_os_vsnprintf
void
nvswitch_os_assert_log
(
-    int cond,
const char *pFormat,
...
);

View File

@@ -2509,26 +2509,22 @@ nvswitch_os_vsnprintf
void
nvswitch_os_assert_log
(
-    int cond,
const char *fmt,
...
)
{
-    if(cond == 0x0)
+    if (printk_ratelimit())
    {
-        if (printk_ratelimit())
-        {
-            va_list arglist;
-            char fmt_printk[NVSWITCH_LOG_BUFFER_SIZE];
+        va_list arglist;
+        char fmt_printk[NVSWITCH_LOG_BUFFER_SIZE];
-            va_start(arglist, fmt);
-            vsnprintf(fmt_printk, sizeof(fmt_printk), fmt, arglist);
-            va_end(arglist);
-            nvswitch_os_print(NVSWITCH_DBG_LEVEL_ERROR, fmt_printk);
-            WARN_ON(1);
-        }
-        dbg_breakpoint();
-    }
+        va_start(arglist, fmt);
+        vsnprintf(fmt_printk, sizeof(fmt_printk), fmt, arglist);
+        va_end(arglist);
+        nvswitch_os_print(NVSWITCH_DBG_LEVEL_ERROR, fmt_printk);
+        WARN_ON(1);
+    }
+    dbg_breakpoint();
}
/*

View File

@@ -37,8 +37,12 @@
#define NVSWITCH_IRQ_PIN 3
#define NVSWITCH_OS_ASSERT(_cond) \
-    nvswitch_os_assert_log((_cond), "NVSwitch: Assertion failed in %s() at %s:%d\n", \
-        __FUNCTION__ , __FILE__, __LINE__)
+    do { \
+        if (!(_cond)) { \
+            nvswitch_os_assert_log("NVSwitch: Assertion failed in %s() at %s:%d\n", \
+                __FUNCTION__ , __FILE__, __LINE__); \
+        } \
+    } while(0)
#define NVSWITCH_KMALLOC_LIMIT (128 * 1024)
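With this change the assertion condition is evaluated at the call site, so nvswitch_os_assert_log() is only invoked (and its varargs only formatted) when the assertion actually fails, which is why the int cond parameter is dropped from the function above. As a rough sketch, a hypothetical call NVSWITCH_OS_ASSERT(ptr != NULL) now expands to approximately:

    /* illustrative expansion only; "ptr" is a hypothetical caller variable */
    do {
        if (!(ptr != NULL)) {
            nvswitch_os_assert_log("NVSwitch: Assertion failed in %s() at %s:%d\n",
                                   __FUNCTION__, __FILE__, __LINE__);
        }
    } while(0);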

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,6 +36,7 @@ static NV_STATUS nv_acpi_extract_package (const union acpi_object *, void *, N
static NV_STATUS nv_acpi_extract_object (const union acpi_object *, void *, NvU32, NvU32 *);
static void nv_acpi_powersource_hotplug_event(acpi_handle, u32, void *);
+static void nv_acpi_nvpcf_event (acpi_handle, u32, void *);
static acpi_status nv_acpi_find_methods (acpi_handle, u32, void *, void **);
static NV_STATUS nv_acpi_nvif_method (NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
@@ -65,6 +66,13 @@ static NvBool battery_present = NV_FALSE;
#define ACPI_VIDEO_CLASS "video"
#endif
+// Used for NVPCF event handling
+static acpi_handle nvpcf_handle = NULL;
+static acpi_handle nvpcf_device_handle = NULL;
+static nv_acpi_t *nvpcf_nv_acpi_object = NULL;
+#define ACPI_NVPCF_EVENT_CHANGE 0xC0
static int nv_acpi_get_device_handle(nv_state_t *nv, acpi_handle *dev_handle)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
@@ -80,51 +88,42 @@ static int nv_acpi_get_device_handle(nv_state_t *nv, acpi_handle *dev_handle)
#endif
}
-static int nv_acpi_notify(struct notifier_block *nb,
-                          unsigned long val, void *data)
+/*
+ * This callback will be invoked by the acpi_notifier_call_chain()
+ */
+static int nv_acpi_notifier_call_chain_handler(
+    struct notifier_block *nb,
+    unsigned long val,
+    void *data
+)
{
    struct acpi_bus_event *info = data;
-    nv_stack_t *sp = NULL;
-    nv_linux_state_t *nvl = container_of(nb, nv_linux_state_t, acpi_nb);
-    nv_state_t *nv = NV_STATE_PTR(nvl);
-    if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
-        if (nv_kmem_cache_alloc_stack(&sp) == 0) {
-            /*
-             * Function to handle device specific ACPI events
-             * such as display hotplug and D-notifier events.
-             */
-            rm_acpi_notify(sp, nv, info->type);
-            nv_kmem_cache_free_stack(sp);
-        }
-        else
-            nv_printf(NV_DBG_ERRORS,
-                "NVRM: nv_acpi_notify: failed to allocate stack\n");
+    /*
+     * An ACPI_VIDEO_NOTIFY_PROBE event is sent on display hot-plug/unplug.
+     * It is received first by the acpi-video driver and then forwarded
+     * through acpi_notifier_call_chain().
+     */
+    if (!strcmp(info->device_class, ACPI_VIDEO_CLASS) &&
+        (info->type == ACPI_VIDEO_NOTIFY_PROBE))
+    {
        /*
-         * Special case for ACPI_VIDEO_NOTIFY_PROBE event: intentionally return
-         * NOTIFY_BAD to inform acpi-video to stop generating keypresses for
-         * this event.
+         * Intentionally return NOTIFY_BAD to inform acpi-video to stop
+         * generating keypresses for this event. The default behavior in the
+         * acpi-video driver for an ACPI_VIDEO_NOTIFY_PROBE is to send a
+         * KEY_SWITCHVIDEOMODE evdev event, which causes desktop settings
+         * daemons such as gnome-settings-daemon to switch modes; this affects
+         * notebooks with an external HDMI display connected.
         */
-        if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
-            return NOTIFY_BAD;
-        }
+        return NOTIFY_BAD;
    }
    return NOTIFY_DONE;
}
-void nv_acpi_register_notifier(nv_linux_state_t *nvl)
-{
-    nvl->acpi_nb.notifier_call = nv_acpi_notify;
-    register_acpi_notifier(&nvl->acpi_nb);
-}
-void nv_acpi_unregister_notifier(nv_linux_state_t *nvl)
-{
-    unregister_acpi_notifier(&nvl->acpi_nb);
-}
NV_STATUS NV_API_CALL nv_acpi_get_powersource(NvU32 *ac_plugged)
{
unsigned long long val;
@@ -167,12 +166,31 @@ static void nv_acpi_powersource_hotplug_event(acpi_handle handle, u32 event_type
rm_power_source_change_event(pNvAcpiObject->sp, !ac_plugged);
}
}
+static void nv_acpi_nvpcf_event(acpi_handle handle, u32 event_type, void *data)
+{
+    nv_acpi_t *pNvAcpiObject = data;
+    if (event_type == ACPI_NVPCF_EVENT_CHANGE)
+    {
+        rm_acpi_nvpcf_notify(pNvAcpiObject->sp);
+    }
+    else
+    {
+        nv_printf(NV_DBG_INFO, "NVRM: %s: NVPCF event 0x%x is not supported\n", __FUNCTION__, event_type);
+    }
+}
+/*
+ * End of ACPI event handler functions
+ */
/* Do the necessary allocations and install notifier "handler" on the device-node "device" */
-static nv_acpi_t* nv_install_notifier(struct acpi_handle *handle, acpi_notify_handler handler)
+static nv_acpi_t* nv_install_notifier(
+    struct acpi_handle *handle,
+    acpi_notify_handler handler,
+    void *notifier_data
+)
{
nvidia_stack_t *sp = NULL;
nv_acpi_t *pNvAcpiObject = NULL;
@@ -196,6 +214,7 @@ static nv_acpi_t* nv_install_notifier(struct acpi_handle *handle, acpi_notify_ha
// store a handle reference in our object
pNvAcpiObject->handle = handle;
pNvAcpiObject->sp = sp;
+    pNvAcpiObject->notifier_data = notifier_data;
status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY,
handler, pNvAcpiObject);
@@ -237,6 +256,49 @@ static void nv_uninstall_notifier(nv_acpi_t *pNvAcpiObject, acpi_notify_handler
return;
}
+static void nv_acpi_notify_event(acpi_handle handle, u32 event_type, void *data)
+{
+    nv_acpi_t *pNvAcpiObject = data;
+    nv_linux_state_t *nvl = pNvAcpiObject->notifier_data;
+    /*
+     * Function to handle device specific ACPI events such as display hotplug,
+     * GPS and D-notifier events.
+     */
+    rm_acpi_notify(pNvAcpiObject->sp, NV_STATE_PTR(nvl), event_type);
+}
+void nv_acpi_register_notifier(nv_linux_state_t *nvl)
+{
+    acpi_handle dev_handle = NULL;
+    /* Install the ACPI notifier corresponding to dGPU ACPI device. */
+    if ((nvl->nv_acpi_object == NULL) &&
+        nv_acpi_get_device_handle(NV_STATE_PTR(nvl), &dev_handle) &&
+        (dev_handle != NULL))
+    {
+        nvl->nv_acpi_object = nv_install_notifier(dev_handle, nv_acpi_notify_event, nvl);
+        if (nvl->nv_acpi_object == NULL)
+        {
+            nv_printf(NV_DBG_ERRORS,
+                      "NVRM: nv_acpi_register_notifier: failed to install notifier\n");
+        }
+    }
+    nvl->acpi_nb.notifier_call = nv_acpi_notifier_call_chain_handler;
+    register_acpi_notifier(&nvl->acpi_nb);
+}
+void nv_acpi_unregister_notifier(nv_linux_state_t *nvl)
+{
+    unregister_acpi_notifier(&nvl->acpi_nb);
+    if (nvl->nv_acpi_object != NULL)
+    {
+        nv_uninstall_notifier(nvl->nv_acpi_object, nv_acpi_notify_event);
+        nvl->nv_acpi_object = NULL;
+    }
+}
/*
* acpi methods init function.
* check if the NVIF, _DSM and WMMX methods are present in the acpi namespace.
@@ -268,10 +330,15 @@ void NV_API_CALL nv_acpi_methods_init(NvU32 *handlesPresent)
// devices
if (psr_nv_acpi_object == NULL)
{
-        psr_nv_acpi_object = nv_install_notifier(psr_device_handle, nv_acpi_powersource_hotplug_event);
+        psr_nv_acpi_object = nv_install_notifier(psr_device_handle, nv_acpi_powersource_hotplug_event, NULL);
}
}
+    if (nvpcf_handle && (nvpcf_nv_acpi_object == NULL))
+    {
+        nvpcf_nv_acpi_object = nv_install_notifier(nvpcf_device_handle, nv_acpi_nvpcf_event, NULL);
+    }
return;
}
@@ -300,6 +367,12 @@ acpi_status nv_acpi_find_methods(
psr_device_handle = handle;
}
+    if (!acpi_get_handle(handle, "NPCF", &method_handle))
+    {
+        nvpcf_handle = method_handle;
+        nvpcf_device_handle = handle;
+    }
return 0;
}
@@ -316,6 +389,15 @@ void NV_API_CALL nv_acpi_methods_uninit(void)
psr_device_handle = NULL;
psr_nv_acpi_object = NULL;
}
+    if (nvpcf_nv_acpi_object != NULL)
+    {
+        nv_uninstall_notifier(nvpcf_nv_acpi_object, nv_acpi_nvpcf_event);
+        nvpcf_handle = NULL;
+        nvpcf_device_handle = NULL;
+        nvpcf_nv_acpi_object = NULL;
+    }
}
static NV_STATUS nv_acpi_extract_integer(

View File

@@ -350,9 +350,15 @@ int nv_encode_caching(
return 1;
#endif
case NV_MEMORY_CACHED:
-            if (NV_ALLOW_CACHING(memory_type))
-                break;
-            // Intentional fallthrough.
+            if (!NV_ALLOW_CACHING(memory_type))
+            {
+                nv_printf(NV_DBG_ERRORS,
+                    "NVRM: VM: memory type %d does not allow caching!\n",
+                    memory_type);
+                return 1;
+            }
+            break;
default:
nv_printf(NV_DBG_ERRORS,
"NVRM: VM: cache type %d not supported for memory type %d!\n",
@@ -529,6 +535,7 @@ int nvidia_mmap_helper(
{
return -ENXIO;
}
if (IS_REG_OFFSET(nv, access_start, access_len))
{
if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,

View File

@@ -156,6 +156,119 @@ static void nv_init_dynamic_power_management
rm_init_dynamic_power_management(sp, nv, pr3_acpi_method_present);
}
static int nv_resize_pcie_bars(struct pci_dev *pci_dev) {
#if defined(NV_PCI_REBAR_GET_POSSIBLE_SIZES_PRESENT)
u16 cmd;
int r, old_size, requested_size;
unsigned long sizes;
int ret = 0;
#if NV_IS_EXPORT_SYMBOL_PRESENT_pci_find_host_bridge
struct pci_host_bridge *host;
#endif
if (NVreg_EnableResizableBar == 0)
{
nv_printf(NV_DBG_INFO, "NVRM: resizable BAR disabled by regkey, skipping\n");
return 0;
}
// Check if BAR1 has PCIe rebar capabilities
sizes = pci_rebar_get_possible_sizes(pci_dev, NV_GPU_BAR1);
if (sizes == 0) {
/* ReBAR not available. Nothing to do. */
return 0;
}
/* Try to resize the BAR to the largest supported size */
requested_size = fls(sizes) - 1;
/* Save the current size, just in case things go wrong */
old_size = pci_rebar_bytes_to_size(pci_resource_len(pci_dev, NV_GPU_BAR1));
if (old_size == requested_size) {
nv_printf(NV_DBG_INFO, "NVRM: %04x:%02x:%02x.%x: BAR1 already at requested size.\n",
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
return 0;
}
#if NV_IS_EXPORT_SYMBOL_PRESENT_pci_find_host_bridge
/* If the kernel will refuse us, don't even try to resize,
but give an informative error */
host = pci_find_host_bridge(pci_dev->bus);
if (host->preserve_config) {
nv_printf(NV_DBG_INFO, "NVRM: Not resizing BAR because the firmware forbids moving windows.\n");
return 0;
}
#endif
nv_printf(NV_DBG_INFO, "NVRM: %04x:%02x:%02x.%x: Attempting to resize BAR1.\n",
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
/* Disable memory decoding - required by the kernel APIs */
pci_read_config_word(pci_dev, PCI_COMMAND, &cmd);
pci_write_config_word(pci_dev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY);
/* Release BAR1 */
pci_release_resource(pci_dev, NV_GPU_BAR1);
/* Release BAR3 as well - we don't want to resize it, but it sits behind the same bridge, so it may need to be moved */
pci_release_resource(pci_dev, NV_GPU_BAR3);
resize:
/* Attempt to resize BAR1 to the largest supported size */
r = pci_resize_resource(pci_dev, NV_GPU_BAR1, requested_size);
if (r) {
if (r == -ENOSPC)
{
/* step through smaller sizes down to original size */
if (requested_size > old_size)
{
clear_bit(fls(sizes) - 1, &sizes);
requested_size = fls(sizes) - 1;
goto resize;
}
else
{
nv_printf(NV_DBG_ERRORS, "NVRM: No address space to allocate resized BAR1.\n");
}
}
else if (r == -EOPNOTSUPP)
{
nv_printf(NV_DBG_WARNINGS, "NVRM: BAR resize resource not supported.\n");
}
else
{
nv_printf(NV_DBG_WARNINGS, "NVRM: BAR resizing failed with error `%d`.\n", r);
}
}
/* Re-attempt assignment of PCIe resources */
pci_assign_unassigned_bus_resources(pci_dev->bus);
if ((pci_resource_flags(pci_dev, NV_GPU_BAR1) & IORESOURCE_UNSET) ||
(pci_resource_flags(pci_dev, NV_GPU_BAR3) & IORESOURCE_UNSET)) {
if (requested_size != old_size) {
/* Try to get the BAR back with the original size */
requested_size = old_size;
goto resize;
}
/* Something went horribly wrong and the kernel didn't manage to re-allocate BAR1.
This is unlikely (because we had space before), but can happen. */
nv_printf(NV_DBG_ERRORS, "NVRM: FATAL: Failed to re-allocate BAR1.\n");
ret = -ENODEV;
}
/* Re-enable memory decoding */
pci_write_config_word(pci_dev, PCI_COMMAND, cmd);
return ret;
#else
nv_printf(NV_DBG_INFO, "NVRM: Resizable BAR is not supported on this kernel version.\n");
return 0;
#endif /* NV_PCI_REBAR_GET_POSSIBLE_SIZES_PRESENT */
}
/* find nvidia devices and set initial state */
static int
nv_pci_probe
@@ -349,6 +462,7 @@ next_bar:
(NvU64)NV_PCI_RESOURCE_START(pci_dev, i),
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
goto failed;
}
@@ -365,6 +479,12 @@ next_bar:
goto failed;
}
if (nv_resize_pcie_bars(pci_dev)) {
nv_printf(NV_DBG_ERRORS,
"NVRM: Fatal Error while attempting to resize PCIe BARs.\n");
goto failed;
}
NV_KZALLOC(nvl, sizeof(nv_linux_state_t));
if (nvl == NULL)
{

View File

@@ -699,6 +699,22 @@
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableResizableBar
*
* Description:
*
* When this option is enabled, the NVIDIA driver will attempt to resize
* BAR1 to match the framebuffer size, or to the next largest available size,
* on supported machines. This is currently only implemented for Linux.
*
* Possible values:
* 0 - Do not enable PCI BAR resizing
* 1 - Enable PCI BAR resizing
*/
#define __NV_ENABLE_RESIZABLE_BAR EnableResizableBar
#define NV_REG_ENABLE_RESIZABLE_BAR NV_REG_STRING(__NV_ENABLE_RESIZABLE_BAR)
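The new key is consumed as the NVreg_EnableResizableBar module parameter (the BAR-resize hunk above checks NVreg_EnableResizableBar == 0). As an illustrative usage sketch, not taken from this commit, it could be enabled at module load time with a modprobe configuration entry such as:

    # example only; configuration file location/name is distribution-specific
    options nvidia NVreg_EnableResizableBar=1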
/*
* Option: EnableGpuFirmware
*
@@ -825,6 +841,26 @@
#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001
/*
* Option: NVreg_RmNvlinkBandwidth
*
* Description:
*
* This option allows the user to reduce the NVLink P2P bandwidth to save power.
* The option takes a string value.
*
* Possible string values:
* OFF: 0% bandwidth
* MIN: 15%-25% bandwidth depending on the system's NVLink topology
* HALF: 50% bandwidth
* 3QUARTER: 75% bandwidth
* FULL: 100% bandwidth (default)
*
* This option only applies to Hopper+ GPUs with NVLink version 4.0.
*/
#define __NV_RM_NVLINK_BW RmNvlinkBandwidth
#define NV_RM_NVLINK_BW NV_REG_STRING(__NV_RM_NVLINK_BW)
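This key is likewise exposed as the NVreg_RmNvlinkBandwidth module parameter (the registry-init hunk near the end of this diff copies NVreg_RmNvlinkBandwidth into the RM registry). An illustrative setting, not taken from this commit, would be:

    options nvidia NVreg_RmNvlinkBandwidth=HALF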
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
@@ -861,6 +897,7 @@ NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_RESIZABLE_BAR, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
@@ -870,6 +907,7 @@ NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_NVLINK_BW, NULL);
/*
*----------------registry database definition----------------------
@@ -910,6 +948,7 @@ nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_RESIZABLE_BAR),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),

View File

@@ -265,7 +265,7 @@ static unsigned int nv_compute_gfp_mask(
gfp_mask |= __GFP_ZERO;
#endif
#if defined(__GFP_THISNODE)
-    if (at->flags.node0)
+    if (at->flags.node)
gfp_mask |= __GFP_THISNODE;
#endif
// Compound pages are required by vm_insert_page for high-order page
@@ -384,9 +384,9 @@ NV_STATUS nv_alloc_contig_pages(
at->order = get_order(at->num_pages * PAGE_SIZE);
gfp_mask = nv_compute_gfp_mask(nv, at);
-    if (at->flags.node0)
+    if (at->flags.node)
{
-        NV_ALLOC_PAGES_NODE(virt_addr, 0, at->order, gfp_mask);
+        NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, at->order, gfp_mask);
}
else
{
@@ -529,9 +529,9 @@ NV_STATUS nv_alloc_system_pages(
gfp_mask);
at->flags.coherent = NV_TRUE;
}
-    else if (at->flags.node0)
+    else if (at->flags.node)
{
-        NV_ALLOC_PAGES_NODE(virt_addr, 0, 0, gfp_mask);
+        NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, 0, gfp_mask);
}
else
{

View File

@@ -280,11 +280,12 @@ void nv_sev_init(
static
nv_alloc_t *nvos_create_alloc(
struct device *dev,
int num_pages
int num_pages
)
{
-    nv_alloc_t *at;
-    unsigned int pt_size, i;
+    nv_alloc_t *at;
+    unsigned int pt_size;
+    unsigned int i;
NV_KZALLOC(at, sizeof(nv_alloc_t));
if (at == NULL)
@@ -295,6 +296,7 @@ nv_alloc_t *nvos_create_alloc(
at->dev = dev;
pt_size = num_pages * sizeof(nvidia_pte_t *);
if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK)
{
nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n");
@@ -3303,6 +3305,7 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
NvU32 cache_type,
NvBool zeroed,
NvBool unencrypted,
+    NvS32 node_id,
NvU64 *pte_array,
void **priv_data
)
@@ -3314,7 +3317,7 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
NvU32 i;
struct device *dev = NULL;
-    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages\n", page_count);
+    nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages, nodeid %d\n", page_count, node_id);
nv_printf(NV_DBG_MEMINFO, "NVRM: VM: contig %d cache_type %d\n",
contiguous, cache_type);
@@ -3372,9 +3375,18 @@ NV_STATUS NV_API_CALL nv_alloc_pages(
* See Bug 1920398 for more details.
*/
if (nv && nvl->npu && !nvl->dma_dev.nvlink)
-        at->flags.node0 = NV_TRUE;
+    {
+        at->flags.node = NV_TRUE;
+        at->node_id = 0;
+    }
#endif
+    if (node_id != NUMA_NO_NODE)
+    {
+        at->flags.node = NV_TRUE;
+        at->node_id = node_id;
+    }
if (at->flags.contig)
status = nv_alloc_contig_pages(nv, at);
else
@@ -5069,23 +5081,36 @@ void nv_linux_remove_device_locked(nv_linux_state_t *nvl)
void NV_API_CALL nv_control_soc_irqs(nv_state_t *nv, NvBool bEnable)
{
int count;
+    unsigned long flags;
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
if (nv->current_soc_irq != -1)
return;
+    NV_SPIN_LOCK_IRQSAVE(&nvl->soc_isr_lock, flags);
if (bEnable)
{
for (count = 0; count < nv->num_soc_irqs; count++)
{
nv->soc_irq_info[count].bh_pending = NV_FALSE;
nv->current_soc_irq = -1;
-            enable_irq(nv->soc_irq_info[count].irq_num);
+            if (nv->soc_irq_info[count].ref_count == 0)
+            {
+                nv->soc_irq_info[count].ref_count++;
+                enable_irq(nv->soc_irq_info[count].irq_num);
+            }
}
}
else
{
for (count = 0; count < nv->num_soc_irqs; count++)
{
-            disable_irq_nosync(nv->soc_irq_info[count].irq_num);
+            if (nv->soc_irq_info[count].ref_count == 1)
+            {
+                nv->soc_irq_info[count].ref_count--;
+                disable_irq_nosync(nv->soc_irq_info[count].irq_num);
+            }
}
}
+    NV_SPIN_UNLOCK_IRQRESTORE(&nvl->soc_isr_lock, flags);
}
NvU32 NV_API_CALL nv_get_dev_minor(nv_state_t *nv)
@@ -5509,3 +5534,4 @@ void NV_API_CALL nv_get_updated_emu_seg(
*end = min((resource_size_t)*end, p->end);
}
}

View File

@@ -76,26 +76,26 @@ NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
NvLength pageCount,
- NvU32 pageSize,
+ NvU64 pageSize,
gpuPmaAllocationOptions *pPmaAllocOptions,
NvU64 *pPages);
void nvGpuOpsPmaFreePages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
- NvU32 pageSize,
+ NvU64 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
- NvU32 pageSize,
+ NvU64 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
- NvU32 pageSize);
+ NvU64 pageSize);
NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
const gpuChannelAllocParams *params,
@@ -112,7 +112,7 @@ void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
NvU64 memory, NvLength length,
- void **cpuPtr, NvU32 pageSize);
+ void **cpuPtr, NvU64 pageSize);
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
void* cpuPtr);
@@ -276,4 +276,6 @@ NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
char *methodStream,
NvU32 methodStreamSize);
+ NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(struct gpuDevice *device);
#endif /* _NV_GPU_OPS_H_*/

View File

@@ -957,6 +957,18 @@ NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
}
EXPORT_SYMBOL(nvUvmInterfaceGetNonReplayableFaults);
NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(uvmGpuDeviceHandle device)
{
nvidia_stack_t *sp = nvUvmGetSafeStack();
NV_STATUS status;
status = rm_gpu_ops_flush_replayable_fault_buffer(sp, (gpuDeviceHandle)device);
nvUvmFreeSafeStack(sp);
return status;
}
EXPORT_SYMBOL(nvUvmInterfaceFlushReplayableFaultBuffer);
NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device,
UvmGpuAccessCntrInfo *pAccessCntrInfo)
{

View File

@@ -135,6 +135,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_pci_get_npu_dev
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_rebar_get_possible_sizes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource
@@ -166,6 +167,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_count_elems_of_size
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u8_array
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u32_array
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_unregister_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_named_gpio
@@ -189,6 +191,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_task_ioprio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += mdev_set_iommu_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += offline_and_remove_memory
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
@@ -209,6 +212,10 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channe
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_screen_info
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_i2c_bus_status
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_fuse_control_read
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_get_platform
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pci_find_host_bridge
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
@@ -217,7 +224,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
- NV_CONFTEST_TYPE_COMPILE_TESTS += vmbus_channel_has_ringbuffer_page
NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
@@ -244,3 +250,5 @@ NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vfio_pci_core_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_init
NV_CONFTEST_GENERIC_COMPILE_TESTS += cmd_uphy_display_port_off

View File

@@ -1790,11 +1790,6 @@ NV_STATUS NV_API_CALL os_numa_memblock_size
return NV_OK;
}
-NV_STATUS NV_API_CALL os_call_nv_vmbus(NvU32 vmbus_cmd, void *input)
-{
-    return NV_ERR_NOT_SUPPORTED;
-}
NV_STATUS NV_API_CALL os_open_temporary_file
(
void **ppFile

View File

@@ -317,6 +317,14 @@ NV_STATUS NV_API_CALL os_registry_init(void)
return NV_ERR_NO_MEMORY;
}
if (NVreg_RmNvlinkBandwidth != NULL)
{
rm_write_registry_string(sp, NULL,
"RmNvlinkBandwidth",
NVreg_RmNvlinkBandwidth,
strlen(NVreg_RmNvlinkBandwidth));
}
if (NVreg_RmMsg != NULL)
{
rm_write_registry_string(sp, NULL,