mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
580.94.16
@@ -62,6 +62,8 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags
 NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_device_range
 NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
 NV_CONFTEST_TYPE_COMPILE_TESTS += get_dev_pagemap_has_pgmap_arg
+NV_CONFTEST_TYPE_COMPILE_TESTS += dev_pagemap_ops_has_folio_free
+NV_CONFTEST_TYPE_COMPILE_TESTS += zone_device_page_init_has_order_arg
 NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
 NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
 NV_CONFTEST_TYPE_COMPILE_TESTS += mpol_preferred_many_present
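Each NV_CONFTEST_TYPE_COMPILE_TESTS entry names a small probe that the build
compiles against the target kernel's headers, defining the corresponding NV_*
macro on success. The real probe bodies live in the driver's conftest
machinery; a minimal sketch of what the two new probes could look like
(hypothetical bodies, inferred from how the results are used below):

    #include <linux/memremap.h>

    // Compiles only if struct dev_pagemap_ops has a folio_free member,
    // i.e. NV_PAGEMAP_OPS_HAS_FOLIO_FREE should be defined.
    static const struct dev_pagemap_ops conftest_folio_free_ops = {
        .folio_free = NULL,
    };

    // Compiles only if zone_device_page_init() takes an order argument,
    // i.e. NV_ZONE_DEVICE_PAGE_INIT_HAS_ORDER_ARG should be defined.
    void conftest_zone_device_page_init_order(struct page *page)
    {
        zone_device_page_init(page, 0);
    }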
@@ -74,6 +74,16 @@ module_param(uvm_disable_hmm, bool, 0444);
 #include "uvm_va_policy.h"
 #include "uvm_tools.h"
 
+//
+// Pass 0 as the order; when actual large order support is added, this
+// function will need to be revisited.
+//
+#if defined(NV_ZONE_DEVICE_PAGE_INIT_HAS_ORDER_ARG)
+#define ZONE_DEVICE_PAGE_INIT(page) zone_device_page_init(page, 0)
+#else
+#define ZONE_DEVICE_PAGE_INIT(page) zone_device_page_init(page)
+#endif
+
 // The function nv_PageSwapCache() wraps the check for page swap cache flag in
 // order to support a wide variety of kernel versions.
 // The function PageSwapCache() is removed after 32f51ead3d77 ("mm: remove
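The wrapper bridges two upstream signatures (paraphrased here, assuming the
newer kernel adds an order parameter as the conftest name suggests; check
<linux/memremap.h> for the authoritative declarations):

    void zone_device_page_init(struct page *page);                      // older kernels
    void zone_device_page_init(struct page *page, unsigned int order);  // newer kernels

Passing order 0 keeps today's behavior of initializing a single page.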
@@ -2146,7 +2156,7 @@ static void fill_dst_pfn(uvm_va_block_t *va_block,
 
     UVM_ASSERT(!page_count(dpage));
     UVM_ASSERT(!dpage->zone_device_data);
-    zone_device_page_init(dpage);
+    ZONE_DEVICE_PAGE_INIT(dpage);
     dpage->zone_device_data = gpu_chunk;
     atomic64_inc(&va_block->hmm.va_space->hmm.allocated_page_count);
 }
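For context, the asserts require a zero refcount because initialization hands
back a referenced, locked page. Upstream's order-less variant is roughly the
following (paraphrased from memory, not verbatim; see mm/memremap.c for the
authoritative version):

    void zone_device_page_init(struct page *page)
    {
        // Taking a pgmap reference guards against drivers allocating
        // pages after memunmap_pages().
        WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref));
        set_page_count(page, 1);
        lock_page(page);
    }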
@@ -3159,6 +3159,11 @@ static void devmem_page_free(struct page *page)
                            &gpu->pmm.root_chunks.va_block_lazy_free_q_item);
 }
 
+static void devmem_folio_free(struct folio *folio)
+{
+    devmem_page_free(&folio->page);
+}
+
 // This is called by HMM when the CPU faults on a ZONE_DEVICE private entry.
 static vm_fault_t devmem_fault(struct vm_fault *vmf)
 {
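The &folio->page conversion works because struct folio embeds a struct page
at offset zero. An equivalent spelling using the upstream accessor (a sketch,
not UVM's code) would be:

    static void devmem_folio_free_alt(struct folio *folio)
    {
        // folio_page(folio, 0) returns the folio's head page, which for
        // an order-0 ZONE_DEVICE folio is exactly the page the old
        // page_free() callback used to receive.
        devmem_page_free(folio_page(folio, 0));
    }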
@@ -3177,7 +3182,11 @@ static vm_fault_t devmem_fault_entry(struct vm_fault *vmf)
 
 static const struct dev_pagemap_ops uvm_pmm_devmem_ops =
 {
+#if defined(NV_PAGEMAP_OPS_HAS_FOLIO_FREE)
+    .folio_free = devmem_folio_free,
+#else
     .page_free = devmem_page_free,
+#endif
     .migrate_to_ram = devmem_fault_entry,
 };
 
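This ops table (and the two analogous tables further down) is what the mm
layer calls back into for pages created by devm_memremap_pages(). For
orientation, a minimal, hypothetical registration for device-private memory
(illustrative names only; UVM's real setup differs):

    #include <linux/ioport.h>
    #include <linux/memremap.h>

    static struct dev_pagemap example_pgmap;    // hypothetical

    static int example_register_devmem(struct device *dev, unsigned long size)
    {
        // Device-private pages are backed by a fabricated physical range.
        struct resource *res =
            request_free_mem_region(&iomem_resource, size, "example-devmem");
        void *ptr;

        if (IS_ERR(res))
            return PTR_ERR(res);

        example_pgmap.type = MEMORY_DEVICE_PRIVATE;
        example_pgmap.range.start = res->start;
        example_pgmap.range.end = res->end;
        example_pgmap.nr_range = 1;
        example_pgmap.owner = &example_pgmap;   // token matched during HMM migration
        example_pgmap.ops = &uvm_pmm_devmem_ops;

        ptr = devm_memremap_pages(dev, &example_pgmap);
        return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
    }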
@@ -3272,6 +3281,11 @@ static void device_p2p_page_free(struct page *page)
     page->zone_device_data = NULL;
     nv_kref_put(&p2p_mem->refcount, device_p2p_page_free_wake);
 }
+
+static void device_p2p_folio_free(struct folio *folio)
+{
+    device_p2p_page_free(&folio->page);
+}
 #endif
 
 #if UVM_CDMM_PAGES_SUPPORTED()
@@ -3280,9 +3294,18 @@ static void device_coherent_page_free(struct page *page)
     device_p2p_page_free(page);
 }
 
+static void device_coherent_folio_free(struct folio *folio)
+{
+    device_p2p_page_free(&folio->page);
+}
+
 static const struct dev_pagemap_ops uvm_device_coherent_pgmap_ops =
 {
+#if defined(NV_PAGEMAP_OPS_HAS_FOLIO_FREE)
+    .folio_free = device_coherent_folio_free,
+#else
     .page_free = device_coherent_page_free,
+#endif
 };
 
 static NV_STATUS uvm_pmm_cdmm_init(uvm_parent_gpu_t *parent_gpu)
@@ -3419,7 +3442,11 @@ static bool uvm_pmm_gpu_check_orphan_pages(uvm_pmm_gpu_t *pmm)
 
 static const struct dev_pagemap_ops uvm_device_p2p_pgmap_ops =
 {
+#if defined(NV_PAGEMAP_OPS_HAS_FOLIO_FREE)
+    .folio_free = device_p2p_folio_free,
+#else
     .page_free = device_p2p_page_free,
+#endif
 };
 
 void uvm_pmm_gpu_device_p2p_init(uvm_parent_gpu_t *parent_gpu)
@@ -3477,12 +3504,10 @@ void uvm_pmm_gpu_device_p2p_init(uvm_parent_gpu_t *parent_gpu)
 
 void uvm_pmm_gpu_device_p2p_deinit(uvm_parent_gpu_t *parent_gpu)
 {
-    unsigned long pci_start_pfn = pci_resource_start(parent_gpu->pci_dev,
-                                                     uvm_device_p2p_static_bar(parent_gpu)) >> PAGE_SHIFT;
-    struct page *p2p_page;
-
     if (parent_gpu->device_p2p_initialised && !uvm_parent_gpu_is_coherent(parent_gpu)) {
-        p2p_page = pfn_to_page(pci_start_pfn);
+        struct page *p2p_page = pfn_to_page(pci_resource_start(parent_gpu->pci_dev,
+                                                               uvm_device_p2p_static_bar(parent_gpu)) >> PAGE_SHIFT);
+
         devm_memunmap_pages(&parent_gpu->pci_dev->dev, page_pgmap(p2p_page));
     }
 }
@@ -21,6 +21,8 @@
 
 *******************************************************************************/
 
+#include <linux/sched.h>
+
 #include "uvm_common.h"
 #include "uvm_ioctl.h"
 #include "uvm_linux.h"
@@ -63,6 +65,12 @@ static NV_STATUS handle_fault(struct vm_area_struct *vma, unsigned long start, u
         ret = UVM_HANDLE_MM_FAULT(vma, start + (i * PAGE_SIZE), fault_flags);
         if (ret & VM_FAULT_ERROR)
             return errno_to_nv_status(vm_fault_to_errno(ret, fault_flags));
+
+        // Depending on the kernel version and the active preemption model,
+        // calls to handle_mm_fault might not have had a chance to check for
+        // scheduling points. Insert an explicit yield point here to prevent
+        // large buffers from triggering the soft lockup timeout.
+        cond_resched();
     }
 
     return NV_OK;
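The <linux/sched.h> include added above is what declares cond_resched(). The
general shape of the fix, as a self-contained sketch (a generic loop, not
UVM's code): any long-running kernel loop whose iterations may never pass
through a scheduling point should yield explicitly, or a large iteration
count can trip the soft-lockup watchdog.

    #include <linux/sched.h>

    static void process_many_pages(unsigned long npages)
    {
        unsigned long i;

        for (i = 0; i < npages; i++) {
            // ... per-page work that may not reschedule on its own ...

            // Voluntary preemption point; safe in process context with
            // no spinlocks held.
            cond_resched();
        }
    }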