575.51.02

Author: Bernhard Stoeckner
Date:   2025-04-17 19:35:38 +02:00
Parent: e8113f665d
Commit: 4159579888

1142 changed files with 309085 additions and 272273 deletions


@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -294,7 +294,6 @@ static NV_STATUS nv_alloc_coherent_pages(
     NvU32 i;
     unsigned int gfp_mask;
     unsigned long virt_addr = 0;
-    dma_addr_t bus_addr;
     nv_linux_state_t *nvl;
     struct device *dev;
@@ -312,7 +311,7 @@ static NV_STATUS nv_alloc_coherent_pages(
     virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                   at->num_pages * PAGE_SIZE,
-                                                  &bus_addr,
+                                                  &at->dma_handle,
                                                   gfp_mask);
     if (!virt_addr)
     {
@@ -327,7 +326,6 @@ static NV_STATUS nv_alloc_coherent_pages(
         page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
         page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
-        page_ptr->dma_addr  = bus_addr + i * PAGE_SIZE;
     }

     if (at->cache_type != NV_MEMORY_CACHED)
@@ -358,7 +356,7 @@ static void nv_free_coherent_pages(
     }

     dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
-                      (void *)page_ptr->virt_addr, page_ptr->dma_addr);
+                      (void *)page_ptr->virt_addr, at->dma_handle);
 }

 NV_STATUS nv_alloc_contig_pages(
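The hunks above drop the per-page bus_addr bookkeeping in favor of a single handle stored on the allocation (at->dma_handle). dma_free_coherent() must receive the exact dma_addr_t that dma_alloc_coherent() produced, so one handle per coherent allocation is the natural shape. A minimal sketch of the pattern, with hypothetical names (my_alloc, my_alloc_coherent):

    #include <linux/dma-mapping.h>

    struct my_alloc {
        void       *cpu_addr;    /* kernel virtual address of the buffer */
        dma_addr_t  dma_handle;  /* handle returned by dma_alloc_coherent() */
        size_t      size;
    };

    /* Allocate one coherent buffer; remember the handle for the free path. */
    static int my_alloc_coherent(struct device *dev, struct my_alloc *a, size_t size)
    {
        a->cpu_addr = dma_alloc_coherent(dev, size, &a->dma_handle, GFP_KERNEL);
        if (!a->cpu_addr)
            return -ENOMEM;
        a->size = size;
        return 0;
    }

    /* Free with the same size and handle the allocation call returned. */
    static void my_free_coherent(struct device *dev, struct my_alloc *a)
    {
        dma_free_coherent(dev, a->size, a->cpu_addr, a->dma_handle);
    }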
@@ -372,13 +370,12 @@ NV_STATUS nv_alloc_contig_pages(
     unsigned int gfp_mask;
     unsigned long virt_addr = 0;
     NvU64 phys_addr;
-    struct device *dev = at->dev;

     nv_printf(NV_DBG_MEMINFO,
               "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

     // TODO: This is a temporary WAR, and will be removed after fixing bug 200732409.
-    if (os_is_xen_dom0() || at->flags.unencrypted)
+    if (os_is_xen_dom0())
         return nv_alloc_coherent_pages(nv, at);

     at->order = get_order(at->num_pages * PAGE_SIZE);
@@ -396,6 +393,11 @@ NV_STATUS nv_alloc_contig_pages(
     else
     {
         NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
+
+        // In CC, NV_GET_FREE_PAGES only allocates protected sysmem.
+        // To get unprotected sysmem, this memory is marked as unencrypted.
+        nv_set_memory_decrypted_zeroed(at->flags.unencrypted, virt_addr, 1 << at->order,
+                                       at->num_pages * PAGE_SIZE);
     }

     if (virt_addr == 0)
     {
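nv_set_memory_decrypted_zeroed() is an NVIDIA helper not shown in this diff. Based on the comment above it, a plausible sketch of a "decrypt and zero" step in a Confidential Computing guest, built on the kernel's set_memory_decrypted(); the name and exact behavior here are assumptions:

    #include <linux/string.h>
    #include <linux/set_memory.h>   /* set_memory_decrypted() */

    /*
     * Hypothetical sketch: flip the pages to shared (decrypted) in the guest
     * mappings, then zero them so stale ciphertext from the private view is
     * never exposed through the shared alias.
     */
    static int sketch_set_memory_decrypted_zeroed(int unencrypted,
                                                  unsigned long virt_addr,
                                                  int num_os_pages,
                                                  unsigned long zero_len)
    {
        int ret;

        if (!unencrypted)
            return 0;   /* protected sysmem: leave the mapping private */

        ret = set_memory_decrypted(virt_addr, num_os_pages);
        if (ret)
            return ret;

        memset((void *)virt_addr, 0, zero_len);
        return 0;
    }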
@@ -432,7 +434,6 @@ NV_STATUS nv_alloc_contig_pages(
         page_ptr = at->page_table[i];

         page_ptr->phys_addr = phys_addr;
         page_ptr->virt_addr = virt_addr;
-        page_ptr->dma_addr  = nv_phys_to_dma(dev, page_ptr->phys_addr);

         NV_MAYBE_RESERVE_PAGE(page_ptr);
     }
@@ -456,6 +457,11 @@ failed:
     }

     page_ptr = at->page_table[0];

+    // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
+    // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
+    nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
+
     NV_FREE_PAGES(page_ptr->virt_addr, at->order);

     return status;
@@ -490,6 +496,10 @@ void nv_free_contig_pages(
     page_ptr = at->page_table[0];

+    // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
+    // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
+    nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
+
     NV_FREE_PAGES(page_ptr->virt_addr, at->order);
 }
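Both free paths now follow the same ordering: shared pages are flipped back to private before they return to the page allocator, since otherwise the allocator would hand out device-shared pages as ordinary kernel memory. A small sketch of that ordering against the stock kernel API (illustrative function; the return value of set_memory_encrypted() is left unchecked for brevity):

    #include <linux/gfp.h>
    #include <linux/set_memory.h>

    static void sketch_free_possibly_shared(unsigned long virt_addr,
                                            unsigned int order,
                                            int unencrypted)
    {
        /* 1. Restore the private (encrypted) mapping first ... */
        if (unencrypted)
            set_memory_encrypted(virt_addr, 1 << order);

        /* 2. ... only then give the pages back to the allocator. */
        free_pages(virt_addr, order);
    }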
@@ -504,8 +514,6 @@ NV_STATUS nv_alloc_system_pages(
     unsigned int gfp_mask;
     unsigned long virt_addr = 0;
     NvU64 phys_addr;
-    struct device *dev = at->dev;
-    dma_addr_t bus_addr;
     unsigned int alloc_page_size = PAGE_SIZE << at->order;
     unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);
@@ -521,15 +529,7 @@ NV_STATUS nv_alloc_system_pages(
     for (i = 0; i < alloc_num_pages; i++)
     {
-        if (at->flags.unencrypted && (dev != NULL))
-        {
-            virt_addr = (unsigned long)dma_alloc_coherent(dev,
-                                                          alloc_page_size,
-                                                          &bus_addr,
-                                                          gfp_mask);
-            at->flags.coherent = NV_TRUE;
-        }
-        else if (at->flags.node)
+        if (at->flags.node)
         {
             unsigned long ptr = 0ULL;
             NV_ALLOC_PAGES_NODE(ptr, at->node_id, at->order, gfp_mask);
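With the dma_alloc_coherent() branch gone, unencrypted allocations take the same page-allocator paths as everything else. NV_ALLOC_PAGES_NODE is an NVIDIA portability macro; a plausible sketch of the node-local allocation it wraps, using alloc_pages_node() (the __GFP_THISNODE strictness is an assumption):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Allocate a 2^order block on NUMA node 'nid'; return its kernel VA. */
    static unsigned long sketch_alloc_pages_node(int nid, unsigned int order,
                                                 gfp_t gfp_mask)
    {
        struct page *page = alloc_pages_node(nid, gfp_mask | __GFP_THISNODE, order);

        if (!page)
            return 0;

        return (unsigned long)page_address(page);
    }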
@@ -545,6 +545,11 @@ NV_STATUS nv_alloc_system_pages(
         else
         {
             NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
+
+            // In CC, NV_GET_FREE_PAGES only allocates protected sysmem.
+            // To get unprotected sysmem, this memory is marked as unencrypted.
+            nv_set_memory_decrypted_zeroed(at->flags.unencrypted, virt_addr, 1 << at->order,
+                                           alloc_page_size);
         }

         if (virt_addr == 0)
@@ -563,6 +568,11 @@ NV_STATUS nv_alloc_system_pages(
         for (sub_page_idx = 0; sub_page_idx < os_pages_in_page; sub_page_idx++)
         {
             unsigned long sub_page_virt_addr = virt_addr + sub_page_offset;
+            unsigned int base_page_idx = (i * os_pages_in_page) + sub_page_idx;
+
+            if (base_page_idx >= at->num_pages)
+                break;
+
             phys_addr = nv_get_kern_phys_address(sub_page_virt_addr);
             if (phys_addr == 0)
             {
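The new bound matters when at->num_pages is not a multiple of os_pages_in_page: the last 2^order block then covers more OS pages than the page table has entries. For example, num_pages = 5 with order = 2 gives os_pages_in_page = 4 and alloc_num_pages = 2, i.e. 8 OS pages, so indices 5..7 must be skipped. A standalone sketch of the indexing (not driver code):

    /* Walk 2^order blocks as OS pages, stopping at the table's true length. */
    static void sketch_walk_sub_pages(unsigned int num_pages, unsigned int order)
    {
        unsigned int os_pages_in_page = 1u << order;
        unsigned int alloc_num_pages =
            (num_pages + os_pages_in_page - 1) / os_pages_in_page;
        unsigned int i, sub;

        for (i = 0; i < alloc_num_pages; i++)
        {
            for (sub = 0; sub < os_pages_in_page; sub++)
            {
                unsigned int base_page_idx = (i * os_pages_in_page) + sub;

                if (base_page_idx >= num_pages)
                    break;   /* tail of the final, partial block */

                /* ... fill page_table[base_page_idx] ... */
            }
        }
    }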
@@ -586,21 +596,10 @@ NV_STATUS nv_alloc_system_pages(
             }
 #endif

-            page_ptr = at->page_table[(i * os_pages_in_page) + sub_page_idx];
+            page_ptr = at->page_table[base_page_idx];

             page_ptr->phys_addr = phys_addr;
             page_ptr->virt_addr = sub_page_virt_addr;

-            //
-            // Use unencrypted dma_addr returned by dma_alloc_coherent() as
-            // nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
-            //
-            if (at->flags.coherent)
-                page_ptr->dma_addr = bus_addr;
-            else if (dev != NULL)
-                page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
-            else
-                page_ptr->dma_addr = page_ptr->phys_addr;

             NV_MAYBE_RESERVE_PAGE(page_ptr);

             sub_page_offset += PAGE_SIZE;
         }
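The removed comment explains why the old code avoided nv_phys_to_dma() for coherent buffers: with AMD SEV, the encrypted view of a physical address carries the C-bit, while a shared buffer must be addressed with it clear. A rough x86-only illustration using the kernel's SME helpers (the wrapper function itself is hypothetical):

    #include <linux/mem_encrypt.h>   /* __sme_set(), __sme_clr() */
    #include <linux/types.h>

    /*
     * Illustration: under SEV/SME the C-bit in the physical address selects
     * the encrypted view; device addresses for shared pages need it clear.
     */
    static u64 sketch_dma_addr(u64 phys_addr, bool page_is_shared)
    {
        return page_is_shared ? __sme_clr(phys_addr) : __sme_set(phys_addr);
    }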
@@ -618,15 +617,12 @@ failed:
     {
         page_ptr = at->page_table[j * os_pages_in_page];

         NV_MAYBE_UNRESERVE_PAGE(page_ptr);

-        if (at->flags.coherent)
-        {
-            dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
-                              page_ptr->dma_addr);
-        }
-        else
-        {
-            NV_FREE_PAGES(page_ptr->virt_addr, at->order);
-        }
+        // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
+        // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
+        nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
+
+        NV_FREE_PAGES(page_ptr->virt_addr, at->order);
     }
 }
@@ -639,7 +635,6 @@ void nv_free_system_pages(
 {
     nvidia_pte_t *page_ptr;
     unsigned int i;
-    struct device *dev = at->dev;
     unsigned int alloc_page_size = PAGE_SIZE << at->order;
     unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;
@@ -661,18 +656,47 @@ void nv_free_system_pages(
     {
         page_ptr = at->page_table[i];

-        if (at->flags.coherent)
-        {
-            dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
-                              page_ptr->dma_addr);
-        }
-        else
-        {
-            NV_FREE_PAGES(page_ptr->virt_addr, at->order);
-        }
+        // For unprotected sysmem in CC, memory is marked as unencrypted during allocation.
+        // NV_FREE_PAGES only deals with protected sysmem. Mark memory as encrypted and protected before free.
+        nv_set_memory_encrypted(at->flags.unencrypted, page_ptr->virt_addr, 1 << at->order);
+
+        NV_FREE_PAGES(page_ptr->virt_addr, at->order);
     }
 }
+static NvUPtr nv_vmap(struct page **pages, NvU32 page_count,
+                      NvBool cached, NvBool unencrypted)
+{
+    void *ptr;
+    pgprot_t prot = PAGE_KERNEL;
+
+#if defined(NVCPU_X86_64)
+#if defined(PAGE_KERNEL_NOENC)
+    if (unencrypted)
+    {
+        prot = cached ? nv_adjust_pgprot(PAGE_KERNEL_NOENC) :
+                        nv_adjust_pgprot(NV_PAGE_KERNEL_NOCACHE_NOENC);
+    }
+    else
+#endif
+    {
+        prot = cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE;
+    }
+#elif defined(NVCPU_AARCH64)
+    prot = cached ? PAGE_KERNEL : NV_PGPROT_UNCACHED(PAGE_KERNEL);
+#endif
+
+    /* All memory cached in PPC64LE; can't honor 'cached' input. */
+    ptr = vmap(pages, page_count, VM_MAP, prot);
+    NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE);
+
+    return (NvUPtr)ptr;
+}
+
+static void nv_vunmap(NvUPtr vaddr, NvU32 page_count)
+{
+    vunmap((void *)vaddr);
+    NV_MEMDBG_REMOVE((void *)vaddr, page_count * PAGE_SIZE);
+}
 NvUPtr nv_vm_map_pages(
     struct page **pages,
     NvU32 count,
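The nv_vmap()/nv_vunmap() helpers above are thin wrappers over the kernel's vmap()/vunmap(), with the pgprot chosen per architecture and encryption mode. A minimal usage sketch of the underlying kernel API (function names are illustrative):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Map 'count' discontiguous pages into one contiguous kernel VA range. */
    static void *sketch_map_pages(struct page **pages, unsigned int count,
                                  bool cached)
    {
        pgprot_t prot = cached ? PAGE_KERNEL : pgprot_noncached(PAGE_KERNEL);

        /* VM_MAP tags the area as a vmap() region; prot picks cacheability. */
        return vmap(pages, count, VM_MAP, prot);
    }

    static void sketch_unmap_pages(void *vaddr)
    {
        vunmap(vaddr);   /* unmaps the VA range; the pages are not freed */
    }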