530.30.02

Andy Ritger
2023-02-28 11:12:44 -08:00
parent e598191e8e
commit 4397463e73
928 changed files with 124728 additions and 88525 deletions


@@ -30,6 +30,10 @@
#include "uvm_va_block.h"
#include "uvm_va_range.h"
#include "uvm_va_space.h"
#include "uvm_kvmalloc.h"
#include "uvm_hal.h"
#include "uvm_push.h"
#include "uvm_processors.h"
// Pre-allocated array used for dma-to-virt translations
static uvm_reverse_map_t g_sysmem_translations[PAGES_PER_UVM_VA_BLOCK];
@@ -576,3 +580,640 @@ NV_STATUS uvm_test_pmm_sysmem(UVM_TEST_PMM_SYSMEM_PARAMS *params, struct file *f
    return status;
}

static NV_STATUS cpu_chunk_map_on_cpu(uvm_cpu_chunk_t *chunk, void **cpu_addr)
{
    struct page **pages;
    uvm_chunk_size_t chunk_size = uvm_cpu_chunk_get_size(chunk);
    size_t num_pages = uvm_cpu_chunk_num_pages(chunk);
    NV_STATUS status = NV_OK;

    UVM_ASSERT(cpu_addr);

    // Map the CPU chunk on the CPU.
    if (chunk_size > PAGE_SIZE) {
        size_t i;

        pages = uvm_kvmalloc(num_pages * sizeof(*pages));
        if (!pages)
            return NV_ERR_NO_MEMORY;

        for (i = 0; i < num_pages; i++)
            pages[i] = chunk->page + i;
    }
    else {
        pages = &chunk->page;
    }

    *cpu_addr = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
    if (!*cpu_addr)
        status = NV_ERR_NO_MEMORY;

    if (chunk_size > PAGE_SIZE)
        uvm_kvfree(pages);

    return status;
}

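The helper above is paired with a plain vunmap() in the tests that follow. A minimal usage sketch, using only functions already present in this file (the caller name is hypothetical and shown for illustration only):

// Hypothetical caller: map a chunk for CPU access, touch it from the CPU,
// then release the virtual mapping with vunmap(). This mirrors how the tests
// below use cpu_chunk_map_on_cpu().
static NV_STATUS example_touch_chunk_on_cpu(uvm_cpu_chunk_t *chunk)
{
    void *cpu_addr;
    NV_STATUS status = cpu_chunk_map_on_cpu(chunk, &cpu_addr);

    if (status != NV_OK)
        return status;

    memset(cpu_addr, 0, uvm_cpu_chunk_get_size(chunk));
    vunmap(cpu_addr);

    return NV_OK;
}
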
static NV_STATUS test_cpu_chunk_mapping_access(uvm_cpu_chunk_t *chunk, uvm_gpu_t *gpu)
{
    NvU64 dma_addr;
    uvm_gpu_phys_address_t gpu_phys_addr;
    uvm_gpu_address_t gpu_addr;
    uvm_push_t push;
    NvU32 *cpu_addr;
    uvm_chunk_size_t chunk_size = uvm_cpu_chunk_get_size(chunk);
    size_t i;
    NV_STATUS status = NV_OK;

    TEST_NV_CHECK_RET(cpu_chunk_map_on_cpu(chunk, (void **)&cpu_addr));
    memset(cpu_addr, 0, chunk_size);

    dma_addr = uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu->parent);
    gpu_phys_addr = uvm_gpu_phys_address(UVM_APERTURE_SYS, dma_addr);

    if (uvm_mmu_gpu_needs_dynamic_sysmem_mapping(gpu))
        gpu_addr = uvm_gpu_address_virtual_from_sysmem_phys(gpu, gpu_phys_addr.address);
    else
        gpu_addr = uvm_gpu_address_from_phys(gpu_phys_addr);

    TEST_NV_CHECK_GOTO(uvm_push_begin_acquire(gpu->channel_manager,
                                              UVM_CHANNEL_TYPE_GPU_TO_CPU,
                                              NULL,
                                              &push,
                                              "GPU -> CPU {%s, %llx} %u bytes",
                                              uvm_gpu_address_aperture_string(gpu_addr),
                                              gpu_addr.address,
                                              chunk_size),
                       done);
    gpu->parent->ce_hal->memset_4(&push, gpu_addr, 0xdeadc0de, chunk_size);
    TEST_NV_CHECK_GOTO(uvm_push_end_and_wait(&push), done);

    for (i = 0; i < chunk_size / sizeof(*cpu_addr); i++) {
        if (cpu_addr[i] != 0xdeadc0de) {
            UVM_TEST_PRINT("GPU write of {%s, 0x%llx} %u bytes expected pattern 0x%08x, but offset %zu is 0x%08x\n",
                           uvm_gpu_address_aperture_string(gpu_addr),
                           gpu_addr.address,
                           chunk_size,
                           0xdeadc0de,
                           i * sizeof(*cpu_addr),
                           cpu_addr[i]);
            status = NV_ERR_INVALID_STATE;
            break;
        }
    }

done:
    vunmap(cpu_addr);
    return status;
}

static NV_STATUS test_cpu_chunk_alloc(uvm_chunk_size_t size,
                                      uvm_cpu_chunk_alloc_flags_t flags,
                                      uvm_cpu_chunk_t **out_chunk)
{
    uvm_cpu_chunk_t *chunk;
    NV_STATUS status = NV_OK;
    size_t i;

    UVM_ASSERT(out_chunk);

    // It is possible that the allocation fails due to lack of large pages
    // rather than an API issue, which will result in a false negative.
    // However, that should be very rare.
    TEST_NV_CHECK_RET(uvm_cpu_chunk_alloc(size, flags, &chunk));

    // Check general state of the chunk:
    // - chunk should be a physical chunk,
    // - chunk should have the correct size,
    // - chunk should have the correct number of base pages, and
    // - all base pages should be dirty if the chunk was zero-allocated,
    //   clean otherwise.
    TEST_CHECK_GOTO(uvm_cpu_chunk_is_physical(chunk), done);
    TEST_CHECK_GOTO(uvm_cpu_chunk_get_size(chunk) == size, done);
    TEST_CHECK_GOTO(uvm_cpu_chunk_num_pages(chunk) == size / PAGE_SIZE, done);

    if (flags & UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO) {
        NvU64 *cpu_addr;

        TEST_NV_CHECK_GOTO(cpu_chunk_map_on_cpu(chunk, (void **)&cpu_addr), done);
        for (i = 0; i < size / sizeof(*cpu_addr); i++)
            TEST_CHECK_GOTO(cpu_addr[i] == 0, done);
        vunmap(cpu_addr);
    }

    for (i = 0; i < size / PAGE_SIZE; i++) {
        if (flags & UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO)
            TEST_CHECK_GOTO(uvm_cpu_chunk_is_dirty(chunk, i), done);
        else
            TEST_CHECK_GOTO(!uvm_cpu_chunk_is_dirty(chunk, i), done);
    }

done:
    if (status == NV_OK)
        *out_chunk = chunk;
    else
        uvm_cpu_chunk_free(chunk);

    return status;
}

static NV_STATUS test_cpu_chunk_mapping_basic_verify(uvm_gpu_t *gpu,
                                                     uvm_cpu_chunk_alloc_flags_t flags,
                                                     uvm_chunk_size_t size)
{
    uvm_cpu_chunk_t *chunk;
    uvm_cpu_physical_chunk_t *phys_chunk;
    NvU64 dma_addr;
    NV_STATUS status = NV_OK;

    TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, flags, &chunk));
    phys_chunk = uvm_cpu_chunk_to_physical(chunk);

    // Check state of the physical chunk:
    // - gpu_mappings.max_entries should be 1 (for the static entry),
    // - gpu_mappings.dma_addrs_mask should be 0, and
    // - there should be no GPU mapping address.
    TEST_CHECK_GOTO(phys_chunk->gpu_mappings.max_entries == 1, done);
    TEST_CHECK_GOTO(uvm_processor_mask_get_gpu_count(&phys_chunk->gpu_mappings.dma_addrs_mask) == 0, done);
    TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu->parent) == 0, done);
    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu), done);

    // Test basic access.
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu), done);

    // Test double map is harmless.
    dma_addr = uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu->parent);
    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu), done);
    TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu->parent) == dma_addr, done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu), done);

    // Test unmap, remap.
    uvm_cpu_chunk_unmap_gpu_phys(chunk, gpu->parent);
    TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu->parent) == 0, done);
    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu), done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu), done);

done:
    // Test free with mapped GPUs still works.
    uvm_cpu_chunk_free(chunk);
    return status;
}

static NV_STATUS test_cpu_chunk_mapping_basic(uvm_gpu_t *gpu, uvm_cpu_chunk_alloc_flags_t flags)
{
    uvm_chunk_sizes_mask_t chunk_sizes = uvm_cpu_chunk_get_allocation_sizes();
    uvm_chunk_size_t size;

    for_each_chunk_size(size, chunk_sizes)
        TEST_NV_CHECK_RET(test_cpu_chunk_mapping_basic_verify(gpu, flags, size));

    return NV_OK;
}

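for_each_chunk_size() above runs the basic mapping check once per supported allocation size. As a rough, hedged illustration only (assuming the size mask is a plain bitmask of power-of-two sizes that fits in an unsigned long; the macro's real definition lives elsewhere in UVM and may differ):

// Illustrative only: walk a size mask with the generic kernel bitmap iterator.
// This is not the driver's implementation of for_each_chunk_size().
static void sketch_walk_chunk_sizes(uvm_chunk_sizes_mask_t chunk_sizes)
{
    unsigned long mask = chunk_sizes;
    unsigned int bit;

    for_each_set_bit(bit, &mask, BITS_PER_LONG) {
        uvm_chunk_size_t size = (uvm_chunk_size_t)1 << bit;

        // Each set bit is one allocation size, e.g. 4KiB, 64KiB, 2MiB.
        (void)size;
    }
}
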
static NV_STATUS test_cpu_chunk_mapping_array(uvm_gpu_t *gpu1, uvm_gpu_t *gpu2, uvm_gpu_t *gpu3)
{
    NV_STATUS status = NV_OK;
    uvm_cpu_chunk_t *chunk;
    uvm_cpu_physical_chunk_t *phys_chunk;
    NvU64 dma_addr_gpu2;

    TEST_NV_CHECK_RET(test_cpu_chunk_alloc(PAGE_SIZE, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
    phys_chunk = uvm_cpu_chunk_to_physical(chunk);

    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu2), done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);
    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu3), done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu3), done);
    dma_addr_gpu2 = uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu2->parent);
    uvm_cpu_chunk_unmap_gpu_phys(chunk, gpu3->parent);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);
    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu1), done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu1), done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu2), done);

    // DMA mapping addresses for different GPUs live in different IOMMU spaces,
    // so it would be perfectly legal for them to have the same IOVA, and even
    // if they lived in the same space we freed GPU3's address so it would be
    // available for reuse.
    // What we need to ensure is that GPU2's address didn't change after we map
    // GPU1. It's true that we may get a false negative if both addresses
    // happened to alias and we had a bug in how the addresses are shifted in
    // the dense array, but that's better than intermittent failure.
    TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu2->parent) == dma_addr_gpu2, done);

done:
    uvm_cpu_chunk_free(chunk);
    return status;
}

static NV_STATUS do_test_cpu_chunk_split_and_merge(uvm_cpu_chunk_t *chunk, uvm_gpu_t *gpu)
{
    NV_STATUS status = NV_OK;
    uvm_chunk_size_t size = uvm_cpu_chunk_get_size(chunk);
    uvm_chunk_sizes_mask_t alloc_sizes = uvm_cpu_chunk_get_allocation_sizes();
    size_t num_split_chunks;
    uvm_cpu_chunk_t **split_chunks;
    uvm_cpu_chunk_t *merged_chunk;
    uvm_chunk_size_t split_size;
    NvU64 phys_dma_addr;
    size_t map_chunk;
    size_t i;

    split_size = uvm_chunk_find_prev_size(alloc_sizes, size);
    UVM_ASSERT(split_size != UVM_CHUNK_SIZE_INVALID);
    num_split_chunks = size / split_size;
    split_chunks = uvm_kvmalloc_zero(num_split_chunks * sizeof(*split_chunks));
    if (!split_chunks)
        return NV_ERR_NO_MEMORY;

    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(chunk, gpu), done_free);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(chunk, gpu), done_free);
    uvm_cpu_chunk_unmap_gpu_phys(chunk, gpu->parent);

    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_split(chunk, split_chunks), done_free);
    TEST_CHECK_GOTO(nv_kref_read(&chunk->refcount) == num_split_chunks, done);

    for (i = 0; i < num_split_chunks; i++) {
        TEST_CHECK_GOTO(split_chunks[i], done);
        TEST_CHECK_GOTO(uvm_cpu_chunk_is_logical(split_chunks[i]), done);
        TEST_CHECK_GOTO(uvm_cpu_chunk_get_size(split_chunks[i]) == split_size, done);
        TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(split_chunks[i], gpu), done);
        TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(split_chunks[i], gpu), done);
    }

    // Test CPU chunk merging.
    merged_chunk = uvm_cpu_chunk_merge(split_chunks);
    TEST_CHECK_GOTO(uvm_cpu_chunk_get_size(merged_chunk) == size, done_free);
    TEST_CHECK_GOTO(merged_chunk == chunk, done_free);

    // Since all logical chunks were mapped, the entire merged chunk should
    // be accessible without needing to map it.
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(merged_chunk, gpu), done_free);

    // Test that GPU mappings are transferred after a split.
    phys_dma_addr = uvm_cpu_chunk_get_gpu_phys_addr(chunk, gpu->parent);
    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_split(chunk, split_chunks), done_free);

    for (i = 0; i < num_split_chunks; i++) {
        NvU64 dma_addr;

        TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(split_chunks[i], gpu), done);
        dma_addr = uvm_cpu_chunk_get_gpu_phys_addr(split_chunks[i], gpu->parent);
        TEST_CHECK_GOTO(dma_addr == phys_dma_addr + (i * split_size), done);
        uvm_cpu_chunk_unmap_gpu_phys(split_chunks[i], gpu->parent);
    }

    // Test that mapping one logical chunk does not affect others.
    map_chunk = num_split_chunks / 2;
    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(split_chunks[map_chunk], gpu), done);
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(split_chunks[map_chunk], gpu), done);

    for (i = 0; i < num_split_chunks; i++) {
        if (i != map_chunk)
            TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(split_chunks[i], gpu->parent) == 0, done);
    }

    if (split_size > PAGE_SIZE) {
        for (i = 0; i < num_split_chunks; i++)
            TEST_NV_CHECK_GOTO(do_test_cpu_chunk_split_and_merge(split_chunks[i], gpu), done);
    }

    // Map all chunks before merging.
    for (i = 0; i < num_split_chunks; i++)
        TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(split_chunks[i], gpu), done);

    // Test CPU chunk merging.
    merged_chunk = uvm_cpu_chunk_merge(split_chunks);

    // At this point, all split chunks have been merged.
    num_split_chunks = 0;

    TEST_CHECK_GOTO(uvm_cpu_chunk_get_size(merged_chunk) == size, done_free);
    TEST_CHECK_GOTO(merged_chunk == chunk, done_free);

    // Since all logical chunks were mapped, the entire merged chunk should
    // be accessible without needing to map it.
    TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_access(merged_chunk, gpu), done_free);

done:
    for (i = 0; i < num_split_chunks; i++)
        uvm_cpu_chunk_free(split_chunks[i]);

done_free:
    uvm_kvfree(split_chunks);

    return status;
}

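The second split above checks that each sub-chunk inherits the parent's physical GPU mapping at the right byte offset. A minimal sketch of that relationship (the helper name is hypothetical; the loop above performs this check inline):

// Hypothetical helper: after splitting a mapped physical chunk, sub-chunk
// `index` is expected at the parent's DMA address plus its byte offset into
// the parent.
static NvU64 expected_split_chunk_dma_addr(NvU64 parent_dma_addr,
                                           uvm_chunk_size_t split_size,
                                           size_t index)
{
    return parent_dma_addr + ((NvU64)index * split_size);
}

For example, a 2MiB parent mapped at IOVA 0x1000000 and split into 64KiB sub-chunks would place sub-chunk 3 at 0x1030000.
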
static NV_STATUS test_cpu_chunk_split_and_merge(uvm_gpu_t *gpu)
{
    uvm_chunk_sizes_mask_t alloc_sizes = uvm_cpu_chunk_get_allocation_sizes();
    uvm_chunk_size_t size;

    size = uvm_chunk_find_next_size(alloc_sizes, PAGE_SIZE);
    for_each_chunk_size_from(size, alloc_sizes) {
        uvm_cpu_chunk_t *chunk;
        NV_STATUS status;

        TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
        status = do_test_cpu_chunk_split_and_merge(chunk, gpu);
        uvm_cpu_chunk_free(chunk);

        if (status != NV_OK)
            return status;
    }

    return NV_OK;
}

static NV_STATUS test_cpu_chunk_dirty_split(uvm_cpu_chunk_t *chunk)
{
    uvm_chunk_size_t size = uvm_cpu_chunk_get_size(chunk);
    uvm_chunk_size_t split_size;
    uvm_chunk_sizes_mask_t alloc_sizes = uvm_cpu_chunk_get_allocation_sizes();
    uvm_cpu_chunk_t **split_chunks;
    uvm_cpu_chunk_t *merged_chunk;
    size_t num_pages = size / PAGE_SIZE;
    size_t num_split_chunks;
    size_t num_split_chunk_pages;
    size_t i;
    NV_STATUS status = NV_OK;

    split_size = uvm_chunk_find_prev_size(alloc_sizes, size);
    UVM_ASSERT(split_size != UVM_CHUNK_SIZE_INVALID);
    num_split_chunks = size / split_size;
    num_split_chunk_pages = split_size / PAGE_SIZE;
    split_chunks = uvm_kvmalloc_zero(num_split_chunks * sizeof(*split_chunks));
    if (!split_chunks)
        return NV_ERR_NO_MEMORY;

    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_split(chunk, split_chunks), done_free);

    // The parent chunk had only the even pages set as dirty. Make sure
    // that's still the case after the split.
    for (i = 0; i < num_split_chunks; i++) {
        uvm_page_index_t chunk_page;

        for (chunk_page = 0; chunk_page < num_split_chunk_pages; chunk_page++) {
            if (((i * num_split_chunk_pages) + chunk_page) % 2)
                TEST_CHECK_GOTO(!uvm_cpu_chunk_is_dirty(split_chunks[i], chunk_page), done);
            else
                TEST_CHECK_GOTO(uvm_cpu_chunk_is_dirty(split_chunks[i], chunk_page), done);
        }
    }

    if (split_size > PAGE_SIZE) {
        for (i = 0; i < num_split_chunks; i++)
            TEST_NV_CHECK_GOTO(test_cpu_chunk_dirty_split(split_chunks[i]), done);
    }

    merged_chunk = uvm_cpu_chunk_merge(split_chunks);
    num_split_chunks = 0;

    for (i = 0; i < num_pages; i++) {
        if (i % 2)
            TEST_CHECK_GOTO(!uvm_cpu_chunk_is_dirty(merged_chunk, i), done_free);
        else
            TEST_CHECK_GOTO(uvm_cpu_chunk_is_dirty(merged_chunk, i), done_free);
    }

done:
    for (i = 0; i < num_split_chunks; i++)
        uvm_cpu_chunk_free(split_chunks[i]);

done_free:
    uvm_kvfree(split_chunks);

    return status;
}

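The even/odd checks above rely on a simple index translation between a split chunk's pages and the parent chunk's pages. A short hedged sketch of that mapping (the helper name is hypothetical; the nested loops above compute it inline):

// Hypothetical helper: page `chunk_page` of split chunk `split_index` maps to
// parent page split_index * num_split_chunk_pages + chunk_page, which is why
// the parent's even/odd dirty pattern survives the split unchanged.
static uvm_page_index_t sketch_parent_page_index(size_t split_index,
                                                 size_t num_split_chunk_pages,
                                                 uvm_page_index_t chunk_page)
{
    return (uvm_page_index_t)(split_index * num_split_chunk_pages) + chunk_page;
}
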
static NV_STATUS test_cpu_chunk_dirty(uvm_gpu_t *gpu)
{
    NV_STATUS status = NV_OK;
    uvm_cpu_chunk_t *chunk;
    uvm_chunk_size_t size;
    uvm_chunk_sizes_mask_t alloc_sizes = uvm_cpu_chunk_get_allocation_sizes();
    size_t i;

    for_each_chunk_size(size, alloc_sizes) {
        uvm_cpu_physical_chunk_t *phys_chunk;
        size_t num_pages;

        TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
        phys_chunk = uvm_cpu_chunk_to_physical(chunk);
        num_pages = uvm_cpu_chunk_num_pages(chunk);

        for (i = 0; i < num_pages; i++)
            TEST_CHECK_GOTO(!uvm_cpu_chunk_is_dirty(chunk, i), done);

        if (size > PAGE_SIZE)
            TEST_CHECK_GOTO(bitmap_empty(phys_chunk->dirty_bitmap, num_pages), done);

        uvm_cpu_chunk_free(chunk);

        TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO, &chunk));
        phys_chunk = uvm_cpu_chunk_to_physical(chunk);
        num_pages = uvm_cpu_chunk_num_pages(chunk);

        // Allocating the chunk with UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO will set
        // the entire chunk as dirty.
        for (i = 0; i < num_pages; i++)
            TEST_CHECK_GOTO(uvm_cpu_chunk_is_dirty(chunk, i), done);

        if (size > PAGE_SIZE)
            TEST_CHECK_GOTO(bitmap_full(phys_chunk->dirty_bitmap, num_pages), done);

        // For chunks larger than PAGE_SIZE, marking individual pages in a
        // physical chunk should not affect the entire chunk.
        for (i = 0; i < num_pages; i++) {
            uvm_cpu_chunk_mark_clean(chunk, i);
            TEST_CHECK_GOTO(!uvm_cpu_chunk_is_dirty(chunk, i), done);
            if (size > PAGE_SIZE) {
                TEST_CHECK_GOTO(bitmap_empty(phys_chunk->dirty_bitmap, i + 1), done);
                TEST_CHECK_GOTO(bitmap_weight(phys_chunk->dirty_bitmap, num_pages) == num_pages - (i + 1), done);
            }
        }

        for (i = 0; i < num_pages; i++) {
            uvm_cpu_chunk_mark_dirty(chunk, i);
            TEST_CHECK_GOTO(uvm_cpu_chunk_is_dirty(chunk, i), done);
            if (size > PAGE_SIZE) {
                TEST_CHECK_GOTO(bitmap_full(phys_chunk->dirty_bitmap, i + 1), done);
                TEST_CHECK_GOTO(bitmap_weight(phys_chunk->dirty_bitmap, num_pages) == i + 1, done);
            }
        }

        // Leave only even pages as dirty.
        for (i = 1; i < num_pages; i += 2)
            uvm_cpu_chunk_mark_clean(chunk, i);

        for (i = 0; i < num_pages; i++) {
            if (i % 2) {
                TEST_CHECK_GOTO(!uvm_cpu_chunk_is_dirty(chunk, i), done);
                if (size > PAGE_SIZE)
                    TEST_CHECK_GOTO(!test_bit(i, phys_chunk->dirty_bitmap), done);
            }
            else {
                TEST_CHECK_GOTO(uvm_cpu_chunk_is_dirty(chunk, i), done);
                if (size > PAGE_SIZE)
                    TEST_CHECK_GOTO(test_bit(i, phys_chunk->dirty_bitmap), done);
            }
        }

        if (size > PAGE_SIZE)
            TEST_NV_CHECK_GOTO(test_cpu_chunk_dirty_split(chunk), done);

done:
        uvm_cpu_chunk_free(chunk);
        if (status != NV_OK)
            break;
    }

    return status;
}

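The checks above treat phys_chunk->dirty_bitmap as one bit per base page of a physical chunk larger than PAGE_SIZE. A hedged sketch of the semantics being exercised, using the standard kernel bitmap helpers (this illustrates the expected behavior only; it is not the driver's implementation of uvm_cpu_chunk_mark_dirty()/uvm_cpu_chunk_mark_clean()):

// Illustration only: per-page dirty tracking over a kernel bitmap, mirroring
// the bitmap_empty()/bitmap_full()/test_bit() checks in the test above.
static void sketch_mark_page_dirty(uvm_cpu_physical_chunk_t *phys_chunk, uvm_page_index_t page_index)
{
    set_bit(page_index, phys_chunk->dirty_bitmap);
}

static void sketch_mark_page_clean(uvm_cpu_physical_chunk_t *phys_chunk, uvm_page_index_t page_index)
{
    clear_bit(page_index, phys_chunk->dirty_bitmap);
}
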
NV_STATUS do_test_cpu_chunk_free(uvm_cpu_chunk_t *chunk, uvm_va_space_t *va_space, uvm_processor_mask_t *test_gpus)
{
    NV_STATUS status = NV_OK;
    uvm_cpu_chunk_t **split_chunks;
    uvm_chunk_sizes_mask_t alloc_sizes = uvm_cpu_chunk_get_allocation_sizes();
    size_t size = uvm_cpu_chunk_get_size(chunk);
    uvm_chunk_size_t split_size = uvm_chunk_find_prev_size(alloc_sizes, size);
    size_t num_split_chunks = size / split_size;
    uvm_gpu_t *gpu;
    size_t i;
    size_t j;

    split_chunks = uvm_kvmalloc_zero(num_split_chunks * sizeof(*split_chunks));
    if (!split_chunks) {
        UVM_TEST_PRINT("Failed to allocate split chunk array memory");
        status = NV_ERR_NO_MEMORY;
        goto done_free;
    }

    TEST_NV_CHECK_GOTO(uvm_cpu_chunk_split(chunk, split_chunks), done_free);

    // The caller does not free the input chunk, so we have to do it in this
    // function. However, beyond this point the input chunk will be freed by
    // freeing the split chunks.
    chunk = NULL;

    // Map every other chunk.
    // The call to uvm_cpu_chunk_unmap_gpu_phys() is here in case this is part
    // of a double split (see below). In that case, the parent chunk would be
    // either mapped or unmapped.
    //
    // If it is mapped, we have to unmap the subchunks in order for the mapping
    // check below to succeed. If it is unmapped, the calls are noops.
    for (i = 0; i < num_split_chunks; i++) {
        for_each_va_space_gpu_in_mask(gpu, va_space, test_gpus) {
            if (i & (1 << uvm_id_gpu_index(gpu->id)))
                TEST_NV_CHECK_GOTO(uvm_cpu_chunk_map_gpu(split_chunks[i], gpu), done);
            else
                uvm_cpu_chunk_unmap_gpu_phys(split_chunks[i], gpu->parent);
        }
    }

    // Do a double split if we can.
    if (split_size > PAGE_SIZE) {
        size_t chunk_to_be_resplit;

        // Test an even (mapped) chunk.
        chunk_to_be_resplit = num_split_chunks / 2;
        TEST_NV_CHECK_GOTO(do_test_cpu_chunk_free(split_chunks[chunk_to_be_resplit], va_space, test_gpus), done);

        // The chunk would have been freed by do_test_cpu_chunk_free().
        split_chunks[chunk_to_be_resplit] = NULL;

        // Test an odd (unmapped) chunk.
        chunk_to_be_resplit += 1;
        TEST_NV_CHECK_GOTO(do_test_cpu_chunk_free(split_chunks[chunk_to_be_resplit], va_space, test_gpus), done);
        split_chunks[chunk_to_be_resplit] = NULL;
    }

    for (i = 0; i < num_split_chunks; i++) {
        if (!split_chunks[i])
            continue;

        uvm_cpu_chunk_free(split_chunks[i]);
        split_chunks[i] = NULL;

        for (j = i + 1; j < num_split_chunks; j++) {
            if (!split_chunks[j])
                continue;

            TEST_CHECK_GOTO(uvm_cpu_chunk_is_logical(split_chunks[j]), done);
            TEST_CHECK_GOTO(uvm_cpu_chunk_get_size(split_chunks[j]) == split_size, done);
            for_each_va_space_gpu_in_mask(gpu, va_space, test_gpus) {
                if (j & (1 << uvm_id_gpu_index(gpu->id)))
                    TEST_CHECK_GOTO(uvm_cpu_chunk_get_gpu_phys_addr(split_chunks[j], gpu->parent), done);
                else
                    TEST_CHECK_GOTO(!uvm_cpu_chunk_get_gpu_phys_addr(split_chunks[j], gpu->parent), done);
            }
        }
    }

done:
    for (i = 0; i < num_split_chunks; i++) {
        if (split_chunks[i])
            uvm_cpu_chunk_free(split_chunks[i]);
    }

done_free:
    if (chunk)
        uvm_cpu_chunk_free(chunk);

    uvm_kvfree(split_chunks);

    return status;
}

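The map/unmap pattern above keys off the sub-chunk index and the GPU's index: sub-chunk i is mapped on a given GPU exactly when bit uvm_id_gpu_index(gpu->id) of i is set, so each GPU sees a different mix of mapped and unmapped sub-chunks. A small hedged sketch of that predicate (the helper name is hypothetical; the loops above evaluate it inline):

// Hypothetical helper: should split chunk `chunk_index` be mapped on `gpu`?
// The GPU with index 0 maps the odd sub-chunks, index 1 maps indices with
// bit 1 set, and so on.
static bool chunk_should_be_mapped_on_gpu(size_t chunk_index, uvm_gpu_t *gpu)
{
    return (chunk_index & ((size_t)1 << uvm_id_gpu_index(gpu->id))) != 0;
}
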
NV_STATUS test_cpu_chunk_free(uvm_va_space_t *va_space, uvm_processor_mask_t *test_gpus)
{
    uvm_cpu_chunk_t *chunk;
    uvm_chunk_sizes_mask_t alloc_sizes = uvm_cpu_chunk_get_allocation_sizes();
    size_t size = uvm_chunk_find_next_size(alloc_sizes, PAGE_SIZE);

    for_each_chunk_size_from(size, alloc_sizes) {
        TEST_NV_CHECK_RET(test_cpu_chunk_alloc(size, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE, &chunk));
        TEST_NV_CHECK_RET(do_test_cpu_chunk_free(chunk, va_space, test_gpus));
    }

    return NV_OK;
}

NV_STATUS uvm_test_cpu_chunk_api(UVM_TEST_CPU_CHUNK_API_PARAMS *params, struct file *filp)
{
    uvm_va_space_t *va_space = uvm_va_space_get(filp);
    uvm_processor_mask_t test_gpus;
    uvm_gpu_t *gpu;
    NV_STATUS status = NV_OK;

    uvm_va_space_down_read(va_space);
    uvm_processor_mask_and(&test_gpus,
                           &va_space->registered_gpus,
                           &va_space->accessible_from[uvm_id_value(UVM_ID_CPU)]);

    for_each_va_space_gpu_in_mask(gpu, va_space, &test_gpus) {
        TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_basic(gpu, UVM_CPU_CHUNK_ALLOC_FLAGS_NONE), done);
        TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_basic(gpu, UVM_CPU_CHUNK_ALLOC_FLAGS_ZERO), done);
        TEST_NV_CHECK_GOTO(test_cpu_chunk_split_and_merge(gpu), done);
        TEST_NV_CHECK_GOTO(test_cpu_chunk_dirty(gpu), done);
    }

    TEST_NV_CHECK_GOTO(test_cpu_chunk_free(va_space, &test_gpus), done);

    if (uvm_processor_mask_get_gpu_count(&test_gpus) >= 3) {
        uvm_gpu_t *gpu2, *gpu3;

        gpu = uvm_processor_mask_find_first_va_space_gpu(&test_gpus, va_space);
        gpu2 = uvm_processor_mask_find_next_va_space_gpu(&test_gpus, va_space, gpu);
        gpu3 = uvm_processor_mask_find_next_va_space_gpu(&test_gpus, va_space, gpu2);
        TEST_NV_CHECK_GOTO(test_cpu_chunk_mapping_array(gpu, gpu2, gpu3), done);
    }

done:
    uvm_va_space_up_read(va_space);
    return status;
}