580.65.06

Maneet Singh
2025-08-04 11:15:02 -07:00
parent d890313300
commit 307159f262
1315 changed files with 477791 additions and 279973 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2011-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -132,11 +132,16 @@ struct nvidia_p2p_page {
    } registers;
} nvidia_p2p_page_t;
#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00010002
#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00020000
#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
    NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)
/*
* Page Table Flags
*/
#define NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE 0x1
typedef
struct nvidia_p2p_page_table {
    uint32_t version;
@@ -144,6 +149,7 @@ struct nvidia_p2p_page_table {
    struct nvidia_p2p_page **pages;
    uint32_t entries;
    uint8_t *gpu_uuid;
    uint32_t flags;
} nvidia_p2p_page_table_t;
/*
@@ -153,6 +159,9 @@ struct nvidia_p2p_page_table {
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
* This API does not support Coherent Driver-based Memory Management (CDMM) mode.
* CDMM allows coherent GPU memory to be managed by the driver rather than the OS;
* the driver does this by not onlining the memory as a NUMA node.
*
* This API may sleep.
*
@@ -201,7 +210,7 @@ int nvidia_p2p_get_pages( uint64_t p2p_token, uint32_t va_space,
* accessible to a third-party device. The pages will persist until
* explicitly freed by nvidia_p2p_put_pages_persistent().
*
* Persistent GPU memory mappings are not supported on PowerPC,
* Persistent GPU memory mappings are not supported on
* MIG-enabled devices and vGPU.
*
* This API only supports pinned, GPU-resident memory, such as that provided
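The header hunks above bump NVIDIA_P2P_PAGE_TABLE_VERSION from 0x00010002 to 0x00020000 and add a flags field, along with the NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE bit, to struct nvidia_p2p_page_table. A minimal sketch of how a consuming kernel driver might guard against the major-version bump before reading the new field, assuming a page table already returned by nvidia_p2p_get_pages(); the helper name below is illustrative, not part of this commit:

#include <linux/errno.h>
#include <linux/printk.h>
#include "nv-p2p.h"

/* Illustrative helper: check version compatibility before touching new fields. */
static int nv_check_page_table(struct nvidia_p2p_page_table *page_table)
{
    /* The major version must match the header this module was built against. */
    if (!NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(page_table))
        return -EINVAL;

    /* New in this header version: per-table flags. */
    if (page_table->flags & NVIDIA_P2P_PAGE_TABLE_FLAGS_CPU_CACHEABLE)
        pr_info("p2p page table maps CPU-cacheable memory\n");

    return 0;
}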

View File

@@ -242,6 +242,61 @@ err:
return 0;
}
/* acquire return code: 1 mine, 0 - not mine */
static int nv_mem_acquire_nc(unsigned long addr, size_t size, void *peer_mem_private_data,
                             char *peer_mem_name, void **client_context)
{
    int ret = 0;
    struct nv_mem_context *nv_mem_context;

    nv_mem_context = kzalloc(sizeof *nv_mem_context, GFP_KERNEL);
    if (!nv_mem_context)
        /* Error case handled as not mine */
        return 0;

    nv_mem_context->pad1 = NV_MEM_CONTEXT_MAGIC;
    nv_mem_context->page_virt_start = addr & GPU_PAGE_MASK;
    nv_mem_context->page_virt_end = (addr + size + GPU_PAGE_SIZE - 1) & GPU_PAGE_MASK;
    nv_mem_context->mapped_size = nv_mem_context->page_virt_end - nv_mem_context->page_virt_start;
    nv_mem_context->pad2 = NV_MEM_CONTEXT_MAGIC;

    /* Probe: pin the range to verify it is GPU memory, then release it below. */
    /* The persistent API is preferred when the driver exposes it. */
#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
    ret = nvidia_p2p_get_pages_persistent(nv_mem_context->page_virt_start,
                                          nv_mem_context->mapped_size,
                                          &nv_mem_context->page_table, 0);
#else
    ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size,
                               &nv_mem_context->page_table, NULL, NULL);
#endif
    if (ret < 0)
        goto err;

#ifdef NVIDIA_P2P_CAP_GET_PAGES_PERSISTENT_API
    ret = nvidia_p2p_put_pages_persistent(nv_mem_context->page_virt_start,
                                          nv_mem_context->page_table, 0);
#else
    ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start,
                               nv_mem_context->page_table);
#endif
    if (ret < 0) {
        peer_err("nv_mem_acquire -- error %d while calling nvidia_p2p_put_pages()\n", ret);
        goto err;
    }

    /* 1 means mine */
    *client_context = nv_mem_context;
    __module_get(THIS_MODULE);
    return 1;

err:
    memset(nv_mem_context, 0, sizeof(*nv_mem_context));
    kfree(nv_mem_context);

    /* Error case handled as not mine */
    return 0;
}
static int nv_dma_map(struct sg_table *sg_head, void *context,
                      struct device *dma_device, int dmasync,
                      int *nmap)
@@ -477,7 +532,7 @@ static int nv_mem_get_pages_nc(unsigned long addr,
}
static struct peer_memory_client nv_mem_client_nc = {
    .acquire = nv_mem_acquire,
    .acquire = nv_mem_acquire_nc,
    .get_pages = nv_mem_get_pages_nc,
    .dma_map = nv_dma_map,
    .dma_unmap = nv_dma_unmap,
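The last hunk switches the non-coherent client's acquire callback to nv_mem_acquire_nc(), which claims an address range only if the pin/unpin probe above succeeds. For orientation, a minimal sketch of how such a client is typically registered at module load, assuming the MLNX_OFED peer-memory interface (rdma/peer_mem.h, ib_register_peer_memory_client(), and the variable names here are assumptions, not part of this diff):

#include <linux/errno.h>
#include <linux/module.h>
#include <rdma/peer_mem.h>

static void *reg_handle_nc;

/* Sketch: register the non-coherent peer memory client with the RDMA core. */
static int __init nv_mem_client_init(void)
{
    /* NULL invalidation callback: persistent mappings are never invalidated. */
    reg_handle_nc = ib_register_peer_memory_client(&nv_mem_client_nc, NULL);
    if (!reg_handle_nc)
        return -EINVAL;

    return 0;
}
module_init(nv_mem_client_init);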