mirror of https://github.com/NVIDIA/open-gpu-kernel-modules.git
560.28.03
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,9 +25,21 @@
 #define __DETECT_SELF_HOSTED_H__
 
 // PCI devIds 0x2340-0x237f are for Self-Hosted Hopper
-static inline int pci_devid_is_self_hosted(unsigned short devid)
+static inline int pci_devid_is_self_hosted_hopper(unsigned short devid)
 {
     return devid >= 0x2340 && devid <= 0x237f;
 }
 
+// PCI devIds 0x2940-0x297f are for Self-Hosted Blackwell
+static inline int pci_devid_is_self_hosted_blackwell(unsigned short devid)
+{
+    return devid >= 0x2940 && devid <= 0x297f;
+}
+
+static inline int pci_devid_is_self_hosted(unsigned short devid)
+{
+    return pci_devid_is_self_hosted_hopper(devid) ||
+           pci_devid_is_self_hosted_blackwell(devid);
+}
+
 #endif
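
The header above keys self-hosted detection entirely off PCI device ID ranges: 0x2340-0x237f for Hopper and 0x2940-0x297f for Blackwell. A minimal standalone sketch of the combined check follows; the test IDs in main() are illustrative values, not taken from the driver.

    #include <stdio.h>

    static inline int pci_devid_is_self_hosted_hopper(unsigned short devid)
    {
        return devid >= 0x2340 && devid <= 0x237f;
    }

    static inline int pci_devid_is_self_hosted_blackwell(unsigned short devid)
    {
        return devid >= 0x2940 && devid <= 0x297f;
    }

    static inline int pci_devid_is_self_hosted(unsigned short devid)
    {
        /* Self-hosted if the ID falls in either range. */
        return pci_devid_is_self_hosted_hopper(devid) ||
               pci_devid_is_self_hosted_blackwell(devid);
    }

    int main(void)
    {
        /* Illustrative IDs: one in the Hopper range, one in the Blackwell
         * range, one in neither. */
        unsigned short ids[] = { 0x2342, 0x2941, 0x2200 };
        for (int i = 0; i < 3; i++)
            printf("0x%04x -> self-hosted: %d\n", ids[i],
                   pci_devid_is_self_hosted(ids[i]));
        return 0;
    }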
@@ -159,7 +159,14 @@ static int lkca_aead_internal(struct crypto_aead *aead,
     }
 
     if (rc != 0) {
-        pr_info("Encryption FAILED\n");
+        if (enc) {
+            pr_info("aead.c: Encryption failed with error %i\n", rc);
+        } else {
+            pr_info("aead.c: Decryption failed with error %i\n", rc);
+            if (rc == -EBADMSG) {
+                pr_info("aead.c: Authentication tag mismatch!\n");
+            }
+        }
     }
 
     *data_out_size = data_in_size;
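
The reworked logging separates encryption from decryption failures and singles out -EBADMSG, the code the kernel AEAD API uses when the authentication tag does not verify. Below is a hedged, userspace-style sketch of the same classification; report_aead_result and the sample return codes are illustrative, not part of the driver.

    #include <errno.h>
    #include <stdio.h>

    /* Classify an AEAD return code the way the hunk above does: nonzero is a
     * failure, and -EBADMSG on decryption means the tag did not match. */
    static void report_aead_result(int rc, int enc)
    {
        if (rc == 0)
            return;
        if (enc) {
            printf("Encryption failed with error %d\n", rc);
        } else {
            printf("Decryption failed with error %d\n", rc);
            if (rc == -EBADMSG)
                printf("Authentication tag mismatch!\n");
        }
    }

    int main(void)
    {
        report_aead_result(0, 1);        /* success: prints nothing */
        report_aead_result(-EBADMSG, 0); /* tampered data or wrong key/tag */
        return 0;
    }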
kernel-open/nvidia/libspdm_internal_crypt_lib.c (new file, 42 lines)
@@ -0,0 +1,42 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
+ * License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
+ */
+
+#include "os-interface.h"
+#include "internal_crypt_lib.h"
+#include "library/cryptlib.h"
+
+bool libspdm_check_crypto_backend(void)
+{
+#ifdef USE_LKCA
+    nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA wrappers found.\n");
+    nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA calls may still fail if modules have not been loaded!\n");
+    return true;
+#else
+    nv_printf(NV_DBG_ERRORS, "libspdm_check_crypto_backend: Error - libspdm expects LKCA but found stubs!\n");
+    return false;
+#endif
+}
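
libspdm_check_crypto_backend only reports whether the LKCA-backed wrappers were compiled in; as its own message notes, individual LKCA calls can still fail if the kernel crypto modules are not loaded. A hedged sketch of how an initialization path might consume it; the spdm_session_init wrapper is hypothetical and assumes the driver's headers are in scope.

    /* Hypothetical caller: refuse to start an SPDM session when the build
     * only contains the stub crypto backend. */
    static bool spdm_session_init(void)
    {
        if (!libspdm_check_crypto_backend())
        {
            /* Stubs compiled in: attestation cannot work, bail out early. */
            return false;
        }

        /* ... continue with session establishment; individual LKCA calls can
         * still fail at runtime if the kernel modules are missing ... */
        return true;
    }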
@@ -49,7 +49,7 @@
 
 #include "ioctl_nvswitch.h"
 
-const static struct
+static const struct
 {
     NvlStatus status;
     int err;
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -1089,40 +1089,3 @@ void NV_API_CALL nv_dma_release_sgt
 {
 }
 #endif /* NV_LINUX_DMA_BUF_H_PRESENT && NV_DRM_AVAILABLE && NV_DRM_DRM_GEM_H_PRESENT */
-
-#if defined(NV_LINUX_DMA_BUF_H_PRESENT)
-#endif /* NV_LINUX_DMA_BUF_H_PRESENT */
-
-#ifndef IMPORT_DMABUF_FUNCTIONS_DEFINED
-
-NV_STATUS NV_API_CALL nv_dma_import_dma_buf
-(
-    nv_dma_device_t *dma_dev,
-    struct dma_buf *dma_buf,
-    NvU32 *size,
-    struct sg_table **sgt,
-    nv_dma_buf_t **import_priv
-)
-{
-    return NV_ERR_NOT_SUPPORTED;
-}
-
-NV_STATUS NV_API_CALL nv_dma_import_from_fd
-(
-    nv_dma_device_t *dma_dev,
-    NvS32 fd,
-    NvU32 *size,
-    struct sg_table **sgt,
-    nv_dma_buf_t **import_priv
-)
-{
-    return NV_ERR_NOT_SUPPORTED;
-}
-
-void NV_API_CALL nv_dma_release_dma_buf
-(
-    nv_dma_buf_t *import_priv
-)
-{
-}
-#endif /* !IMPORT_DMABUF_FUNCTIONS_DEFINED */
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -1308,3 +1308,129 @@ nv_dma_buf_export(
 #endif // CONFIG_DMA_SHARED_BUFFER
 }
 
+NV_STATUS NV_API_CALL nv_dma_import_dma_buf
+(
+    nv_dma_device_t *dma_dev,
+    struct dma_buf *dma_buf,
+    NvU32 *size,
+    struct sg_table **sgt,
+    nv_dma_buf_t **import_priv
+)
+{
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+    nv_dma_buf_t *nv_dma_buf = NULL;
+    struct dma_buf_attachment *dma_attach = NULL;
+    struct sg_table *map_sgt = NULL;
+    NV_STATUS status = NV_OK;
+
+    if ((dma_dev == NULL) ||
+        (dma_buf == NULL) ||
+        (size == NULL) ||
+        (sgt == NULL) ||
+        (import_priv == NULL))
+    {
+        nv_printf(NV_DBG_ERRORS, "Import arguments are NULL!\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    status = os_alloc_mem((void **)&nv_dma_buf, sizeof(*nv_dma_buf));
+    if (status != NV_OK)
+    {
+        nv_printf(NV_DBG_ERRORS, "Can't allocate mem for nv_buf!\n");
+        return status;
+    }
+
+    get_dma_buf(dma_buf);
+
+    dma_attach = dma_buf_attach(dma_buf, dma_dev->dev);
+    if (IS_ERR_OR_NULL(dma_attach))
+    {
+        nv_printf(NV_DBG_ERRORS, "Can't attach dma_buf!\n");
+        status = NV_ERR_OPERATING_SYSTEM;
+
+        goto dma_buf_attach_fail;
+    }
+
+    map_sgt = dma_buf_map_attachment(dma_attach, DMA_BIDIRECTIONAL);
+    if (IS_ERR_OR_NULL(map_sgt))
+    {
+        nv_printf(NV_DBG_ERRORS, "Can't map dma attachment!\n");
+        status = NV_ERR_OPERATING_SYSTEM;
+
+        goto dma_buf_map_fail;
+    }
+
+    nv_dma_buf->dma_buf = dma_buf;
+    nv_dma_buf->dma_attach = dma_attach;
+    nv_dma_buf->sgt = map_sgt;
+
+    *size = dma_buf->size;
+    *import_priv = nv_dma_buf;
+    *sgt = map_sgt;
+
+    return NV_OK;
+
+dma_buf_map_fail:
+    dma_buf_detach(dma_buf, dma_attach);
+dma_buf_attach_fail:
+    os_free_mem(nv_dma_buf);
+    dma_buf_put(dma_buf);
+
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif // CONFIG_DMA_SHARED_BUFFER
+}
+
+NV_STATUS NV_API_CALL nv_dma_import_from_fd
+(
+    nv_dma_device_t *dma_dev,
+    NvS32 fd,
+    NvU32 *size,
+    struct sg_table **sgt,
+    nv_dma_buf_t **import_priv
+)
+{
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+    struct dma_buf *dma_buf = dma_buf_get(fd);
+    NV_STATUS status;
+
+    if (IS_ERR_OR_NULL(dma_buf))
+    {
+        nv_printf(NV_DBG_ERRORS, "Can't get dma_buf from fd!\n");
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    status = nv_dma_import_dma_buf(dma_dev,
+                                   dma_buf, size, sgt, import_priv);
+    dma_buf_put(dma_buf);
+
+    return status;
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif // CONFIG_DMA_SHARED_BUFFER
+}
+
+void NV_API_CALL nv_dma_release_dma_buf
+(
+    nv_dma_buf_t *import_priv
+)
+{
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+    nv_dma_buf_t *nv_dma_buf = NULL;
+
+    if (import_priv == NULL)
+    {
+        return;
+    }
+
+    nv_dma_buf = (nv_dma_buf_t *)import_priv;
+    dma_buf_unmap_attachment(nv_dma_buf->dma_attach, nv_dma_buf->sgt,
+                             DMA_BIDIRECTIONAL);
+    dma_buf_detach(nv_dma_buf->dma_buf, nv_dma_buf->dma_attach);
+    dma_buf_put(nv_dma_buf->dma_buf);
+
+    os_free_mem(nv_dma_buf);
+#endif // CONFIG_DMA_SHARED_BUFFER
+}
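
The import path takes a reference on the dma-buf, attaches it to the importing device, and maps the attachment to obtain an sg_table; the release path undoes those steps in reverse order. A hedged sketch of the expected pairing from a hypothetical caller follows; everything except the nv_dma_* entry points is illustrative.

    /* Hypothetical caller: import a dma-buf by fd, use its scatterlist,
     * then release it. Error handling trimmed for brevity. */
    static NV_STATUS map_peer_buffer(nv_dma_device_t *dma_dev, NvS32 fd)
    {
        struct sg_table *sgt = NULL;
        nv_dma_buf_t *priv = NULL;
        NvU32 size = 0;
        NV_STATUS status;

        status = nv_dma_import_from_fd(dma_dev, fd, &size, &sgt, &priv);
        if (status != NV_OK)
            return status;

        /* ... program the device with the DMA addresses found in sgt ... */

        /* Called exactly once per successful import: unmaps the attachment,
         * detaches, and drops the dma-buf reference. */
        nv_dma_release_dma_buf(priv);
        return NV_OK;
    }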
kernel-open/nvidia/nv-host1x.c (new file, 80 lines)
@@ -0,0 +1,80 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+
+#include "os-interface.h"
+#include "nv-linux.h"
+
+#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(NV_LINUX_NVHOST_T194_H_PRESENT)
+#include <linux/nvhost.h>
+#include <linux/nvhost_t194.h>
+
+NV_STATUS nv_get_syncpoint_aperture
+(
+    NvU32 syncpointId,
+    NvU64 *physAddr,
+    NvU64 *limit,
+    NvU32 *offset
+)
+{
+    struct platform_device *host1x_pdev = NULL;
+    phys_addr_t base;
+    size_t size;
+
+#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_get_default_device
+    host1x_pdev = nvhost_get_default_device();
+    if (host1x_pdev == NULL)
+    {
+        return NV_ERR_INVALID_DEVICE;
+    }
+#endif
+
+#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_aperture && \
+    NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_byte_offset
+    nvhost_syncpt_unit_interface_get_aperture(
+        host1x_pdev, &base, &size);
+
+    *physAddr = base;
+    *limit = nvhost_syncpt_unit_interface_get_byte_offset(1);
+    *offset = nvhost_syncpt_unit_interface_get_byte_offset(syncpointId);
+#else
+    return NV_ERR_NOT_SUPPORTED;
+#endif
+
+    return NV_OK;
+}
+#else
+
+NV_STATUS nv_get_syncpoint_aperture
+(
+    NvU32 syncpointId,
+    NvU64 *physAddr,
+    NvU64 *limit,
+    NvU32 *offset
+)
+{
+    return NV_ERR_NOT_SUPPORTED;
+}
+#endif
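
nv_get_syncpoint_aperture resolves the host1x syncpoint aperture only when the Tegra nvhost headers and exported symbols are available; otherwise the stub at the bottom returns NV_ERR_NOT_SUPPORTED. A hedged caller sketch follows; map_syncpoint and its reading of the outputs are assumptions, not driver code.

    /* Hypothetical caller: look up the aperture for one syncpoint and fall
     * back gracefully on non-Tegra configurations. */
    static NV_STATUS map_syncpoint(NvU32 syncpointId)
    {
        NvU64 physAddr = 0, limit = 0;
        NvU32 offset = 0;
        NV_STATUS status;

        status = nv_get_syncpoint_aperture(syncpointId, &physAddr, &limit, &offset);
        if (status != NV_OK)
            return status;   /* includes NV_ERR_NOT_SUPPORTED on non-host1x platforms */

        /* The syncpoint registers appear to live at physAddr + offset; limit
         * looks like the per-syncpoint stride (byte offset of syncpoint 1). */
        return NV_OK;
    }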
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -176,7 +176,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
 {
 
     unsigned i, j;
-    const static unsigned attempts = 3;
+    static const unsigned attempts = 3;
     struct task_struct *thread[3];
 
     for (i = 0;; i++) {
@@ -368,7 +368,7 @@ int nv_encode_caching(
     return 0;
 }
 
-int static nvidia_mmap_peer_io(
+static int nvidia_mmap_peer_io(
     struct vm_area_struct *vma,
     nv_alloc_t *at,
     NvU64 page_index,
@@ -389,7 +389,7 @@ int static nvidia_mmap_peer_io(
     return ret;
 }
 
-int static nvidia_mmap_sysmem(
+static int nvidia_mmap_sysmem(
     struct vm_area_struct *vma,
     nv_alloc_t *at,
     NvU64 page_index,
@@ -24,9 +24,9 @@
 #include <linux/module.h> // for MODULE_FIRMWARE
 
 // must precede "nv.h" and "nv-firmware.h" includes
-#define NV_FIRMWARE_PATH_FOR_FILENAME(filename) "nvidia/" NV_VERSION_STRING "/" filename
-#define NV_FIRMWARE_DECLARE_GSP_FILENAME(filename) \
-    MODULE_FIRMWARE(NV_FIRMWARE_PATH_FOR_FILENAME(filename));
+#define NV_FIRMWARE_FOR_NAME(name) "nvidia/" NV_VERSION_STRING "/" name ".bin"
+#define NV_FIRMWARE_DECLARE_GSP(name) \
+    MODULE_FIRMWARE(NV_FIRMWARE_FOR_NAME(name));
 #include "nv-firmware.h"
 
 #include "nvmisc.h"
@@ -3945,7 +3945,9 @@ const void* NV_API_CALL nv_get_firmware(
 
     // path is relative to /lib/firmware
     // if this fails it will print an error to dmesg
-    if (request_firmware(&fw, nv_firmware_path(fw_type, fw_chip_family), nvl->dev) != 0)
+    if (request_firmware(&fw,
+                         nv_firmware_for_chip_family(fw_type, fw_chip_family),
+                         nvl->dev) != 0)
         return NULL;
 
     *fw_size = fw->size;
@@ -4042,6 +4044,16 @@ int NV_API_CALL nv_get_event(
     nvidia_event_t *nvet;
     unsigned long eflags;
 
+    //
+    // Note that the head read/write is not atomic when done outside of the
+    // spinlock, so this might not be a valid pointer at all. But if we read
+    // NULL here that means that the value indeed was NULL and we can bail
+    // early since there's no events. Otherwise, we have to do a proper read
+    // under a spinlock.
+    //
+    if (nvlfp->event_data_head == NULL)
+        return NV_ERR_GENERIC;
+
     NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags);
 
     nvet = nvlfp->event_data_head;
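
The comment added above describes a lock-free early-out: the unsynchronized read is only trusted when it yields NULL, and any other value is re-read under the spinlock before use. A generic, self-contained sketch of that pattern with POSIX spinlocks; the event type and function names are illustrative, not the driver's.

    #include <pthread.h>
    #include <stddef.h>

    struct event { struct event *next; };

    static struct event *event_head;      /* shared list head                  */
    static pthread_spinlock_t event_lock; /* protects event_head; assumed      */
                                          /* initialized with pthread_spin_init */

    /* The unlocked peek may race with a concurrent producer, but a NULL
     * result is still conclusive: nothing was queued at that instant, so the
     * lock is skipped. Any non-NULL peek is re-read under the lock. */
    static struct event *get_event(void)
    {
        struct event *e;

        if (event_head == NULL)
            return NULL;                  /* fast path, no lock taken */

        pthread_spin_lock(&event_lock);
        e = event_head;                   /* authoritative read */
        if (e != NULL)
            event_head = e->next;
        pthread_spin_unlock(&event_lock);
        return e;
    }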
@@ -5923,11 +5935,6 @@ void NV_API_CALL nv_disallow_runtime_suspend
 #endif
 }
 
-NvU32 NV_API_CALL nv_get_os_type(void)
-{
-    return OS_TYPE_LINUX;
-}
-
 void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size)
 {
 #if NVCPU_IS_PPC64LE
@@ -6082,14 +6089,16 @@ void NV_API_CALL nv_get_screen_info(
     NvU64 *pFbSize
 )
 {
-    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
-    struct pci_dev *pci_dev = nvl->pci_dev;
-    int i;
-
     *pPhysicalAddress = 0;
     *pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = *pFbSize = 0;
 
 #if defined(CONFIG_FB) && defined(NV_NUM_REGISTERED_FB_PRESENT)
     if (num_registered_fb > 0)
     {
+        int i;
+
         for (i = 0; i < num_registered_fb; i++)
         {
             if (!registered_fb[i])
@@ -6154,17 +6163,17 @@ void NV_API_CALL nv_get_screen_info(
             *pFbDepth = screen_info.lfb_depth;
             *pFbPitch = screen_info.lfb_linelength;
             *pFbSize = (NvU64)(*pFbHeight) * (NvU64)(*pFbPitch);
             return;
         }
     }
-#else
 #endif
 
     /*
     * If screen info can't be fetched with previous methods, then try
     * to get the base address and size from the memory resource tree.
     */
-    if (pci_dev != NULL)
-    {
+    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
+    struct pci_dev *pci_dev = nvl->pci_dev;
+    int i;
+
+    if (pci_dev == NULL)
+        return;
+
     BUILD_BUG_ON(NV_GPU_BAR_INDEX_IMEM != NV_GPU_BAR_INDEX_FB + 1);
     for (i = NV_GPU_BAR_INDEX_FB; i <= NV_GPU_BAR_INDEX_IMEM; i++)
     {
@@ -6197,7 +6206,6 @@ void NV_API_CALL nv_get_screen_info(
             }
         }
-    }
 #endif
 }
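
The new fallback path drops the CONFIG_FB-only guard and, per the comment, pulls the console base address and size out of the memory resource tree under the GPU BARs. A hedged, simplified sketch of where those BAR extents come from; sketch_report_fb_bar is illustrative, and the real code walks the children of these resources rather than just printing them.

    #include <linux/pci.h>

    /* Simplified stand-in: report the extents of one GPU BAR using the
     * standard PCI resource helpers. */
    static void sketch_report_fb_bar(struct pci_dev *pci_dev, int bar)
    {
        resource_size_t base = pci_resource_start(pci_dev, bar);
        resource_size_t size = pci_resource_len(pci_dev, bar);

        if (size == 0)
            return;  /* BAR not implemented on this device */

        pr_info("BAR %d: base 0x%llx, size 0x%llx\n",
                bar, (unsigned long long)base, (unsigned long long)size);
    }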
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -285,15 +285,12 @@ NV_STATUS nvGpuOpsFlushReplayableFaultBuffer(gpuFaultInfo *pFaultInfo,
 NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo,
                                        NvBool bEnable);
 
-NV_STATUS nvGpuOpsKeyRotationChannelDisable(struct gpuChannel *channelList[],
-                                            NvU32 channelListCount);
-
 // Interface used for CCSL
 NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
                                   gpuChannelHandle channel);
 NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
-NV_STATUS nvGpuOpsCcslContextUpdate(UvmCslContext *contextList[],
-                                    NvU32 contextListCount);
+NV_STATUS nvGpuOpsCcslRotateKey(UvmCslContext *contextList[],
+                                NvU32 contextListCount);
 NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
                                NvU8 direction);
 NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
@@ -311,6 +308,7 @@ NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
                               NvU32 bufferSize,
                               NvU8 const *inputBuffer,
                               NvU8 const *decryptIv,
+                              NvU32 keyRotationId,
                               NvU8 *outputBuffer,
                               NvU8 const *addAuthData,
                               NvU32 addAuthDataSize,
@@ -326,7 +324,8 @@ NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
                               NvU8 direction,
                               NvU64 increment,
                               NvU8 *iv);
-NV_STATUS nvGpuOpsLogDeviceEncryption(struct ccslContext_t *ctx,
-                                      NvU32 bufferSize);
+NV_STATUS nvGpuOpsLogEncryption(struct ccslContext_t *ctx,
+                                NvU8 direction,
+                                NvU32 bufferSize);
 
 #endif /* _NV_GPU_OPS_H_*/
@@ -1478,15 +1478,6 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
 }
 EXPORT_SYMBOL(nvUvmInterfacePagingChannelPushStream);
 
-NV_STATUS nvUvmInterfaceKeyRotationChannelDisable(uvmGpuChannelHandle channelList[],
-                                                  NvU32 channeListCount)
-{
-    nvidia_stack_t *sp = nvUvmGetSafeStack();
-
-    return rm_gpu_ops_key_rotation_channel_disable(sp, ((gpuChannelHandle *)channelList), channeListCount);
-}
-EXPORT_SYMBOL(nvUvmInterfaceKeyRotationChannelDisable);
-
 NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
                                        uvmGpuChannelHandle channel)
 {
@@ -1525,17 +1516,23 @@ void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext)
 }
 EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext);
 
-NV_STATUS nvUvmInterfaceCslUpdateContext(UvmCslContext *contextList[],
-                                         NvU32 contextListCount)
+NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
+                                     NvU32 contextListCount)
 {
     NV_STATUS status;
-    nvidia_stack_t *sp = contextList[0]->nvidia_stack;
+    nvidia_stack_t *sp;
 
-    status = rm_gpu_ops_ccsl_context_update(sp, contextList, contextListCount);
+    if ((contextList == NULL) || (contextListCount == 0) || (contextList[0] == NULL))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    sp = contextList[0]->nvidia_stack;
+    status = rm_gpu_ops_ccsl_rotate_key(sp, contextList, contextListCount);
 
     return status;
 }
-EXPORT_SYMBOL(nvUvmInterfaceCslUpdateContext);
+EXPORT_SYMBOL(nvUvmInterfaceCslRotateKey);
 
 NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
                                     UvmCslOperation operation)
@@ -1572,6 +1569,7 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                    NvU32 bufferSize,
                                    NvU8 const *inputBuffer,
                                    UvmCslIv const *decryptIv,
+                                   NvU32 keyRotationId,
                                    NvU8 *outputBuffer,
                                    NvU8 const *addAuthData,
                                    NvU32 addAuthDataSize,
@@ -1585,6 +1583,7 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
                                       bufferSize,
                                       inputBuffer,
                                       (NvU8 *)decryptIv,
+                                      keyRotationId,
                                       outputBuffer,
                                       addAuthData,
                                       addAuthDataSize,
@@ -1635,17 +1634,18 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
 }
 EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv);
 
-NV_STATUS nvUvmInterfaceCslLogExternalEncryption(UvmCslContext *uvmCslContext,
-                                                 NvU32 bufferSize)
+NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
+                                         UvmCslOperation operation,
+                                         NvU32 bufferSize)
 {
     NV_STATUS status;
     nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
 
-    status = rm_gpu_ops_ccsl_log_device_encryption(sp, uvmCslContext->ctx, bufferSize);
+    status = rm_gpu_ops_ccsl_log_encryption(sp, uvmCslContext->ctx, operation, bufferSize);
 
     return status;
 }
-EXPORT_SYMBOL(nvUvmInterfaceCslLogExternalEncryption);
+EXPORT_SYMBOL(nvUvmInterfaceCslLogEncryption);
 
 #else // NV_UVM_ENABLE
@@ -31,6 +31,7 @@ NVIDIA_SOURCES += nvidia/nv-rsync.c
 NVIDIA_SOURCES += nvidia/nv-msi.c
 NVIDIA_SOURCES += nvidia/nv-caps.c
 NVIDIA_SOURCES += nvidia/nv-caps-imex.c
+NVIDIA_SOURCES += nvidia/nv-host1x.c
 NVIDIA_SOURCES += nvidia/nv_uvm_interface.c
 NVIDIA_SOURCES += nvidia/libspdm_aead.c
 NVIDIA_SOURCES += nvidia/libspdm_ecc.c
@@ -41,6 +42,7 @@ NVIDIA_SOURCES += nvidia/libspdm_rsa.c
 NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c
 NVIDIA_SOURCES += nvidia/libspdm_sha.c
 NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c
+NVIDIA_SOURCES += nvidia/libspdm_internal_crypt_lib.c
 NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c
 NVIDIA_SOURCES += nvidia/libspdm_ec.c
 NVIDIA_SOURCES += nvidia/libspdm_x509.c
@@ -228,8 +228,6 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_clear_in
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_mem_from_gscco
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes
-NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_platform_is_fpga
-NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_platform_is_sim
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -38,4 +38,4 @@ bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
                                            const uint8_t *data_in, size_t data_in_size,
                                            const uint8_t *tag, size_t tag_size,
                                            uint8_t *data_out, size_t *data_out_size);
 
+bool libspdm_check_crypto_backend(void);
@@ -1325,6 +1325,16 @@ NV_STATUS NV_API_CALL os_get_version_info(os_version_info * pOsVersionInfo)
     return status;
 }
 
+NV_STATUS NV_API_CALL os_get_is_openrm(NvBool *bIsOpenRm)
+{
+#if defined(NVCPU_X86_64) || defined(NVCPU_AARCH64)
+    *bIsOpenRm = NV_TRUE;
+    return NV_OK;
+#else // defined(NVCPU_X86_64) || defined(NVCPU_AARCH64)
+    return NV_ERR_NOT_SUPPORTED;
+#endif // defined(NVCPU_X86_64) || defined(NVCPU_AARCH64)
+}
+
 NvBool NV_API_CALL os_is_xen_dom0(void)
 {
 #if defined(NV_DOM0_KERNEL_PRESENT)